Example #1
    def get_object_features(self, contours, original_image):
        objects_with_features = []
        for contour in contours:
            feature_vector = []
            # Compute image moments
            moments = cv2.moments(contour)
            area = moments['m00']

            # Get length of blob edge
            perimeter = cv2.arcLength(contour, True)

            circularity = 4 * np.pi * area / (perimeter**2)

            # Get bounding box
            box_x, box_y, w, h = cv2.boundingRect(contour)

            # Width/height ratio
            aspect_ratio = w / h

            compactness = area / (w * h)

            # Bounding circle
            (circ_x, circ_y), radius = cv2.minEnclosingCircle(contour)
            radius = int(radius)
            circle_area_overlap = area / (np.pi * radius**2)

            # Bounding triangle
            triangle_area, triangle = cv2.minEnclosingTriangle(contour)
            triangle_area_overlap = area / triangle_area

            # Finds the average of each colour channel inside the contour
            mask = np.zeros((original_image.shape[0], original_image.shape[1]),
                            np.uint8)
            cv2.drawContours(mask, [contour], 0, 255, -1)
            mean_of_colours = cv2.mean(original_image, mask=mask)

            # Distance from center of mass to center of bounding box
            # Scaled to account for differing box areas
            com = [
                int(moments['m10'] / moments['m00']),
                int(moments['m01'] / moments['m00'])
            ]
            box_center_distance = np.sqrt((com[0] - box_x)**2 +
                                          (com[1] - box_y)**2) / (w * h)

            # Distance from center of mass to center of bounding circle
            circle_center_distance = np.sqrt(
                (com[0] - int(circ_x))**2 +
                (com[1] - int(circ_y))**2) / (np.pi * radius**2)

            feature_vector = [
                circularity, aspect_ratio, circle_area_overlap,
                triangle_area_overlap, mean_of_colours, compactness,
                box_center_distance, circle_center_distance
            ]
            objects_with_features.append(feature_vector)

        return objects_with_features
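To make the descriptors above easier to follow, here is a small self-contained sketch (assuming OpenCV 4, where findContours returns two values) that computes circularity, compactness and the triangle-overlap ratio for a single synthetic blob; the test image and variable names are illustrative, not part of the original class:

import cv2
import numpy as np

# Synthetic blob: a filled circle, so circularity should come out close to 1.
canvas = np.zeros((200, 200), np.uint8)
cv2.circle(canvas, (100, 100), 60, 255, -1)
contours, _ = cv2.findContours(canvas, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
contour = contours[0]

area = cv2.moments(contour)['m00']
perimeter = cv2.arcLength(contour, True)
circularity = 4 * np.pi * area / perimeter ** 2           # ~1.0 for a circle

x, y, w, h = cv2.boundingRect(contour)
compactness = area / (w * h)                              # ~pi/4 for a circle

triangle_area, _ = cv2.minEnclosingTriangle(contour)
triangle_area_overlap = area / triangle_area              # ~0.6 for a circle

print(round(circularity, 3), round(compactness, 3), round(triangle_area_overlap, 3))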
Example #2
def triangle_fill_ratio(contour, triangle=None) -> float:
    """
    Returns the ratio between a given contour and its minimum enclosing triangle
    :param contour: numpy array
    :param triangle: optional output array passed to the minEnclosingTriangle function, can be ignored
    :return: returns the ratio between the contour and the bounding triangle
    :rtype: float
    """
    bounding_triangle_area, triangle = cv2.minEnclosingTriangle(
        contour, triangle)
    contour_area = cv2.contourArea(contour)
    return contour_area / float(bounding_triangle_area)
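A hedged usage sketch for triangle_fill_ratio (assuming OpenCV 4's two-value findContours), using a synthetic triangular blob whose fill ratio should come out close to 1.0:

import cv2
import numpy as np

canvas = np.zeros((200, 200), np.uint8)
pts = np.array([[20, 180], [180, 180], [100, 20]], np.int32)
cv2.fillPoly(canvas, [pts], 255)

contours, _ = cv2.findContours(canvas, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
print(triangle_fill_ratio(contours[0]))  # expected to be close to 1.0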
Example #3
def find_robots(frame, Lower, Upper, n_robots, stop, YR_Lower, YR_Upper, points):
    blurred = cv2.GaussianBlur(frame,(11,11),0)
#    hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)
    _mask = cv2.inRange(frame,Lower,Upper)
    '''
    for i in range(0,len(YR_Lower)) :
        _mask = cv2.inRange(frame,YR_Lower[i],YR_Upper[i])
        _mask = cv2.erode(_mask,None, iterations=2)
        _mask = cv2.dilate(_mask,None,iterations=2)
        mask+=_mask
    '''
    mask = cv2.bitwise_not(_mask)
    # remove any small blobs that may be left on the mask
#    mask = cv2.erode(mask,None, iterations=2)
#    mask = cv2.dilate(mask,None,iterations=2)

    cnts = cv2.findContours(mask.copy(),cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)

    dots = list()
    _tmp = list()
    for i in range(len(cnts)) :
        c = cnts[i]
        tri = cv2.minEnclosingTriangle(c)
        point = list()
        if tri[0] >= 700 :
            for j in range(0,3) :
                point.append([np.int(tri[1][j][0][0]),np.int(tri[1][j][0][1])])
            dots.append(point)
            center,o,vertex = Calculate_X_Y_O_from_Triangle(frame, np.array(point[0]),np.array(point[1]),np.array(point[2]))
            _x = [min(point[0][0],point[1][0],point[2][0]),max(point[0][0],point[1][0],point[2][0])]
            _y = [min(point[0][1],point[1][1],point[2][1]),max(point[0][1],point[1][1],point[2][1])]
            if _x[0] <= 0 :
                _x[0] = 1
            if _y[0] <= 0 :
                _y[0] = 1
            image = frame.copy()
            image = image[_y[0]:_y[1],_x[0]:_x[1]]
            h,w,c = image.shape
            l_image = cv2.pyrUp(image,dstsize=(w*2,h*2),borderType = cv2.BORDER_DEFAULT)
            data = Robot_num(l_image,YR_Lower,YR_Upper)
            _tmp.append(data)
            if data == 'None' or int(data)-1 >= len(points):
                print('Wrong Detection')
                stop = 1
            else :
                points[int(data)-1] = [center,o]
            cv2.circle(frame,tuple(center),4,(0,255,0),-1)
    print(_tmp)
    print('------')
    frame = draw_Triangle(frame,dots)
    return frame, points, stop, points
Example #4
 def on_stroke_process(self, event):
     try:
         if self.stroke_data.found_gesture and not self.write_mode:
             points = []
             for stroke in self.stroke_data.strokes:
                 if len(stroke) == 0:
                     continue
                 points.append([])
                 points[-1].append([[v[0],v[1]]
                                for v in stroke])
                 if self.stroke_data.gesture == 'Circle':
                     points = np.array(
                         [item for sublist in
                          points for item in sublist]).reshape(-1,2)
                     center = np.int0(np.mean(points,axis=0))
                     radius = np.int0(norm(np.std(points,axis=0)))
                     cv2.circle(self.drawing_im,(center[0],
                                             center[1]), radius,
                            [255,255,255], self.size)
                 elif self.stroke_data.gesture == 'Line':
                     points = np.array(
                         [item for sublist in points for item in sublist])
                     cv2.line(self.drawing_im, tuple(points[0][0])
                              , tuple(points[-1][-1]),
                              [255, 255, 255], self.size)
                 elif self.stroke_data.gesture == 'Rectangle':
                     points = np.array(
                         [item for sublist in points for item in sublist])
                     rect = cv2.minAreaRect(points)
                     box = np.int0(cv2.boxPoints(rect))
                     cv2.drawContours(self.drawing_im, [box], 0,
                                      [255, 255, 255], self.size)
                 elif self.stroke_data.gesture == 'Triangle':
                     points = np.array(
                         [item for sublist in points 
                          for item in sublist]).reshape(1,-1,2)
                     triangle = np.int0(cv2.minEnclosingTriangle(
                         points)[1].squeeze())
                     cv2.drawContours(self.drawing_im,
                                      [triangle], 0,
                                      [255, 255, 255], self.size)
                 self.temporary_im = np.zeros_like(self.drawing_im)
         else:
             if self.write_mode:
                 self.drawing_im += self.temporary_im
             else:
                 self.temporary_im = np.zeros_like(self.drawing_im)
     except Exception as e:
          exc_type, exc_value, exc_traceback = sys.exc_info()
          traceback.print_exception(exc_type,
                             exc_value,
                             exc_traceback, limit=2, file=sys.stdout)
Example #5
def get_fill_ratio_triangle(contour, triangle=None, reverse_div=False):
    """
    Action: returns the ratio
    :param contour:
    :param triangle: optional output array passed to the minEnclosingTriangle function, can be ignored
    :param reverse_div: If the triangle's area should be divided by the contour.
    :return:
    """
    t_area, t = cv2.minEnclosingTriangle(contour, triangle)
    c_area = cv2.contourArea(contour)
    if reverse_div:
        return float(t_area) / c_area
    return c_area / float(t_area)
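The reverse_div flag simply inverts the ratio; a minimal illustration (again assuming OpenCV 4 and a made-up circular blob):

import cv2
import numpy as np

canvas = np.zeros((100, 100), np.uint8)
cv2.circle(canvas, (50, 50), 30, 255, -1)
cnt = cv2.findContours(canvas, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[0][0]

fill = get_fill_ratio_triangle(cnt)                       # contour area / triangle area, < 1
inverse = get_fill_ratio_triangle(cnt, reverse_div=True)  # triangle area / contour area, > 1
print(round(fill, 3), round(inverse, 3))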
Example #6
    def min_area_features(self, cnt):
        """
        Produces min area features of the contour

        @param cnt: A convex hull contour

        @returns: Min area triangle, min area rect, min area circle radius
        """
        min_rect = cv.minAreaRect(cnt)
        min_rect = np.int0(cv.boxPoints(min_rect))
        min_tri = cv.minEnclosingTriangle(cnt)[1]  # Sometimes returns None
        min_circ_rad = cv.minEnclosingCircle(cnt)[1]
        return min_rect, min_tri, min_circ_rad
Example #7
def findBoundingTriangleArea(img, contours):

    #This function tries to fit the minimum bounding triangle for the given contours.
    bounding_triangle = 0
    # TODO: Find the minimum enclosing triangle that can fit the given contours.
    # Hint: Check the function minEnclosingTriangle in opencv and place its output in the variable x
    x = cv2.minEnclosingTriangle(contours[1])

    #TODO (Optional): You can uncomment the following command(s) to show or display the bounded triangle.
    #bounding_triangle = cv2.polylines(img.copy(), np.int32([x[1]]), True, (0, 255, 0), 2)
    # cv2.imshow('Image_Triangle', bounding_triangle)
    # cv2.waitKey(0)
    print(x[0])
    # TODO: Find the area of the bounding triangle
    area = x[0]
    return area, bounding_triangle
Example #8
def scissorTest():
    for x in range(0, no_contours):
        length = family_tree[x].__len__() - 1
        if length >= 2 and x in parents:
            parent = x
            tri_area, sizes = cv2.minEnclosingTriangle(contours[parent])
            hull = cv2.convexHull(contours[parent], returnPoints=True)
            poly = cv2.approxPolyDP(hull, 0.001, True)
            hull_area = cv2.contourArea(poly)
            ratio = hull_area / tri_area
            if ratio > 0.85:
                cv2.drawContours(ref, contours, parent, (0, 0, 255), 2)
                M = moments[x]
                text_x = int(M['m10'] / M['m00'])
                text_y = int(M['m01'] / M['m00'])
                cv2.putText(ref, "Scissor", (text_x, text_y),
                            cv2.FONT_HERSHEY_PLAIN, 1.5, (0, 0, 255), 2)
Example #9
def find_robot(img, crange):
    conts = find_col(img, crange)
    conts_size = [(cnt, cv2.contourArea(cnt)) for cnt in conts]
    mx = max([s for c, s in conts_size])
    bot = [c for c, s in conts_size if s == mx][0]
    _, bot = cv2.minEnclosingTriangle(bot)
    bari_p = [
        bot[0][0][0] / 3 + bot[1][0][0] / 3 + bot[2][0][0] / 3,
        bot[0][0][1] / 3 + bot[1][0][1] / 3 + bot[2][0][1] / 3
    ]

    top = max(points_dst(bari_p, point[0]) for point in bot)
    top_p = [point[0] for point in bot
             if points_dst(bari_p, point[0]) == top][0]
    bari_p = tuple([int(z) for z in bari_p])
    top_p = tuple([int(z) for z in top_p])
    theta = atan2(bari_p[1] - top_p[1], bari_p[0] - top_p[0])
    return np.int0(bot), bari_p, top_p, theta
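A small self-contained sketch of the orientation idea above (barycentre of the enclosing triangle versus its most distant vertex), using a synthetic triangular blob; find_col and points_dst from the original module are not reproduced, and the shape is invented for illustration:

import cv2
import numpy as np
from math import atan2

# Triangular blob pointing roughly to the right.
img = np.zeros((200, 200), np.uint8)
cv2.fillPoly(img, [np.array([[40, 60], [40, 140], [170, 100]], np.int32)], 255)

contours, _ = cv2.findContours(img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
_, bot = cv2.minEnclosingTriangle(max(contours, key=cv2.contourArea))
bot = np.squeeze(bot)                         # shape (3, 2)

bari_p = bot.mean(axis=0)                     # barycentre of the triangle
dists = np.linalg.norm(bot - bari_p, axis=1)
top_p = bot[np.argmax(dists)]                 # vertex farthest from the barycentre
theta = atan2(bari_p[1] - top_p[1], bari_p[0] - top_p[0])
print(np.round(bari_p, 1), np.round(top_p, 1), round(theta, 3))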
Example #10
def fit_contours(contours, mod='R', thd=625):
    result = []
    for cnt in contours:
        if len(cnt.shape)<2: continue # 1-vertex
        mm = cv2.moments(cnt) # polygon moments
        if mm['m00']==0: continue # skip degenerate polygons
        ct = mm['m10']/mm['m00'], mm['m01']/mm['m00'] # polygon centroid
        area = cv2.contourArea(cnt)
        if area > thd: # area_threshold
            if type(mod)==str and mod in 'Cc': # minimum enclosing circle
                fit = cv2.minEnclosingCircle(cnt) # (center,radius)
            elif type(mod)==str and mod in 'Ee': # best-fit ellipse
                fit = cv2.fitEllipse(cnt) # (center,axes,theta)
            elif type(mod)==str and mod in 'Rr': # minimum-area rectangle
                fit = cv2.minAreaRect(cnt) # (center,(w,h),theta)
                #fit = cv2.boxPoints(fit) # convert: [4-vertex]
            elif type(mod)==str and mod in 'Tt': # minimum enclosing triangle
                _, fit = cv2.minEnclosingTriangle(cnt) # (area,3-vertex)
            elif type(mod) in (int,float) and mod>0: # polygon approximation
                s = area/cv2.arcLength(cnt, closed=True)
                fit = cv2.approxPolyDP(cnt, mod*s, True).squeeze()
            result.append([ct,fit]) # [center,fit]
    return result
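A brief usage sketch for fit_contours in triangle mode (assuming OpenCV 4's two-value findContours); the binary test image is invented for illustration:

import cv2
import numpy as np

canvas = np.zeros((200, 200), np.uint8)
cv2.rectangle(canvas, (40, 40), (160, 120), 255, -1)
contours, _ = cv2.findContours(canvas, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

for centre, tri in fit_contours(contours, mod='T', thd=625):
    print(np.round(centre, 1))       # contour centroid
    print(np.int32(tri.squeeze()))   # three vertices of the enclosing triangle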
Example #11
def tar3(cons):
  goodcons = []
  for con in cons:
    cen, wh, ang = cv2.minAreaRect(con)
    try:
      h_w = wh[1] / wh[0]
      if h_w > .95 and h_w < 1.28:
        if wh[1] > 10 and wh[0] > 10:
          area = cv2.contourArea(con)
          if area > 190 and area < 700:
            hull = cv2.convexHull(con)
            sol = area / cv2.contourArea(hull)
            if sol > .85:
              arc = cv2.arcLength(con, True)
              if arc > 65 and arc < 75:
                ret, tri = cv2.minEnclosingTriangle(con)

                print("Angle :", ang)
                print('Width :', wh[0], "Height :", wh[1])
                print('Area :', area)
                print('Arc Length:', cv2.arcLength(con, True))
                print('Solidity:', sol)

                goodcons.append(con)
                print(tri)
    except ZeroDivisionError:
      pass
  return goodcons
Example #12
def analyze_triangles(rects, parentim):

    #if type(parentim) == type(Shape):
    if isinstance(parentim, Shape):
        parentim = parentim.img
    #else:
    #print(type(parentim))
    #print(type(Shape.__class__))

    for x in rects:
        area, tri = cv2.minEnclosingTriangle(x.ocontour)
        x.triangle = np.reshape(np.round(tri).astype(np.int32), (3, 2))
        x.triangle_area = area
        inside = np.zeros(parentim.shape, dtype=np.uint8)
        cv2.drawContours(inside, [x.triangle],
                         0,
                         255,
                         -1,
                         offset=tuple(x.offset))
        inside = (cv2.bitwise_and(parentim, inside) + (inside != 255) * 255)

        #x['triangle-area-ratio'] = count_black(x['img'])/area
        x.triangle_area_ratio = count_black(inside) / area
        x.triangle_perimeter = cv2.arcLength(tri, True)
Example #13
import cv2 as cv
import numpy as np

if __name__ == '__main__':
    # create a blank image
    image = np.zeros((500, 500))

    # generate random points
    points = np.random.randint(150, 270, [100, 2]).astype('float32')

    # draw the random points on the image
    for pt in points:
        cv.circle(image, (int(pt[0]), int(pt[1])), 1, (255, 255, 255), -1)
    image1 = image.copy()

    # find the minimum triangle enclosing the point set
    _, triangle = cv.minEnclosingTriangle(np.array([points]))
    # find the minimum circle enclosing the point set
    center, radius = cv.minEnclosingCircle(points)

    # draw the triangle (the vertices of triangle are unpacked explicitly here for the reader's benefit)
    a = triangle[0][0]
    b = triangle[1][0]
    c = triangle[2][0]
    cv.line(image, (int(a[0]), int(a[1])), (int(b[0]), int(b[1])), (255, 255, 255), 1, 16)
    cv.line(image, (int(a[0]), int(a[1])), (int(c[0]), int(c[1])), (255, 255, 255), 1, 16)
    cv.line(image, (int(b[0]), int(b[1])), (int(c[0]), int(c[1])), (255, 255, 255), 1, 16)

    # draw the circle
    center = np.int0(center)
    cv.circle(image1, (center[0], center[1]), int(radius), (255, 255, 255), 1,
              cv.LINE_AA)
Example #14
def analyse_frame_tail_only(image,
                            background,
                            thresh1,
                            thresh2,
                            n_points,
                            return_image=False):
    """Main function for tracking the tail of the fish in a video frame without tracking the eyes

    Parameters
    ----------
    image : np.int8
        8-bit numpy array representing a rasterized color frame from a video

    background : np.int8
        8-bit numpy array representing a background image

    thresh1 : int (0-255)
        The threshold used for finding the fish within the frame

    thresh2 : int (0-255)
        The threshold used for finding the eyes and swim bladder of the fish within the frame

    n_points : int
        The number of points to fit to the tail

    return_image : bool, optional (default = False)
        Whether to return the result of the tracking as an image. This should be set to True if the function is being
        called to check the output of tracking (e.g. when setting thresholds) and False when only tracked data is being
        extracted from video frames for saving.

    Returns
    -------
    If return_image = False:
        tracking_params : dict
            Dictionary of tracking data for the frame

    If return_image = True:
        show_contours, show_tracking : np.int8, np.int8
            Two images showing: 1) the contours found using given thresholds; and 2) the final result of the tracking

    See Also
    --------
    analyse_frame
    track_video_tail_only
    find_contours
    contour_info
    """
    # BACKGROUND SUBTRACTION
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    bg = background_division(gray, background)
    scaled = cv2.resize(bg, None, fx=2, fy=2)

    # FIND FISH
    fish_contours = find_contours(scaled, thresh1)
    mask = np.zeros(scaled.shape, np.uint8)
    masked = mask.copy()
    cv2.drawContours(mask, fish_contours, 0, 1, -1)
    mask = mask.astype(np.bool)
    masked[mask] = scaled[mask]
    masked = cv2.equalizeHist(masked)

    # FIND HEAD
    internal_contours = find_contours(masked, thresh2)[:3]
    try:
        # find the minimum enclosing triangle for the contours
        internal_points = np.concatenate(internal_contours, axis=0)
        ret, triangle_points = cv2.minEnclosingTriangle(internal_points)
        triangle_points = np.squeeze(triangle_points)
        triangle_centre = np.mean(triangle_points, axis=0)
        distance_from_centre = np.linalg.norm(triangle_points -
                                              triangle_centre,
                                              axis=1)
        p1_idx = np.argmax(distance_from_centre)
        p1 = (triangle_points[p1_idx] + triangle_centre) / 2.
        p2 = triangle_points[np.arange(3) != p1_idx].mean(axis=0)

        heading_vector = p2 - p1
        heading_vector /= np.linalg.norm(heading_vector)
        heading = np.arctan2(*heading_vector[::-1])

        # TAIL TRACKING
        tail_points = fit_tail(mask, p1, heading_vector, n_points)

        if return_image:
            contour_img = cv2.resize(image, None, fx=2, fy=2)
            tracking_img = contour_img.copy()
            colors = dict(k=0,
                          w=(255, 255, 255),
                          b=(255, 0, 0),
                          g=(0, 255, 0),
                          r=(0, 0, 255),
                          y=(0, 255, 255))
            # draw contours
            cv2.drawContours(contour_img, fish_contours, 0, colors['k'], 1)
            cv2.drawContours(contour_img, internal_contours, -1, colors['w'],
                             1)
            # draw tracked points
            # plot heading
            cv2.circle(tracking_img, array2point(p1), 3, colors['y'], -1)
            cv2.line(tracking_img, array2point(p1),
                     array2point(p1 + (80 * heading_vector)), colors['y'], 2)
            # plot tail points
            for p in tail_points:
                cv2.circle(tracking_img, array2point(p), 1, colors['b'], -1)

            return contour_img, tracking_img

        tracking_params = {
            'centre': tuple(p1),
            'heading': heading,
            'midpoint': tuple(p2),
            'tail_points': tail_points,
            'tracked': True
        }

    except Exception:

        if return_image:
            contour_img = cv2.resize(image, None, fx=2, fy=2)
            tracking_img = contour_img.copy()
            colors = dict(k=0,
                          w=(255, 255, 255),
                          b=(255, 0, 0),
                          g=(0, 255, 0),
                          r=(0, 0, 255),
                          y=(0, 255, 255))
            # draw contours
            cv2.drawContours(contour_img, fish_contours, 0, colors['k'], 1)
            cv2.drawContours(contour_img, internal_contours, -1, colors['w'],
                             1)
            return contour_img, tracking_img

        param_names = ['centre', 'heading', 'midpoint']
        tracking_params = dict([(param_name, None)
                                for param_name in param_names])
        tracking_params['tail_points'] = np.zeros((n_points, 2)) * np.nan
        tracking_params['tracked'] = False

    return tracking_params
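A hedged usage sketch for analyse_frame_tail_only, assuming the module-level helpers it relies on (background_division, find_contours, fit_tail, array2point) are available; the file names and threshold values below are placeholders, not part of the original code:

import cv2

frame = cv2.imread("frame_0001.png")                            # placeholder paths
background = cv2.imread("background.png", cv2.IMREAD_GRAYSCALE)

params = analyse_frame_tail_only(frame, background,
                                 thresh1=50, thresh2=110,       # illustrative thresholds
                                 n_points=9)
if params['tracked']:
    print('heading (radians):', params['heading'])
    print('number of tail points:', len(params['tail_points']))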
Example #15
def findBoundingTriangleArea(img, contours):
    x = cv2.minEnclosingTriangle(contours[1])
    bounding_triangle = cv2.polylines(img.copy(), np.int32([x[1]]), True,
                                      (0, 255, 0), 2)
    return x[0], bounding_triangle
Example #16
                              (center[0] + 5, center[1] + 5), (0, 128, 255),
                              -1)
                #cv2.circle(contours,center,radius,(0,0,255),4)
                canny_found += 1

thresh_found = 0
for c in thresh_cnts:
    # compute the center of the contour, then detect the name of the
    # shape using only the contour
    M = cv2.moments(c)
    peri = cv2.arcLength(c, True)
    approx = cv2.approxPolyDP(c, 0.04 * peri, True)
    shape = sd.detect(c)
    if (M["m00"] > 10):
        if (len(approx) == 3):
            triangle = cv2.minEnclosingTriangle(c)
            print "---"
            print "triangle"
            print M["m00"]
            print triangle[0]
            area = cv2.contourArea(c)
            print area
            print "---"
            if (M["m00"] / triangle[0] > 0.5):
                for i in range(0, 3):
                    cv2.line(contours,
                             (triangle[1][i][0][0], triangle[1][i][0][1]),
                             (triangle[1][(i + 1) % 3][0][0],
                              triangle[1][(i + 1) % 3][0][1]), (0, 255, 0), 2)
                thresh_found += 1
        if (len(approx) == 4):
Example #17
def shape_features(obj_input,
                   df_contours=None,
                   return_basic=True,
                   return_moments=False,
                   return_hu_moments=False):
    """
    Collects a set of 41 shape descriptors from every contour. There are three sets of
    descriptors: basic shape descriptors, moments, and hu moments. Two additional features,
    contour area and diameter, are already provided by the find_contours function.
    https://docs.opencv.org/3.4.9/d3/dc0/group__imgproc__shape.html

    Of the basic shape descriptors, all 10 are translation invariant, 8 are rotation
    invariant (rect_height and rect_width are not), and 4 are also scale invariant
    (circularity, compactness, roundness, solidity).
    https://en.wikipedia.org/wiki/Shape_factor_(image_analysis_and_microscopy)  
                                
    The moments set encompasses 10 raw spatial moments (some are translation and rotation
    invariants, but not all), 7 central moments (all translational invariant) and 7 central 
    normalized moments (all translational and scale invariant).
    https://en.wikipedia.org/wiki/Image_moment
    
    The 7 hu moments are derived from the central moments, and are all translation, scale
    and rotation invariant.
    http://www.sci.utah.edu/~gerig/CS7960-S2010/handouts/Hu.pdf
        
    Basic shape descriptors:
        circularity = 4 * np.pi * contour_area / contour_perimeter_length^2
        compactness = √(4 * contour_area / pi) / contour_diameter
        min_rect_max = minimum bounding rectangle major axis
        min_rect_min = minimum bounding rectangle minor axis
        perimeter_length = total length of the contour perimeter
        rect_height = height of the bounding rectangle ("caliper dim 1")
        rect_width = width of the bounding rectangle ("caliper dim 2")
        roundness = (4 * contour_area) / (pi * contour_diameter^2)
        solidity = contour_area / convex_hull_area
        tri_area = area of minimum bounding triangle

    Moments:
        raw moments = m00, m10, m01, m20, m11, m02, m30, m21,  m12, m03
        central moments = mu20, mu11, mu02, mu30, mu21, mu12, mu03,  
        normalized central moments = nu20, nu11, nu02, nu30, nu21, nu12, nu03

    Hu moments:
        hu moments = hu1, hu2, hu3, hu4, hu5, hu6, hu7

    Parameters
    ----------
    obj_input : array or container
        input object
    df_contours : DataFrame, optional
        contains the contours
    return_basic: bool, optional (default = True)
        append the basic shape descriptors to a provided contour DataFrame
    return_moments: bool, optional (default = False)
        append the moments to a provided contour DataFrame
    return_hu_moments: bool, optional (default = False)
        append the hu moments to a provided contour DataFrame
        
    Returns
    -------
    df_contours : DataFrame or container
        contains contours, and added features

    """

    ## load df
    if obj_input.__class__.__name__ == "DataFrame":
        df_contours = obj_input
    elif obj_input.__class__.__name__ == "container":
        if hasattr(obj_input, "df_contours"):
            df_contours = obj_input.df_contours
    else:
        print("wrong input format.")
        return

    if df_contours.__class__.__name__ == "NoneType":
        print("no df supplied - cannot measure colour intensity")
        return

    ## custom shape descriptors
    desc_basic_shape = [
        'circularity', 'compactness', 'min_rect_max', 'min_rect_min',
        'perimeter_length', 'rect_height', 'rect_width', 'roundness',
        'solidity', 'tri_area'
    ]
    for name in desc_basic_shape:
        df_contours = df_contours.assign(**{name: "NA"})

    ## moments
    desc_moments = [
        'm00', 'm10', 'm01', 'm20', 'm11', 'm02', 'm30', 'm21', 'm12', 'm03',
        'mu20', 'mu11', 'mu02', 'mu30', 'mu21', 'mu12', 'mu03', 'nu20', 'nu11',
        'nu02', 'nu30', 'nu21', 'nu12', 'nu03'
    ]
    for name in desc_moments:
        df_contours = df_contours.assign(**{name: "NA"})

    ## hu moments
    desc_hu = ['hu1', 'hu2', 'hu3', 'hu4', 'hu5', 'hu6', 'hu7']
    for name in desc_hu:
        df_contours = df_contours.assign(**{name: "NA"})

    ## calculate shape descriptors from contours
    for index, row in df_contours.iterrows():

        ## contour coords
        coords = row["coords"]

        ## retrieve area and diameter
        cnt_area = row["area"]
        cnt_diameter = row["diameter"]

        ## custom shape descriptors
        convex_hull = cv2.convexHull(coords)
        tri_area, tri_coords = cv2.minEnclosingTriangle(coords)
        min_rect_center, min_rect_min_max, min_rect_angle = cv2.minAreaRect(
            coords)
        min_rect_min, min_rect_max = min_rect_min_max[0], min_rect_min_max[1]
        rect_x, rect_y, rect_width, rect_height = cv2.boundingRect(coords)
        perimeter_length = cv2.arcLength(coords, closed=True)
        circularity = 4 * np.pi * cnt_area / math.pow(perimeter_length, 2)
        roundness = (4 * cnt_area) / (np.pi * math.pow(cnt_diameter, 2))
        solidity = cnt_area / cv2.contourArea(convex_hull)
        compactness = math.sqrt(4 * cnt_area / np.pi) / cnt_diameter

        df_contours.at[index,
                       desc_basic_shape] = (circularity, compactness,
                                            min_rect_max, min_rect_min,
                                            perimeter_length, rect_height,
                                            rect_width, roundness, solidity,
                                            tri_area)

        ## moments
        moments = cv2.moments(coords)
        df_contours.at[index, desc_moments] = list(moments.values())

        ## hu moments
        hu_moments = cv2.HuMoments(moments)
        hu_moments_list = []
        for i in hu_moments:
            hu_moments_list.append(i[0])
        df_contours.at[index, desc_hu] = hu_moments_list

    ## drop unwanted columns
    if return_basic == False:
        df_contours.drop(desc_basic_shape, axis=1, inplace=True)
    if return_moments == False:
        df_contours.drop(desc_moments, axis=1, inplace=True)
    if return_hu_moments == False:
        df_contours.drop(desc_hu, axis=1, inplace=True)

    ## return
    if obj_input.__class__.__name__ == "DataFrame":
        return df_contours
    elif obj_input.__class__.__name__ == "container":
        obj_input.df_contours = df_contours
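A small self-contained sketch of the per-contour descriptor math used above (roundness, solidity and the bounding-triangle area), computed for a single contour instead of going through the DataFrame workflow; the test shape and the diameter stand-in are invented for illustration:

import math
import cv2
import numpy as np

canvas = np.zeros((300, 300), np.uint8)
cv2.ellipse(canvas, (150, 150), (100, 60), 30, 0, 360, 255, -1)
contours, _ = cv2.findContours(canvas, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
coords = contours[0]

cnt_area = cv2.contourArea(coords)
(_, _), radius = cv2.minEnclosingCircle(coords)
cnt_diameter = 2 * radius                      # stand-in for the "diameter" column

convex_hull = cv2.convexHull(coords)
tri_area, tri_coords = cv2.minEnclosingTriangle(coords)

roundness = (4 * cnt_area) / (np.pi * math.pow(cnt_diameter, 2))
solidity = cnt_area / cv2.contourArea(convex_hull)

print(round(roundness, 3), round(solidity, 3), round(tri_area, 1))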
Example #18
def track_with_watershed(image, contours):
    """Watershed algorithm for finding the centres and angles of the eyes and swim bladder if simple thresholding fails

    The algorithm works by first fitting a triangle that encloses all the points in the internal contours of the fish.
    Using this triangle, the approximate locations of the eyes and swim bladder are calculated. These approximate
    locations are used as seeds for a watershed on the original background-subtracted image. The watershed marks
    contiguous areas of the image belonging to the same feature, from which a contour is calculated and the its centre
    and angle.

    The function is considerably slower at finding the internal features than straightforward thresholding. However, it
    is useful for when two contours fuse for a couple of frames in a recording, as occasionally happens during tracking.
    The function can still work when the fish rolls, however in these cases the eye tracking tends to be very inaccurate.
    Nonetheless, it is still useful for approximating the heading of the fish in such cases.

    Parameters
    ----------
    image : array-like
        Unsigned 8-bit integer array representing a background-subtracted image
    contours : list
        The contours that were found after applying a threshold and finding contours

    Returns
    -------
    centres, angles : np.array
        Arrays representing the centres, shape (3, 2), and angles, shape (3,), of internal features

    Raises
    ------
    TrackingError
        If any error is encountered during the watershed process. Errors tend to occur if contours is an empty list, or
        if a cv2 error is encountered when trying to calculate the minEnclosingTriangle.

    References
    ----------
    Uses slightly modified version of the watershed algorithm here:
    http://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_imgproc/py_watershed/py_watershed.html
    """
    try:
        # find the minimum enclosing triangle for the contours
        internal_points = np.concatenate(contours, axis=0)
        ret, triangle_points = cv2.minEnclosingTriangle(internal_points)
        triangle_points = np.squeeze(triangle_points)
        # find approximate locations of the features
        triangle_centre = np.mean(triangle_points, axis=0)
        estimated_feature_centres = (triangle_points + triangle_centre) / 2
        sure_fg = np.zeros(image.shape, np.uint8)
        for c in estimated_feature_centres:
            contour_check = np.array([
                cv2.pointPolygonTest(cntr, array2point(c), False)
                for cntr in contours
            ])
            if np.all(contour_check == -1):
                internal_points = np.squeeze(internal_points)
                distances = np.linalg.norm(internal_points - c, axis=1)
                c = internal_points[np.argmin(distances)]
            cv2.circle(sure_fg, array2point(c), 1, 255, -1)
        # watershed
        unknown = np.zeros(image.shape, np.uint8)
        cv2.drawContours(unknown, contours, -1, 255, -1)
        unknown = cv2.morphologyEx(unknown,
                                   cv2.MORPH_CLOSE,
                                   np.ones((3, 3), np.uint8),
                                   iterations=3)
        unknown[sure_fg == 255] = 0
        ret, markers = cv2.connectedComponents(sure_fg, connectivity=4)
        markers = markers + 1
        markers[unknown == 255] = 0
        markers = cv2.watershed(cv2.cvtColor(image, cv2.COLOR_GRAY2BGR),
                                markers)
        # calculate contour features
        centres, angles = [], []
        for i in range(2, 5):
            contour_mask = np.zeros(image.shape, np.uint8)
            contour_mask[markers == i] = 255
            img, contours, hierarchy = cv2.findContours(
                contour_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
            contour = contours[0]
            c, th = contour_info(contour)
            centres.append(c)
            angles.append(th)
        centres, angles = np.array(centres), np.array(angles)
        return centres, angles
    except Exception:
        raise TrackingError()
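A self-contained sketch of the seed-estimation step described in the docstring (the midpoints between the enclosing triangle's vertices and its centre approximate the eye and swim-bladder locations), on a synthetic three-blob image; the watershed itself and the module helpers are not reproduced here:

import cv2
import numpy as np

# Three blobs standing in for the two eyes and the swim bladder.
image = np.zeros((120, 120), np.uint8)
for centre in [(40, 40), (80, 40), (60, 90)]:
    cv2.circle(image, centre, 10, 255, -1)

contours, _ = cv2.findContours(image, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
internal_points = np.concatenate(contours, axis=0)

ret, triangle_points = cv2.minEnclosingTriangle(internal_points)
triangle_points = np.squeeze(triangle_points)
triangle_centre = np.mean(triangle_points, axis=0)

# Approximate feature locations: halfway between each vertex and the centre.
estimated_feature_centres = (triangle_points + triangle_centre) / 2
print(np.round(estimated_feature_centres, 1))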
Example #19
def objects_detector(img_bgr8, i):
    width, height, d = np.shape(img_bgr8)
    if width > 130 or height > 130:
        return
    detected_objects_list = []
    img_bgr8_copy = img_bgr8.copy()
    hsv = cv2.cvtColor(img_bgr8_copy, cv2.COLOR_RGB2HSV)
    # define the values range
    hh = 255
    hl = 0
    sh = 255
    sl = 100  # filter the white color background
    vh = 255
    vl = 0  # to ignore the black in the background
    lowerbound = np.array([hl, sl, vl], np.uint8)
    upperbound = np.array([hh, sh, vh], np.uint8)
    # filter the image to generate the mask
    filtered_hsv = cv2.inRange(hsv, lowerbound, upperbound)
    filtered_hsv = cv2.bitwise_and(hsv, hsv, mask=filtered_hsv)
    filtered_hsv_s = cv2.resize(filtered_hsv, (256, 256))

    cv2.imshow('Filtered', filtered_hsv_s)
    cv2.waitKey(1)
    # convert the image to grayscale in order to find contours
    img_bgr = cv2.cvtColor(filtered_hsv, cv2.COLOR_HSV2BGR)
    img_gray = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2GRAY)
    kernel = np.ones((3, 3), np.uint8)
    img_gray_af = cv2.dilate(img_gray.copy(), kernel, iterations=1)
    # ret, img_gray = cv2.threshold(img_gray, 50, 255, cv2.THRESH_BINARY)
    img_gray_s = cv2.resize(img_gray, (256, 256))
    img_gray_bf = cv2.resize(img_gray_af, (256, 256))
    cv2.imshow('FIlGRAY', img_gray_bf)
    img_gray = img_gray_af
    cv2.imshow('Filtered grayscale', img_gray_s)
    cv2.waitKey(1)
    maxx = 0
    img_gray_copy = img_gray.copy()
    im2, contours, hierarchy = cv2.findContours(img_gray_copy.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    # Find the index of the largest contour
    if not contours:
        print 'No contours found =('
        return
    areas = [cv2.contourArea(c) for c in contours]
    max_index = np.argmax(areas)
    # max_len_index = np.argmax(len_)
    cnt = contours[max_index]

    # print cv2.contourArea(cnt)

    epsilon = 0.0001 * cv2.arcLength(cnt, True)
    cnt = cv2.approxPolyDP(cnt, epsilon, True)
    # for i in range(4):
    #     # find the image contours
    #     im2, contours, hierarchy = cv2.findContours(img_gray_copy.copy(), 1, cv2.CHAIN_APPROX_NONE)
    #     # Find the index of the largest contour
    #     if not contours:
    #         print 'No contours found =('
    #         return
    #     areas = [cv2.contourArea(c) for c in contours]
    #     max_index = np.argmax(areas)
    #     cnt = contours[max_index]
    #     x, y, width, height = cv2.boundingRect(cnt)
    #     cv2.min
    #     hull = cv2.convexHull(cnt)
    #
    #     Mom = cv2.moments(hull)
    #     cx = int(Mom['m10'] / Mom['m00'])**2
    #     cy = int(Mom['m01'] / Mom['m00'])**2
    #     summ = cx-cy
    #     print str(i) + ' Cx = ' + str(cx)
    #     print str(i) + ' Cy = ' + str(cy)
    #     print str(i) + ' Sum = ' + str(cx-cy)
    #     if maxx < summ:
    #         maxx = summ
    #         final_cont = cnt
    #         img_gray = img_gray_copy
    #     rows, cols = img_gray_copy.shape
    #     M = cv2.getRotationMatrix2D((cols / 2, rows / 2), 90, 1)
    #     img_gray_copy = cv2.warpAffine(img_gray_copy, M, (cols, rows))
    #     print '\n'
    # cv2.imshow('GOLDEN!',img_gray)
    #


    # cv2.circle(contour_img_cropped, (cx, cy), 3, 255, -1)
    # Moments = cv2.moments(cnt)
    # cx = int(Moments['m10'] / Moments['m00'])
    # cy = int(Moments['m01'] / Moments['m00'])


    # Mom = cv2.moments(cnt)
    # cx = int(Mom['m10'] / Mom['m00'])
    # cy = int(Mom['m01'] / Mom['m00'])
    # summ = cx + cy
    # print str(i) + ' Cx = ' + str(cx)
    # print str(i) + ' Cy = ' + str(cy)
    # print str(i) + ' Sum = ' + str(cx+cy)

    # ellipse = cv2.fitEllipse(cnt)
    # cv2.ellipse(contour_img, ellipse, (0, 255, 0), 2)
    hull = cv2.convexHull(cnt)
    # epsilon = 0.001 * cv2.arcLength(cnt, True)
    # cnt = cv2.approxPolyDP(cnt, epsilon, True)
    height, width, channels = img_bgr8_copy.shape
    contour_img = img_bgr8_copy.copy()
    contour_img_clean = img_bgr8_copy.copy()
    cv2.drawContours(contour_img, cnt, -1, (0, 255, 0), 3)
    cv2.drawContours(contour_img_clean, cnt, -1, (0, 255, 0), 3)

    points = cv2.minEnclosingTriangle(hull)
    # print points
    points00 = points[1][0][0][0]
    points01 = points[1][0][0][1]
    points10 = points[1][1][0][0]
    points11 = points[1][1][0][1]
    points20 = points[1][2][0][0]
    points21 = points[1][2][0][1]
    a_new = np.array([(points00, points01), (points10, points11), (points20, points21)])
    a_new_new = a_new.copy()
    a_old = average_points
    print 'a_new'
    print a_new
    print 'a_old'
    print a_old
    # print len(a_old)
    for i2 in range(3):
        indice_min = -1
        dist_deb = 99999999
        for i in range(3):  # for each point in a_new compare with the first point in the vector we have
            dist = np.linalg.norm(a_new[i]-a_old[i2])
            if dist < dist_deb:
                dist_deb = dist
                indice_min = i
        #     print 'dist ' + str(i)
        #     print dist
        # print indice_min
        a_new_new[i2] = a_new[indice_min].copy()
        a_new[indice_min] = 9999999
    print 'A new new'
    print a_new_new


    global pointsIndex
    global lastpoints
    pointsIndex += 1
    global NUMBER_LAST_POINTS
    if pointsIndex >= NUMBER_LAST_POINTS:
        pointsIndex = 0
    lastpoints[pointsIndex] = a_new_new.copy()
    print 'All last!'
    # print lastpoints
    # print lastpoints
    #
    # # print lastpoints[pointsIndex]
    # print 'Points'

    global average_points
    # avg_point = np.array([(0, 0), (0, 0), (0, 0)])
    for i in range(NUMBER_LAST_POINTS):
        average_points = average_points + lastpoints[i]
    print 'avg'
    average_points = average_points/(NUMBER_LAST_POINTS+1)
    # print avg_point

    #
    # Mom = cv2.moments(hull)
    # print Mom
    # cx = int(Mom['m10'] / Mom['m00'])
    # cy = int(Mom['m01'] / Mom['m00'])
    # # summ = cx + cy
    # print str(i) + ' Cx = ' + str(cx)
    # print str(i) + ' Cy = ' + str(cy)
    # print str(i) + ' Sum = ' + str(cx+cy)

    # avg_point = avg_point/3
    # print 'Avg = '
    # print avg_point
    # print avg_point
    # print avg_point

    # print type(avg_point)

    # lastpoints[pointsIndex]
    # global pointsIndex
    # global lastpoints
    # # print points
    # if pointsIndex > NB_DEPTH_IMGS:
    #     pointsIndex = 0
    # lastpoints[pointsIndex] = np.copy(points)
    # pointsIndex += 1
    # # creates an image which is the average of the last ones
    # points_avg = np.copy(lastpoints[0])
    # # cnt_avg = np.array(0)
    # for i in range(1, NB_DEPTH_IMGS):
    #     points_avg += lastpoints[i]
    # print NB_DEPTH_IMGS
    # points_avg /= NB_DEPTH_IMGS
    # points = points_avg

    p0 = average_points[0]
    p1 = average_points[1]
    p2 = average_points[2]
    dist0 = np.linalg.norm(p0 - p1)
    dist1 = np.linalg.norm(p1 - p2)
    dist2 = np.linalg.norm(p0 - p2)
    print 'Dist 0'
    print dist0
    print 'Dist 1'
    print dist1
    print 'Dist 2'
    print dist2
    dist_arr = [dist0, dist1, dist2]
    maxx = np.argmax(dist_arr)
    dist_arr2 = copy.copy(dist_arr)
    dist_arr2[maxx] = -9999
    print dist_arr
    print dist_arr2
    maxx2 = np.argmax(dist_arr2)
    print 'Abs = '
    print abs(dist_arr[maxx]-dist_arr[maxx2])
    max_abs = abs(dist_arr[maxx]-dist_arr[maxx2])

    minn = np.argmin(dist_arr)
    dist_arr2 = copy.copy(dist_arr)
    dist_arr2[minn] = 99999
    # print dist_arr
    # print dist_arr2
    minn2 = np.argmin(dist_arr2)
    print 'Abs = '
    print abs(dist_arr[minn]-dist_arr[minn2])
    min_abs = abs(dist_arr[minn]-dist_arr[minn2])

    if min_abs > max_abs*3:
        maxx = np.argmin(dist_arr)
    elif max_abs > min_abs*3:
        maxx = np.argmax(dist_arr)
    else:
        maxx = np.argmin(dist_arr)

    # print minn
    if maxx == 0:
        # print p0
        # print p1
        point_zica = [p0, p1]
        dist_arr[0] = 0
    if maxx == 1:
        # print p1
        # print p2
        point_zica = [p1, p2]
        dist_arr[1] = 0
    if maxx == 2:
        # print p0
        # print p2
        point_zica = [p0, p2]
        dist_arr[2] = 0

    # maxx = np.argmax(dist_arr)
    # if maxx == 0:
    #     # print p0
    #     # print p1
    #     point_zica2 = [p0, p1]
    #     dist_arr[0] = 0
    # if maxx == 1:
    #     # print p1
    #     # print p2
    #     point_zica2 = [p1, p2]
    #     dist_arr[1] = 0
    # if maxx == 2:
    #     # print p0
    #     # print p2
    #     point_zica2 = [p0, p2]
    #     dist_arr[2] = 0

    # print '\n'
    # print 'Dist0 = ' + str(dist0)
    # print 'Dist1 = ' + str(dist1)
    # print 'Dist2 = ' + str(dist2)
    # cv2.polylines(contour_img, hull, True, (255, 0, 0), 3)
    # cv2.drawContours(contour_img,hull,-1,(255,0,0))
    cv2.polylines(contour_img, np.int32([hull]), True, 255)
    # cv2.polylines(contour_img, np.int32([points[1]]), True, 255)
    # average_points += 30
    contour_img = cv2.copyMakeBorder(contour_img, 30, 30, 30, 30, cv2.BORDER_CONSTANT)

    cv2.drawContours(contour_img, np.int32([point_zica]), -1, (0,255,0), offset=(30,30))
    cv2.drawContours(contour_img, np.int32([points[1]]), -1, (255,0,0), offset=(30,30))
    cv2.drawContours(contour_img, np.int32([average_points]), -1, (0,0,255), offset=(30,30))

    # cv2.imshow('HULL', contour_img)
    # Mom = cv2.moments(hull)
    # cx = int(Mom['m10'] / Mom['m00'])
    # cy = int(Mom['m01'] / Mom['m00'])
    # summ = cx + cy
    # print str(i) + ' Cx = ' + str(cx)
    # print str(i) + ' Cy = ' + str(cy)
    # print str(i) + ' Sum = ' + str(summ)
    # ellipse = cv2.fitEllipse(cnt)
    # cv2.ellipse(contour_img, ellipse, (0, 255, 0), 2)
    # get rotated rect of contour and split into components
    # center, size, angle = cv2.minAreaRect(cnt)
    # box = cv2.cv.BoxPoints(cv2.minAreaRect(cnt))
    # box = np.int32(box)
    x, y, width, height = cv2.boundingRect(cnt)
    contour_img_box = contour_img.copy()
    # cv2.rectangle(contour_img_box, (x, y), (x + width, y + height), (0, 255, 0), 2)
    # cv2.drawContours(contour_img, [box], 0, (0, 0, 255), 2)
    # cv2.drawContours(contour_img, [box2], 0, (0, 0, 255), 2)
    resized_cnt = cv2.resize(contour_img_box, (256, 256))
    cv2.imshow('Contour', resized_cnt)
    cv2.waitKey(1)
    cropped_bgr8 = img_bgr8_copy[y:y + height, x:x + width]
    contour_img_cropped = contour_img
    resized_cnt = cv2.resize(cropped_bgr8, (256, 256))

    cv2.imshow('zica', resized_cnt)
    # if height > 1.2*width:
    #     rotmat = cv2.getRotationMatrix2D((height / 2.0, width / 2.0), 90, 1.0)
    #     roi = cv2.warpAffine(roi, rotmat, (height, width), flags=cv2.INTER_LINEAR)  # INTER_CUBIC
    # cv2.imshow('uprightRect', roi)
    # std_length = 80
    n_bin = 4  # number of orientations for the HoG
    b_size = 2  # block size
    c_size = 2  # cell size
    cropped_gray = cv2.cvtColor(cropped_bgr8, cv2.COLOR_BGR2GRAY)
    resized_cnt = cv2.resize(cropped_gray, (256, 256))

    cv2.imshow('CropGray', resized_cnt)
    fd, hog_image = hog(cropped_gray, orientations=n_bin, pixels_per_cell=(c_size, c_size),
                        cells_per_block=(b_size / c_size, b_size / c_size), visualise=True, normalise=True)
    hog_image = exposure.rescale_intensity(hog_image, in_range=(0, 4))
    # cv2.imshow('HOGG', hog_image)
    features_hog = fd
    features_hog = np.reshape(features_hog, (np.shape(features_hog)[0]/4, 4))
    sum_features = sum(features_hog)
    # print np.shape(features_hog)
    # print sum_features
    # # print np.sum(sum_features)
    # print np.argmax(sum_features)
    # rows, cols = contour_img_cropped.shape[:2]
    # [vx, vy, x, y] = cv2.fitLine(cnt,cv2.cv.CV_DIST_L2, 0, 0.01, 0.01)
    # lefty = int((-x * vy / vx) + y)
    # righty = int(((cols - x) * vy / vx) + y)
    # cv2.line(contour_img_cropped, (cols - 1, righty), (0, lefty), (0, 255, 0), 2)
    height, width = cropped_gray.shape
    cropped_gray = cv2.copyMakeBorder(cropped_gray, 30, 30, 30, 30, cv2.BORDER_CONSTANT)
    resized_cnt = cv2.resize(cropped_gray, (256, 256))

    cv2.imshow('CropGray', resized_cnt)
    # cropped_gray_thres = np.where(cropped_gray > 170, cropped_gray, 0)
    # cv2.imshow('CropThre',cropped_gray_thres)
    # print blank
    resized_cnt = cv2.resize(contour_img_cropped, (256, 256))

    cv2.imshow('AAA', resized_cnt)
    contour_img_cropped = np.where(contour_img_cropped[:, :, 1] == 255, contour_img_cropped[:, :, 1], 0)

    contour_img_cropped = np.where(contour_img_clean[:, :, 1] == 255, contour_img_clean[:, :, 1], 0)
    resized_cnt = cv2.resize(contour_img_cropped, (256, 256))

    cv2.imshow('BBBB', resized_cnt)
    sum1 = list()
    sum1.append(contour_img_cropped[0:height / 2, 0:width / 2])
    sum1.append(contour_img_cropped[height / 2:height, 0:width / 2])
    sum1.append(contour_img_cropped[0:height / 2, width / 2:width])
    sum1.append(contour_img_cropped[height / 2:height, width / 2:width])
    # cv2.imshow('a', sum1[0])
    # cv2.imshow('b', sum1[1])
    # cv2.imshow('c', sum1[2])
    # cv2.imshow('d', sum1[3])
    sum_tot = [0, 0, 0, 0]
    #
    # for i in range(4):
    #     # std_length = 80
    #     n_bin = 4  # number of orientations for the HoG
    #     b_size = 64  # block size
    #     c_size = 8  # cell size
    #     # cropped_cnt_gray = cv2.cvtColor(contour_img_cropped, cv2.COLOR_BGR2GRAY)
    #     fd, hog_image = hog(contour_img_cropped, orientations=n_bin, pixels_per_cell=(c_size, c_size),
    #                         cells_per_block=(b_size / c_size, b_size / c_size), visualise=True)
    #     hog_image = exposure.rescale_intensity(hog_image, in_range=(0, 4))
    #     # cv2.imshow('HOGG', hog_image)
    #     features_hog = fd
    #     features_hog = np.reshape(features_hog, (np.shape(features_hog)[0]/4, 4))
    #     sum_features = sum(features_hog)
    #     # print np.shape(features_hog)
    #     print i
    #     # print min(sum_features)
    #     print sum_features
        # print np.sum(sum_features)
        # print np.argmax(sum_features)
    #     sum1 = list()
    #     sum1.append(contour_img_cropped[0:height / 2, 0:width / 2])
    #     sum1.append(contour_img_cropped[height / 2:height, 0:width / 2])
    #     sum1.append(contour_img_cropped[0:height / 2, width / 2:width])
    #     sum1.append(contour_img_cropped[height / 2:height, width / 2:width])
    #     sum_index = 0
    #     highest_first = 0
    #     highest_first_index = 0
    #     summ = 0
    #     # for sum_ in sum1:
    #     summ = 0
    #     height, width = contour_img_cropped.shape
    #     for index_lin, lin in enumerate(contour_img_cropped):
    #         # print lin
    #         # print index_lin
    #         index_col = 0
    #         for col in lin:
    #             if col == 255:
    #                 summ = summ + index_col ** 2
    #             index_col += 1
    #             # print summ
    #             # sum_lin = sum_[:, :]
    #             # first_255 = 0
    #             # for index, point in enumerate(sum_lin):
    #             #     if point == 255:
    #             #         sum_tot[sum_index] = sum_tot[sum_index] + index
    #             #         first_255 = index
    #             #
    #             # print ' First = ' + str(first_255)
    #             # if highest_first < first_255:
    #             #     highest_first = first_255
    #             #     highest_first_index = sum_index
    # #             # sum_index += 1
    #     rows, cols = contour_img_cropped.shape
    #     M = cv2.getRotationMatrix2D((cols / 2, rows / 2), 90, 1)
    #     contour_img_cropped = cv2.warpAffine(contour_img_cropped, M, (cols, rows))
    # #     # print 'Stage ' + str(i)
    #     sum_tot[i] = summ
    # print sum_tot
    # print sum_tot.index((min(sum_tot)))
    #
    # rows, cols, d = cropped_bgr8.shape
    # M = cv2.getRotationMatrix2D((cols / 2, rows / 2), sum_tot.index((min(sum_tot))) * 90, 1)
    # cropped_bgr8 = cv2.warpAffine(cropped_bgr8, M, (cols, rows))
    # print cv2.
    cv2.imshow('Sent', cv2.resize(cropped_bgr8, (256, 256)))
Example #20
def find_homograpy_points(img_src):
    #todo: Split this guy out into functions
    wk_img = img_src.copy()

    gray = cv2.cvtColor(wk_img, cv2.COLOR_BGR2GRAY)
    bw_img = get_bw_img(image=gray, threshold=140)

    clean_img = remove_noise(bw_img)

    zero_count = np.count_nonzero(clean_img)
    #print "Zero-Count:", zero_count

    no_lava_pass_1 = np.zeros_like(clean_img)
    kernel = np.ones((5, 5), np.uint8)
    dilation = cv2.dilate(clean_img, kernel, iterations=3)
    im2, contours, hierarchy = cv2.findContours(dilation, cv2.RETR_TREE,
                                                cv2.CHAIN_APPROX_SIMPLE)
    off_edge = [
        contour for contour in contours
        if np.count_nonzero(contour) == contour.shape[0] * contour.shape[1] *
        contour.shape[2]
    ]
    cv2.drawContours(no_lava_pass_1, off_edge, -1, (255, 255, 255), cv2.FILLED)

    no_lava_pass_2 = np.zeros_like(no_lava_pass_1)
    flipped = cv2.flip(no_lava_pass_1, 0)
    kernel = np.ones((5, 5), np.uint8)
    dilation = cv2.dilate(flipped, kernel, iterations=5)
    im2, contours, hierarchy = cv2.findContours(dilation, cv2.RETR_TREE,
                                                cv2.CHAIN_APPROX_SIMPLE)
    off_edge2 = [
        contour for contour in contours
        if np.count_nonzero(contour) == contour.shape[0] * contour.shape[1] *
        contour.shape[2]
    ]
    big_contour = max(contours, key=cv2.contourArea)
    cv2.drawContours(no_lava_pass_2, [big_contour], -1, (255, 255, 255), 3)
    edges = cv2.flip(no_lava_pass_2, 0)

    #edges = cv2.Canny(no_lava,1,1)
    #cv2.imshow('debug', edges)

    xs, ys = np.where(edges > 0)
    edgePts = np.array(zip(ys, xs))

    rect = cv2.minAreaRect(edgePts)
    box = cv2.boxPoints(rect)
    box = np.int0(box)

    # # convert box points to tuple
    rect_bot_left = tuple(box[0])
    rect_top_left = tuple(box[1])
    rect_top_right = tuple(box[2])
    rect_bot_right = tuple(box[3])

    # Fit triangle around edgepoints
    a, triangle = cv2.minEnclosingTriangle(np.array([edgePts]))

    # get proper points of the triangle...
    tri_t = triangle[0]
    tri_l = triangle[0]
    tri_r = triangle[0]

    for i in range(len(triangle)):
        if triangle[i][0][1] < tri_t[0][1]:
            tri_t = triangle[i]

        if triangle[i][0][0] < tri_l[0][0]:
            tri_l = triangle[i]

        if triangle[i][0][0] > tri_r[0][0]:
            tri_r = triangle[i]

    # Convert the points to tuple
    tri_bot_right = tuple(tri_r[0])
    tri_top = tuple(tri_t[0])
    tri_bot_left = tuple(tri_l[0])

    rect_top = line(rect_top_left, rect_top_right)
    tri_left = line(tri_bot_left, tri_top)
    tri_right = line(tri_bot_right, tri_top)

    l_intersection = find_intersection(rect_top, tri_left)
    r_intersection = find_intersection(rect_top, tri_right)

    homography_points = np.array([
        (int(tri_bot_left[0]), int(tri_bot_left[1])),
        (int(tri_bot_right[0]), int(tri_bot_right[1])),
        (int(r_intersection[0]), int(r_intersection[1])),
        (int(l_intersection[0]), int(l_intersection[1]))
    ])

    # Preview
    disp = wk_img.copy()
    cv2.drawContours(disp, [box], 0, (255, 0, 255), 2)
    img = cv2.line(disp, tri_bot_left, tri_top, (255, 255, 0), 2)
    img = cv2.line(disp, tri_top, tri_bot_right, (255, 255, 0), 2)
    img = cv2.line(disp, tri_bot_right, tri_bot_left, (255, 255, 0), 2)
    img = cv2.polylines(disp, [homography_points], True, (255, 0, 0), 2)
    # cv2.imshow('edges', edges)
    cv2.imshow('Mapping', disp)

    return homography_points
Example #21
    def recon(self, na='0', ti=0, CP=False, datt=[], tr=[]):
        self.posiciones = []
        for i in range(0, 64):
            self.posiciones.append(False)
        name = 'CP' + na + '.jpg'
        ##        im= self.captura(name)
        im = cv2.imread(name)
        self.plt_show(im)
        ##        print('in encuadre')
        en = self.encuadre(im, CP=CP)
        ##        print('out encuadre')
        self.plt_show(en)
        ##        sen='en'+na+'.jpg'
        ##        cv2.imwrite(sen,en)
        grw = cv2.cvtColor(en, cv2.COLOR_BGR2GRAY)

        # Otsu threshold
        ret, th = cv2.threshold(grw, 0, 255,
                                cv2.THRESH_BINARY + cv2.THRESH_OTSU)
        self.plt_show(th)
        sth = th.shape
        back = np.ones(sth) * 255
        # extract the contours
        _, cont, _ = cv2.findContours(th.copy(), cv2.RETR_LIST,
                                      cv2.CHAIN_APPROX_NONE)
        ##        print(len(cont))
        q = 0
        cc = []
        humm = []
        icon = self.print_cuad(back)
        ico = icon
        board = np.ones((8, 8)) * 0
        pboard = board.copy()
        cboard = np.ones((8, 8)) * 5
        for c in cont:
            ar = cv2.contourArea(c)
            if 700 < ar < 5000:
                q += 1
                ihu = []
                hum = cv2.HuMoments(cv2.moments(c)).flatten()
                for h in range(len(hum)):
                    nh = 1 / hum[h]
                    nh = round(float(nh), 2)
                    ihu.append(nh)
                peri = cv2.arcLength(c, True)
                f1 = ar / peri
                ihu.append(f1)
                att, _ = cv2.minEnclosingTriangle(c)
                ihu.append(att / 5000)
                if 4 < ihu[0] < 6:

                    ##                    print((ar,ihu))
                    cx, cy, w, h = cv2.boundingRect(c)
                    cx += w / 2
                    cx1 = cx + (w / 8)
                    cx2 = cx - (w / 8)
                    cy += h / 2
                    cy1 = cy + (h / 8)
                    cy2 = cy - (h / 8)
                    cx = int(cx) + 10
                    cy = int(cy) - 10
                    px = self.ubicar(cx, self.xf)
                    py = self.ubicar(cy, self.yf)
                    ind = px + (8 * py)
                    self.posiciones[ind] = (cx, cy)
                    pieza = self.mlp.check(ihu)
                    tr.append(pieza)

                    board[py, px] = pieza
                    pboard[py, px] = 1
                    ##                    print((cx,cy,cx1,cx2,cy1,cy2))
                    cl1 = ((th[round(cy1), round(cx)] / 255) +
                           (th[round(cy1), round(cx1)] / 255) +
                           (th[round(cy1), round(cx2)] / 255)) / 3
                    cl = ((th[round(cy), round(cx)] / 255) +
                          (th[round(cy), round(cx1)] / 255) +
                          (th[round(cy), round(cx2)] / 255)) / 3
                    cl2 = ((th[round(cy2), round(cx)] / 255) +
                           (th[round(cy2), round(cx1)] / 255) +
                           (th[round(cy2), round(cx2)] / 255)) / 3
                    cor = (cl + cl1 + cl2) / 3
                    if cor > 0.5:
                        color = 1
                    else:
                        color = 0
                    cboard[py, px] = color
                    datt.append(np.asarray(ihu))
                    cc.append(c)
                    ico = cv2.drawContours(ico, cc, -1, (00, 120, 120))
                    if int(na) < 400:
                        self.plt_show(ico)
                    print(((att), pieza))
                    inp = input('1=P ; 2=R ; 3=N ;4=B; 5=Q ; 6=K')
                    if inp == '':
                        tr.append(int(pieza))
                    else:
                        tr.append(int(inp))

        icon = cv2.drawContours(icon, cc, -1, (00, 120, 120))
        icc = self.print_cuad(icon, color=(0, 0, 0))
        ecc = self.print_cuad(en, color=(0, 0, 0))
        cif = self.show_coor(en, self.posiciones)
        self.plt_show(cif)
        self.plt_show(icc)
        #print_cuad(xf,yf,icon)
        ##        return(pboard,board,cboard,icc,datt,tr)
        print(board)
        print(pboard)
        print(cboard)
        ##        return(pboard,board,cboard,icc)
        return (pboard, board, cboard, icc, datt, tr)
Ejemplo n.º 22
0
import numpy
import cv2

contour = numpy.array([[[0, 20]], [[80, 0]], [[100, 100]],
                       [[0, 100]]], dtype=numpy.int32)  # make a fake contour array

area, triangle = cv2.minEnclosingTriangle(contour)

print(area)
print(triangle)

# drawing = numpy.zeros([100, 100], numpy.uint8)
# cv2.drawContours(drawing, [numpy.int32(triangle)], 0, (255, 255, 255), 2)

# cv2.imshow('output', drawing)
cv2.waitKey(0)
Ejemplo n.º 23
0
def triangStats(img, noHoles = False, minPercArea = 0.1):


    imggray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    ret, imbw = cv2.threshold(imggray, 10, 255, 0)
    _, contours, _ = cv2.findContours(imbw, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)

    maxArea = 0
    Ax = Ay = Bx = By = Cx = Cy = 0
    areaCnt = 0
    maxCnt = None
    idx = -1
    for cnt in contours:
        idx += 1
        retval, triangle = cv2.minEnclosingTriangle(cnt)
        if (triangle is None):
            continue
        areaCnt = cv2.contourArea(cnt)
        if (areaCnt <= maxArea):
            continue
        maxArea = areaCnt
        maxCnt = idx
        Ax = triangle[0][0][0]
        Ay = triangle[0][0][1]

        Bx = triangle[1][0][0]
        By = triangle[1][0][1]

        Cx = triangle[2][0][0]
        Cy = triangle[2][0][1]

    if (maxArea <= minPercArea * imggray.shape[0] * imggray.shape[1]):
        return False, None, None, None, None
    v1x = 0
    v1y = 0
    v2x = 0
    v2y = 0
    v3x = 0
    v3y = 0
    imgCnt = np.zeros((img.shape[0], img.shape[1], 3), np.uint8)
    mask = np.zeros((img.shape[0], img.shape[1], 3), np.uint8)
    cv2.drawContours(mask, contours, maxCnt, color=(255, 255, 255), thickness=cv2.FILLED)

    color = [0, 0, 0]
    contActivePixels = 0
    valret = True
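    # scan the masked region: count the non-black pixels and require a single uniform
    # colour inside the contour (black holes are tolerated unless noHoles is set)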
    for i in range(mask.shape[0]):
        for j in range(mask.shape[1]):
            if (mask[i, j, 0] == 255 and mask[i, j, 1] == 255 and mask[i, j, 2] == 255):
                if(img[i, j, 0] != 0 or img[i, j, 1] != 0 or img[i, j, 2] != 0):
                    contActivePixels+=1
                if (color[0] == 0 and color[1] == 0 and color[2] == 0):
                    color[0] = int(img[i][j][0])
                    color[1] = int(img[i][j][1])
                    color[2] = int(img[i][j][2])
                else:
                    if (img[i][j][0] != color[0] or img[i][j][1] != color[1] or img[i][j][2] != color[2]):
                        if (noHoles or (img[i][j][0] != 0 or img[i][j][1] != 0 or img[i][j][2] != 0)):
                            valret = False

    if not valret:
        return False, None, None, None, None

    cv2.drawContours(imgCnt, contours, maxCnt, color=color, thickness=cv2.FILLED)

    if (Cy < By and Cy < Ay):
        v1y = Cy
        v1x = Cx
        if (Ax < Bx):
            v2x = Ax
            v2y = Ay
            v3x = Bx
            v3y = By
        else:
            v2x = Bx
            v2y = By
            v3x = Ax
            v3y = Ay
    elif (By < Cy and By < Ay):
        v1y = By
        v1x = Bx
        if (Ax < Cx):
            v2x = Ax
            v2y = Ay
            v3x = Cx
            v3y = Cy
        else:
            v2x = Cx
            v2y = Cy
            v3x = Ax
            v3y = Ay
    else:
        v1y = Ay
        v1x = Ax
        if (Bx < Cx):
            v2x = Bx
            v2y = By
            v3x = Cx
            v3y = Cy
        else:
            v2x = Cx
            v2y = Cy
            v3x = Bx
            v3y = By

    # (x,y),radius = cv2.minEnclosingCircle(cnt)
    triangleArea = abs((v2x * (v1y - v3y) + v1x * (v3y - v2y) + v3x * (v2y - v1y)) / 2)
    # print(f"({v1x},{v1y}) ({v2x},{v2y}) ({v3x},{v3y}) {maxArea} {triangleArea}")
    # a=input('pare')
    # center = (int(x),int(y))
    # radius = int(radius)
    # cv2.circle(img,center,radius,(255,255,0),2)

    #desc = [maxArea / triangleArea, 0 if v3y - v1y == 0 else (v2y - v1y) / (v3y - v1y),
            #1 if v1x - v2x > 0 and v3x - v1x > 0 else 0, np.rad2deg(np.arctan( abs(v3y-v2y) / (v3x - v2x)))]
    if triangleArea == 0 or (v3x - v2x) == 0:
        return False, None, None, None, None
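    # descriptor: fill ratio of active pixels inside the enclosing triangle, the base
    # edge's inclination angle in degrees, and a flag for whether the apex lies
    # horizontally between the two base vertices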
    desc = [contActivePixels/triangleArea, np.rad2deg(np.arctan(abs(v3y - v2y) / (v3x - v2x))), 1 if v1x - v2x > 0 and v3x - v1x > 0 else 0 ]
    return True, np.array([desc]),contActivePixels/(imggray.shape[0] * imggray.shape[1]), imgCnt, color
Ejemplo n.º 24
0
rect = cv2.minAreaRect(contr)  # rotated bounding rectangle of the contour
box = np.int0(cv2.boxPoints(rect))
cv2.drawContours(
    img,
    [box],
    -1,
    (0, 255, 0),
    3
)

(x, y), radius = cv2.minEnclosingCircle(contr)
cv2.circle(
    img,
    (int(x), int(y)),
    int(radius),
    (255, 0, 0),
    2
)

ret, tri = cv2.minEnclosingTriangle(contr)
cv2.polylines(
    img,
    [np.int32(tri)],
    True,
    (255, 0, 255), 2
)

ellipse = cv2.fitEllipse(contr)
cv2.ellipse(
    img,
    ellipse,
    (0, 255, 255),
    3,
)
Ejemplo n.º 25
0
import cv2
import numpy as np
import matplotlib.pyplot as plt

points = np.array([[[1, 1]], [[5, 10]], [[5, 1]], [[1, 10]], [[2, 5]]],
                  np.float32)
print(points.dtype)
print(points.shape)
# minimum enclosing triangle
for i in range(points.shape[0]):
    plt.scatter(points[i, 0, 0], points[i, 0, 1])

area, triangle = cv2.minEnclosingTriangle(points)

print(area)
print(triangle)
print(triangle.shape)
plt.plot((triangle[0, 0, 0], triangle[1, 0, 0]),
         (triangle[0, 0, 1], triangle[1, 0, 1]))
plt.plot((triangle[1, 0, 0], triangle[2, 0, 0]),
         (triangle[1, 0, 1], triangle[2, 0, 1]))
plt.plot((triangle[2, 0, 0], triangle[0, 0, 0]),
         (triangle[2, 0, 1], triangle[0, 0, 1]))
plt.show()
Ejemplo n.º 26
0
 def minEnclosingTriangle(self):
     min_triangle = cv2.minEnclosingTriangle(self)
     return vContour(min_triangle)
Ejemplo n.º 27
0
import cv2

o = cv2.imread("cc.bmp")
cv2.imshow("original", o)
gray = cv2.cvtColor(o, cv2.COLOR_BGR2GRAY)
ret, binary = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY)
contours, hierarchy = cv2.findContours(binary, cv2.RETR_LIST,
                                       cv2.CHAIN_APPROX_SIMPLE)
area, trgl = cv2.minEnclosingTriangle(contours[0])
print("area=", area)
print("trgl=", trgl)
for i in range(0, 3):
    p1 = tuple(int(v) for v in trgl[i][0])
    p2 = tuple(int(v) for v in trgl[(i + 1) % 3][0])
    print("===========", i)
    print(p1)
    print(p2)
    cv2.line(o, p1, p2, (255, 255, 255), 2)

cv2.imshow("result", o)
cv2.waitKey()
cv2.destroyAllWindows()
Ejemplo n.º 28
0
def objects_detector(img):
    global average_points
    # just taking the last image instead of all
    img_cp = copy.copy(img)
    # 1-Convert to HSV space in order to let pass only the vivid colors
    hsv = cv2.cvtColor(img_cp, cv2.COLOR_RGB2HSV)
    # define the values range in which to filter
    hh = 255
    hl = 0
    sh = 255
    sl = 100  # filter the white color background
    vh = 255
    vl = 0
    lowerbound = np.array([hl, sl, vl], np.uint8)
    upperbound = np.array([hh, sh, vh], np.uint8)
    # 2-filter the image to generate the mask
    filtered_hsv = cv2.inRange(hsv, lowerbound, upperbound)
    filtered_hsv = cv2.bitwise_and(hsv, hsv, mask=filtered_hsv)
    filtered_hsv_s = cv2.resize(filtered_hsv, (256, 256))
    cv2.imshow('HSV-Filtering', filtered_hsv_s)
    cv2.waitKey(1)
    # convert the image to grayscale in order to find contours
    img_bgr = cv2.cvtColor(filtered_hsv, cv2.COLOR_HSV2BGR)
    img_gray = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2GRAY)
    kernel = np.ones((3, 3), np.uint8)
    # 3-dilate image in order to merge areas which were separated by filtering
    img_gray_dilated = cv2.dilate(img_gray.copy(), kernel, iterations=1)
    img_gray_bf = cv2.resize(img_gray_dilated, (256, 256))
    cv2.imshow('Gray-dilated', img_gray_bf)
    cv2.waitKey(1)
    img_gray = img_gray_dilated
    img_gray_copy = img_gray.copy()
    im2, contours, hierarchy = cv2.findContours(img_gray_copy.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    # Find the index of the largest contour
    if not contours:
        print('No contours found =(')
        return
    areas = [cv2.contourArea(c) for c in contours]
    max_index = np.argmax(areas)
    # 4-get the biggest contour found
    cnt = contours[max_index]
    # 5-Approximate it by a polygon, allowing only a small (0.1%) change in length, in order to reduce high-frequency noise
    epsilon = 0.001 * cv2.arcLength(cnt, True)
    cnt = cv2.approxPolyDP(cnt, epsilon, True)
    '''The idea here is to rotate the image so that sum(cx - cy) is always the lowest, where cx and cy are the
    area moments of the contour in question. It does not work well, which is why it is commented out.'''
    # for i in range(4):
    #     # find the image contours
    #     im2, contours, hierarchy = cv2.findContours(img_gray_copy.copy(), 1, cv2.CHAIN_APPROX_NONE)
    #     # Find the index of the largest contour
    #     if not contours:
    #         print 'No contours found =('
    #         return
    #     areas = [cv2.contourArea(c) for c in contours]
    #     max_index = np.argmax(areas)
    #     cnt = contours[max_index]
    #     x, y, width, height = cv2.boundingRect(cnt)
    #     cv2.min
    #     hull = cv2.convexHull(cnt)
    #
    #     Mom = cv2.moments(hull)
    #     cx = int(Mom['m10'] / Mom['m00'])**2
    #     cy = int(Mom['m01'] / Mom['m00'])**2
    #     summ = cx-cy
    #     print str(i) + ' Cx = ' + str(cx)
    #     print str(i) + ' Cy = ' + str(cy)
    #     print str(i) + ' Sum = ' + str(cx-cy)
    #     if lowest < summ:
    #         lowest = summ
    #         final_cont = cnt
    #         img_gray = img_gray_copy
    #     rows, cols = img_gray_copy.shape
    #     M = cv2.getRotationMatrix2D((cols / 2, rows / 2), 90, 1)
    #     img_gray_copy = cv2.warpAffine(img_gray_copy, M, (cols, rows))
    #     print '\n'
    # cv2.imshow('GOLDEN!',img_gray)

    # 6-taking the convex hull enclosing the contour in order to further reduce noise propagation from small changes in
    # the image from one frame to the other
    hull = cv2.convexHull(cnt)
    contour_img = img_cp.copy()
    contour_img_clean = img_cp.copy()
    cv2.drawContours(contour_img, cnt, -1, (0, 255, 0), 3)
    cv2.drawContours(contour_img_clean, cnt, -1, (0, 255, 0), 3)

    points = cv2.minEnclosingTriangle(hull)
    # 7-Getting triangle x y coordinates
    points00 = points[1][0][0][0]
    points01 = points[1][0][0][1]
    points10 = points[1][1][0][0]
    points11 = points[1][1][0][1]
    points20 = points[1][2][0][0]
    points21 = points[1][2][0][1]
    # a_new = new points as given by the cv2 function
    # a_new_new =  a_new points reordered to match previous recorded points
    a_new = np.array([(points00, points01), (points10, points11), (points20, points21)])
    a_new_new = a_new.copy()
    a_old = average_points
    ''' Ordering the triangle points so we can compare them with the previously recorded points, because they come in an arbitrary order. '''
    # 8- reorder the triangle so that each new point is matched to the closest previously recorded point (isolated as a sketch after this snippet)
    for i2 in range(3):
        indice_min = -1
        dist_deb = 99999999
        for i in range(3):  # for each point in a_new compare with the first point in the vector we have
            dist = np.linalg.norm(a_new[i]-a_old[i2])
            if dist < dist_deb:
                dist_deb = dist
                indice_min = i
        a_new_new[i2] = a_new[indice_min].copy()
        a_new[indice_min] = 9999999
    ''' Saving current points in the average '''
    global pointsIndex
    global lastpoints
    pointsIndex += 1
    global NUMBER_LAST_POINTS
    if pointsIndex >= NUMBER_LAST_POINTS:
        pointsIndex = 0
    lastpoints[pointsIndex] = a_new_new.copy()
    for i in range(NUMBER_LAST_POINTS):
        average_points = average_points + lastpoints[i]
    # 9- Saving points in history and recalculating the average
    average_points /= NUMBER_LAST_POINTS + 1

    ''' Get either the shortest or the longest triangle side, whichever makes it most distinct from the other two. '''
    p0 = average_points[0]
    p1 = average_points[1]
    p2 = average_points[2]
    # 10- Get the length of the triangle sides
    dist0 = np.linalg.norm(p0 - p1)
    dist1 = np.linalg.norm(p1 - p2)
    dist2 = np.linalg.norm(p0 - p2)
    print('Dist 0')
    print(dist0)
    print('Dist 1')
    print(dist1)
    print('Dist 2')
    print(dist2)
    # getting the two longest sides
    dist_arr = [dist0, dist1, dist2]
    maxx = np.argmax(dist_arr)
    dist_arr2 = copy.copy(dist_arr)
    dist_arr2[maxx] = -9999
    print(dist_arr)
    print(dist_arr2)
    maxx2 = np.argmax(dist_arr2)
    print('Abs = ')
    print(abs(dist_arr[maxx] - dist_arr[maxx2]))
    max_abs = abs(dist_arr[maxx]-dist_arr[maxx2])
    # getting the two shortest sides
    minn = np.argmin(dist_arr)
    dist_arr2 = copy.copy(dist_arr)
    dist_arr2[minn] = 99999
    minn2 = np.argmin(dist_arr2)
    print('Abs = ')
    print(abs(dist_arr[minn] - dist_arr[minn2]))
    min_abs = abs(dist_arr[minn]-dist_arr[minn2])
    # get the difference between the two longest sides and between the two shortest sides
    # if the difference between the two longest is 3 times bigger than the difference between
    # the two shortest, use the longest side (and vice versa); otherwise use the shortest
    if min_abs > max_abs*3:
        maxx = np.argmin(dist_arr)
    elif max_abs > min_abs*3:
        maxx = np.argmax(dist_arr)
    else:
        maxx = np.argmin(dist_arr)

    if maxx == 0:
        # print p0
        # print p1
        point_zica = [p0, p1]
        dist_arr[0] = 0
    if maxx == 1:
        # print p1
        # print p2
        point_zica = [p1, p2]
        dist_arr[1] = 0
    if maxx == 2:
        # print p0
        # print p2
        point_zica = [p0, p2]
        dist_arr[2] = 0
    print ('Zica = ' + str(point_zica))
    cv2.polylines(contour_img, np.int32([hull]), True, 255)
    contour_img = cv2.copyMakeBorder(contour_img, 30, 30, 30, 30, cv2.BORDER_CONSTANT)
    '''Drawing the most different side which would be used to get a good orientation'''
    # drawing the chosen side
    cv2.drawContours(contour_img, np.int32([point_zica]), -1, (255,0,0), offset=(30,30), thickness=3)
    # the next line draws the 'instant' triangles
    cv2.drawContours(contour_img, np.int32([points[1]]), -1, (255,0,0), offset=(30,30))
    # drawing the averaged triangle
    cv2.drawContours(contour_img, np.int32([average_points]), -1, (0,0,255), offset=(30,30))
    contour_img_box = contour_img.copy()
    resized_cnt = cv2.resize(contour_img_box, (256, 256))
    cv2.imshow('Contour', resized_cnt)
    cv2.waitKey(1)
    # getting only the contour itself and drawing it separately
    contour_img_cropped = np.where(contour_img_clean[:, :, 1] == 255, contour_img_clean[:, :, 1], 0)
    resized_cnt = cv2.resize(contour_img_cropped, (256, 256))
    cv2.imshow('OnlyContour', resized_cnt)
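# The vertex-ordering trick in step 8 can be read in isolation as a greedy
# nearest-neighbour assignment. A minimal, self-contained sketch; the helper name
# match_triangle_vertices is illustrative and not part of the snippet above, and it
# assumes both triangles are given as three 2-D points.
import numpy as np

def match_triangle_vertices(new_pts, old_pts):
    """Reorder new_pts so that new_pts[i] is the closest still-unused point to old_pts[i]."""
    new_pts = np.asarray(new_pts, dtype=float)
    old_pts = np.asarray(old_pts, dtype=float)
    ordered = np.empty_like(old_pts)
    used = set()
    for i, old in enumerate(old_pts):
        # distance from this recorded vertex to every not-yet-assigned new vertex
        dists = [np.inf if j in used else np.linalg.norm(new_pts[j] - old)
                 for j in range(len(new_pts))]
        j_min = int(np.argmin(dists))
        ordered[i] = new_pts[j_min]
        used.add(j_min)
    return ordered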
Ejemplo n.º 29
0
#fig.show()

rows, cols = img.shape[:2]
for cnt in arrow.filter_contours_output:
    [vx, vy, x, y] = cv2.fitLine(cnt, cv2.DIST_L2, 0, 0.01, 0.01)
    lefty = int((-x * vy / vx) + y)
    righty = int(((cols - x) * vy / vx) + y)
    #cv2.line(img,(cols-1,righty),(0,lefty),(0,255,0),2)
    x, y, w, h = cv2.boundingRect(cnt)
    cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
    rect = cv2.minAreaRect(cnt)
    box = cv2.boxPoints(rect)
    box = np.int0(box)
    cv2.drawContours(img, [box], 0, (0, 0, 255), 2)
    # leftmost = tuple(cnt[cnt[:,:,0].argmin()][0])
    # rightmost = tuple(cnt[cnt[:,:,0].argmax()][0])
    # topmost = tuple(cnt[cnt[:,:,1].argmin()][0])
    # bottommost = tuple(cnt[cnt[:,:,1].argmax()][0])

    # min enclosing triangle
    retval, triangle = cv2.minEnclosingTriangle(cnt)
    #formatting hell
    Tlist = triangle.tolist()
    Tlist_flat = [item for sublist in Tlist for item in sublist]
    #should be good now
    cv2.circle(img, tuple([int(i) for i in Tlist_flat[0]]), 1, (255, 0, 0), 10)
    cv2.circle(img, tuple([int(i) for i in Tlist_flat[1]]), 1, (255, 0, 0), 10)
    cv2.circle(img, tuple([int(i) for i in Tlist_flat[2]]), 1, (255, 0, 0), 10)
cv2.imshow('image', img)

cv2.waitKey(0)
Ejemplo n.º 30
0
 def get_minimumenclosingtriangle(self, index):
     cnt = self.get_contour(index)
     if cnt is None: return None
     res, (p1, p2, p3) = cv2.minEnclosingTriangle(cnt)
     if not res: return None
     return p1, p2, p3
Ejemplo n.º 31
0
print(w)
ws = 5
gray = cv.cvtColor(img, cv.COLOR_RGB2GRAY)  # convert the input image to grayscale
# Direct thresholding applies pixel-by-pixel threshold segmentation to the single-channel input matrix.
ret, binary = cv.threshold(gray, 0, 255, cv.THRESH_BINARY | cv.THRESH_TRIANGLE)
image_median = cv.medianBlur(binary, 7)
kernel = np.ones((7, 7), np.uint8)
dilation = cv.dilate(image_median, kernel, iterations=1)
#cv.imshow('img1',dilation)
image, contours, hierarchy = cv.findContours(dilation, cv.RETR_TREE,
                                             cv.CHAIN_APPROX_SIMPLE)
#cv.imshow('img',image)
for i in range(len(contours)):
    area = cv.contourArea(contours[i])  # discard small contour regions; the size threshold is user-defined
    if (area < 5000): continue
    triangle = cv.minEnclosingTriangle(contours[i])
    points = triangle[1].reshape(3, 2).astype(int)  # triangle[1] holds the three vertex coordinates as a (3, 1, 2) array
    print(points)
    for j in range(3):
        img = cv.line(img, (points[j][0], points[j][1]),
                      (points[(j + 1) % 3][0], points[(j + 1) % 3][1]),
                      (255, 0, 0), 2)
    longth = []
    angles = 0
    for k in range(3):
        longth.append(
            math.sqrt((points[k][0] - points[(k + 1) % 3][0]) *
                      (points[k][0] - points[(k + 1) % 3][0]) +
                      (points[k][1] - points[(k + 1) % 3][1]) *
                      (points[k][1] - points[(k + 1) % 3][1])))
    index = 0
Ejemplo n.º 32
0
    def detect(self, image):
        output = image.copy()
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        #cv2.imshow("Original gray", gray)

        #lap = cv2.Laplacian(gray, cv2.CV_64F)
        #lap = np.uint8(np.absolute(lap))
        #cv2.imshow("Laplacian", lap)

        #sobelX = cv2.Sobel(gray, cv2.CV_64F, 1, 0)
        #sobelY = cv2.Sobel(gray, cv2.CV_64F, 0, 1)
        #sobelX = np.uint8(np.absolute(sobelX))
        #sobelY = np.uint8(np.absolute(sobelY))
        #sobelCombined = cv2.bitwise_or(sobelX, sobelY)
        #cv2.imshow("Sobel X", sobelX)
        #cv2.imshow("Sobel Y", sobelY)
        #cv2.imshow("Sobel Combined", sobelCombined)

        blur = cv2.GaussianBlur(gray, (5, 5), 0)
        blur = cv2.medianBlur(blur, 5)
        #cv2.imshow("blur", blur)

        canny = cv2.Canny(blur, 30, 150)
        #cv2.imshow("Canny", canny)

        ######
        # apply GaussianBlur to reduce noise; medianBlur is also added for smoothing and further noise reduction.
        # Adaptive Gaussian thresholding is used to detect sharp edges in the image. For more information Google it.
        thresh = cv2.adaptiveThreshold(canny, 255,
                                       cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                       cv2.THRESH_BINARY, 11, 9)
        #cv2.imshow("thresh", thresh)

        kernel = np.ones((5, 5), np.uint8)
        thresh = cv2.erode(thresh, kernel, iterations=1)
        thresh = cv2.dilate(thresh, kernel, iterations=1)
        img_size = thresh.shape
        #cv2.imshow("thresh", thresh)

        #resized = imutils.resize(canny, width=300)
        #ratio = canny.shape[0] / float(resized.shape[0])
        ratio = 1

        (_, canny_cnts, _) = cv2.findContours(canny.copy(), cv2.RETR_EXTERNAL,
                                              cv2.CHAIN_APPROX_SIMPLE)
        (_, thresh_cnts, _) = cv2.findContours(thresh.copy(),
                                               cv2.RETR_EXTERNAL,
                                               cv2.CHAIN_APPROX_SIMPLE)
        contours = image.copy()
        #print("I count {} contours in this image".format(len(thresh_cnts)))
        #cv2.drawContours(contours, thresh_cnts, -1, (0, 255, 0), 2)
        #cv2.imshow("all contours", contours)

        #sd = ShapeDetector()

        # not accurate enough
        canny_enable = 0

        thresh_enable = 1

        canny_found = 0
        thresh_found = 0

        min_area = 100
        low_threshold = 0.8
        high_threshold = 1.0 / low_threshold
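        # a candidate is accepted only when contour area / fitted-primitive area falls inside [low_threshold, high_threshold]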

        # future work
        # implement a universal poly area
        # detect overlapped poly
        # divide complex poly into triangles

        if canny_enable:
            for c in canny_cnts:
                M = cv2.moments(c)
                area = cv2.contourArea(c)
                peri = cv2.arcLength(c, True)
                approx = cv2.approxPolyDP(c, 0.04 * peri, True)
                #shape = sd.detect(c)
                if (area > min_area):
                    if (len(approx) == 3):
                        triangle = cv2.minEnclosingTriangle(c)
                        D1 = dist.euclidean(
                            (triangle[1][0][0][0], triangle[1][0][0][1]),
                            (triangle[1][1][0][0], triangle[1][1][0][1]))
                        D2 = dist.euclidean(
                            (triangle[1][1][0][0], triangle[1][1][0][1]),
                            (triangle[1][2][0][0], triangle[1][2][0][1]))
                        D3 = dist.euclidean(
                            (triangle[1][2][0][0], triangle[1][2][0][1]),
                            (triangle[1][0][0][0], triangle[1][0][0][1]))
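                        # Heron's formula: triangle area from its three side lengths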
                        p = (D1 + D2 + D3) / 2
                        area_calc = math.sqrt(p * (p - D1) * (p - D2) *
                                              (p - D3))
                        if (area / area_calc > low_threshold
                                and area / area_calc < high_threshold):
                            for i in range(0, 3):
                                cv2.line(contours, (triangle[1][i][0][0],
                                                    triangle[1][i][0][1]),
                                         (triangle[1][(i + 1) % 3][0][0],
                                          triangle[1][(i + 1) % 3][0][1]),
                                         (0, 255, 0), 2)
                            canny_found += 1
                    elif (len(approx) == 4):
                        rect = cv2.minAreaRect(c)
                        box = cv2.boxPoints(rect)
                        box = np.int0(box)
                        D1 = dist.euclidean((box[0][0], box[0][1]),
                                            (box[1][0], box[1][1]))
                        D2 = dist.euclidean((box[1][0], box[1][1]),
                                            (box[2][0], box[2][1]))
                        area_calc = D1 * D2
                        if (area / area_calc > low_threshold
                                and area / area_calc < high_threshold):
                            cv2.drawContours(contours, [box], 0, (0, 255, 0),
                                             4)
                            canny_found += 1
                    elif (len(approx) == 5):
                        # do nothing
                        pass
                    else:
                        #cX = int((M["m10"] / M["m00"]) * ratio)
                        #cY = int((M["m01"] / M["m00"]) * ratio)
                        #text = str(M["m00"])
                        #cv2.putText(contours, text, (cX, cY), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)
                        (x, y), radius = cv2.minEnclosingCircle(c)
                        center = (int(x), int(y))
                        radius = int(radius)
                        area_calc = radius * radius * math.pi
                        if (area / area_calc > low_threshold
                                and area / area_calc < high_threshold):
                            cv2.rectangle(contours,
                                          (center[0] - 5, center[1] - 5),
                                          (center[0] + 5, center[1] + 5),
                                          (0, 128, 255), -1)
                            cv2.circle(contours, center, radius, (0, 255, 0),
                                       4)
                            canny_found += 1

        if thresh_enable:
            for c in thresh_cnts:
                M = cv2.moments(c)
                area = cv2.contourArea(c)
                peri = cv2.arcLength(c, True)
                approx = cv2.approxPolyDP(c, 0.04 * peri, True)
                #shape = sd.detect(c)
                if (area > min_area):
                    if (len(approx) == 3):
                        triangle = cv2.minEnclosingTriangle(c)
                        D1 = dist.euclidean(
                            (triangle[1][0][0][0], triangle[1][0][0][1]),
                            (triangle[1][1][0][0], triangle[1][1][0][1]))
                        D2 = dist.euclidean(
                            (triangle[1][1][0][0], triangle[1][1][0][1]),
                            (triangle[1][2][0][0], triangle[1][2][0][1]))
                        D3 = dist.euclidean(
                            (triangle[1][2][0][0], triangle[1][2][0][1]),
                            (triangle[1][0][0][0], triangle[1][0][0][1]))
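                        # Heron's formula: triangle area from its three side lengths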
                        p = (D1 + D2 + D3) / 2
                        area_calc = math.sqrt(p * (p - D1) * (p - D2) *
                                              (p - D3))
                        if (area / area_calc > low_threshold
                                and area / area_calc < high_threshold):
                            for i in range(0, 3):
                                cv2.line(contours, (triangle[1][i][0][0],
                                                    triangle[1][i][0][1]),
                                         (triangle[1][(i + 1) % 3][0][0],
                                          triangle[1][(i + 1) % 3][0][1]),
                                         (0, 255, 0), 2)
                            thresh_found += 1
                    elif (len(approx) == 4):
                        rect = cv2.minAreaRect(c)
                        box = cv2.boxPoints(rect)
                        box = np.int0(box)
                        D1 = dist.euclidean((box[0][0], box[0][1]),
                                            (box[1][0], box[1][1]))
                        D2 = dist.euclidean((box[1][0], box[1][1]),
                                            (box[2][0], box[2][1]))
                        area_calc = D1 * D2
                        if (area / area_calc > low_threshold
                                and area / area_calc < high_threshold):
                            cv2.drawContours(contours, [box], 0, (0, 255, 0),
                                             4)
                            thresh_found += 1
                    elif (len(approx) == 5):
                        # do nothing
                        pass
                    else:
                        #cX = int((M["m10"] / M["m00"]) * ratio)
                        #cY = int((M["m01"] / M["m00"]) * ratio)
                        #text = str(M["m00"])
                        #cv2.putText(contours, text, (cX, cY), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)
                        (x, y), radius = cv2.minEnclosingCircle(c)
                        center = (int(x), int(y))
                        radius = int(radius)
                        area_calc = radius * radius * math.pi
                        if (area / area_calc > low_threshold
                                and area / area_calc < high_threshold):
                            cv2.rectangle(contours,
                                          (center[0] - 5, center[1] - 5),
                                          (center[0] + 5, center[1] + 5),
                                          (0, 128, 255), -1)
                            cv2.circle(contours, center, radius, (0, 255, 0),
                                       4)
                            thresh_found += 1

        if (canny_found or thresh_found):
            timestr = time.strftime("%Y%m%d-%H%M%S")
            #name = "frame%s.jpg"%timestr
            #cv2.imwrite(name, contours)

        #cv2.imshow("contours", contours)
        return contours