Example #1
def get_target_features(hsv_img, mask=None):
    h, s, v = cv2x.get_flat_hsv_channels(hsv_img, mask)
    s = s / 255.0
    v = v / 255.0
    color_features = get_color_features(hsv_img, mask=mask)

    feature_names = []
    values = []

    # add region features first
    feature_names.append('region_area')
    values.append(len(h))

    feature_names.append('region_saturation_mean')
    values.append(np.mean(s))

    feature_names.append('region_saturation_variance')
    values.append(np.var(s))

    feature_names.append('region_value_mean')
    values.append(np.mean(v))

    feature_names.append('region_value_variance')
    values.append(np.var(v))

    if mask is not None:
        contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL,
                                       cv2.CHAIN_APPROX_SIMPLE)
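        # note: only the first external contour is used below; the mask is
        # assumed to contain a single region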

        # get smallest bounding rectangle (rotated)
        box = cv2.minAreaRect(contours[0])
        cnt_w, cnt_h = box[1]

        region_eccentricity = cnt_w / cnt_h
        if region_eccentricity > 1:
            region_eccentricity = 1.0 / region_eccentricity

        peri = cv2.arcLength(contours[0], True)

        # calculate circularity as (4 * pi * area) / perimeter ^ 2
        region_circularity = (4 * np.pi * len(h)) / float(peri)**2

        # calculate convexity as convex hull perimeter / contour perimeter
        hull = cv2.convexHull(contours[0])
        region_convexity = cv2.arcLength(hull, True) / peri

        feature_names.append('region_eccentricity')
        values.append(region_eccentricity)

        feature_names.append('region_circularity')
        values.append(region_circularity)

        feature_names.append('region_convexity')
        values.append(region_convexity)

    for color, features in color_features.items():
        for feature, value in sorted(features.items()):
            feature_str = '%s (%s)' % (feature, color)
            feature_names.append(feature_str)
            values.append(value)

    target_features = pd.Series(values, index=feature_names)

    return target_features.sort_index()
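A minimal usage sketch for the function above (assumes the module's own imports — cv2, numpy as np, pandas as pd — plus the project's cv2x helpers are available; the image path is hypothetical):

import cv2
import numpy as np

bgr = cv2.imread('region.jpg')                  # hypothetical input image
hsv = cv2.cvtColor(bgr, cv2.COLOR_BGR2HSV)

# a single filled circle as the region mask
mask = np.zeros(hsv.shape[:2], dtype=np.uint8)
cv2.circle(mask, (hsv.shape[1] // 2, hsv.shape[0] // 2), 40, 255, -1)

features = get_target_features(hsv, mask=mask)  # pd.Series indexed by feature name
print(features.filter(like='region_'))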
Example #2
def get_color_features(hsv_img, mask=None):
    """
    Takes an hsv image and returns a custom set of features useful for machine learning
    :param hsv_img: np.array with color scheme hsv
    :param mask: np.array list that contains the line segments of the thing being described
    :return: dictionary of features for machine learning
    """
    c_prof = get_color_profile(hsv_img, mask)

    if mask is not None:
        tot_px = np.sum(mask > 0)
    else:
        tot_px = hsv_img.shape[0] * hsv_img.shape[1]

    # corner to corner distance used to normalize sub-contour distance metrics
    diag_distance = cv2x.calculate_distance(0, 0, hsv_img.shape[0],
                                            hsv_img.shape[1])

    color_features = {}

    for color in HSV_RANGES.keys():
        color_percent = float(c_prof[color]) / tot_px

        # create color mask & apply it
        color_mask = create_color_mask(hsv_img, [color])

        # apply user specified mask
        if mask is not None:
            color_mask = cv2.bitwise_and(color_mask, color_mask, mask=mask)

        ret, thresh = cv2.threshold(color_mask, 1, 255, cv2.THRESH_BINARY)

        # To properly calculate the metrics, any "complex" contours with
        # holes (i.e., those with a contour hierarchy) need to be reconstructed
        # with their hierarchy. The OpenCV hierarchy scheme is a 4 column
        # NumPy array with the following convention:
        #
        #   [Next, Previous, First_Child, Parent]
        #
        #   Next: Index of next contour at the same hierarchical level
        #   Previous: Index of previous contour at the same hierarchical level
        #   First_Child: Index of the parent's first child contour
        #   Parent: Index of the parent contour
        #
        # If any of these cases do not apply then the value -1 is used. The
        # following pseudo-code covers the different cases:
        #
        #   If 'Parent' == -1:
        #     the index is a root parent contour
        #
        #     If 'First_Child' == -1:
        #       the index has no children
        #     If 'Next' == -1:
        #       there are no more root-level parent contours left
        #
        #   Else If 'Parent' > -1:
        #     the index is a child contour w/ 'Parent' value as its parent
        #
        #     If 'Next' & 'First_Child' == -1:
        #       the child has no further siblings or children, it's a "leaf"
        #
        # This can all get quite complex, with grand-children, and further
        # nesting. Any root parent is an external contour, 1st level children
        # are then the boundary of inner holes, the root parent's grandchildren
        # would be the outer boundary of a nested contour, etc.
        #
        # However, we only want to consider connected contours, meaning those
        # external boundaries and their direct boundaries for any holes. Any
        # grandchildren we want to consider as being at the root level. For
        # this, OpenCV has a retrieval method 'RETR_CCOMP', where only these
        # 2 levels are used
        contours, hierarchy = cv2.findContours(thresh, cv2.RETR_CCOMP,
                                               cv2.CHAIN_APPROX_SIMPLE)

        cent_list = []
        area_list = []
        peri_list = []
        har_list = []

        if len(contours) == 0:
            color_features[color] = {
                'percent': color_percent,
                'contour_count': len(cent_list),
                'distance_mean': 0.0,
                'distance_variance': 0.0,
                'area_mean': 0.0,
                'area_variance': 0.0,
                'perimeter_mean': 0.0,
                'perimeter_variance': 0.0,
                'har_mean': 0.0,
                'har_variance': 0.0,
                'largest_contour_area': 0.0,
                'largest_contour_saturation_mean': 0.0,
                'largest_contour_saturation_variance': 0.0,
                'largest_contour_value_mean': 0.0,
                'largest_contour_value_variance': 0.0,
                'largest_contour_eccentricity': 0.0,
                'largest_contour_circularity': 0.0,
                'largest_contour_convexity': 0.0,
                'largest_contour_har': 0.0
            }
            continue

        # If the contour count is > 0 we also have a hierarchy;
        # get all the root (parent) contour indices
        parent_contour_indices = np.where(hierarchy[:, :, 3] == -1)[1]

        largest_contour_area = 0.0
        largest_contour_true_area = 0.0
        largest_contour_peri = 0.0
        largest_contour_har = 0.0
        largest_contour = None
        largest_contour_idx = None

        for c_idx in parent_contour_indices:
            c_mask = np.zeros(hsv_img.shape[0:2], dtype=np.uint8)
            cv2.drawContours(c_mask,
                             contours,
                             c_idx,
                             255,
                             -1,
                             hierarchy=hierarchy)

            true_area = np.sum(c_mask > 0)

            # ignore contours that are only a point or a line (no area),
            # as well as "noise" regions of 8 pixels or less
            if true_area <= 8 or len(contours[c_idx]) <= 3:
                continue

            area = true_area / float(tot_px)
            peri = cv2.arcLength(contours[c_idx], True)
            try:
                m = cv2.moments(contours[c_idx])
                centroid_x = m['m10'] / m['m00']
                centroid_y = m['m01'] / m['m00']
            except ZeroDivisionError:
                centroid_x = contours[c_idx][:, :, 0].mean()
                centroid_y = contours[c_idx][:, :, 1].mean()

            # re-draw contour without holes
            cv2.drawContours(c_mask, contours, c_idx, 255, -1)
            filled_area = np.sum(c_mask > 0)
            hole_area_ratio = (filled_area - true_area) / float(filled_area)

            if area > largest_contour_area:
                largest_contour_area = area
                largest_contour_true_area = true_area
                largest_contour_peri = peri
                largest_contour_har = hole_area_ratio
                largest_contour = contours[c_idx]
                largest_contour_idx = c_idx

            cent_list.append((centroid_x, centroid_y))
            area_list.append(area)
            peri_list.append(peri)
            har_list.append(hole_area_ratio)

        if len(cent_list) <= 1:
            pair_dist = [0.0]
        else:
            pair_dist = pdist(np.array(cent_list)) / diag_distance

        dist_mean = np.mean(pair_dist)
        dist_var = np.var(pair_dist)

        if len(area_list) == 0:
            area_mean = 0.0
            area_var = 0.0
        else:
            area_mean = np.mean(area_list)
            area_var = np.var(area_list)

        if len(peri_list) == 0:
            peri_mean = 0.0
            peri_var = 0.0
        else:
            peri_mean = np.mean(peri_list)
            peri_var = np.var(peri_list)

        if len(har_list) == 0:
            har_mean = 0.0
            har_var = 0.0
        else:
            har_mean = np.mean(har_list)
            har_var = np.var(har_list)

        largest_contour_eccentricity = 0.0
        largest_contour_circularity = 0.0
        largest_contour_convexity = 0.0
        largest_contour_sat_mean = 0.0
        largest_contour_sat_var = 0.0
        largest_contour_val_mean = 0.0
        largest_contour_val_var = 0.0

        if largest_contour_true_area > 0.0 and largest_contour is not None:
            lc_mask = np.zeros(hsv_img.shape[0:2], dtype=np.uint8)
            cv2.drawContours(lc_mask,
                             contours,
                             largest_contour_idx,
                             255,
                             cv2.FILLED,
                             hierarchy=hierarchy)
            lc_h, lc_s, lc_v = cv2x.get_flat_hsv_channels(hsv_img, lc_mask)
            lc_s = lc_s / 255.0
            lc_v = lc_v / 255.0

            largest_contour_sat_mean = np.mean(lc_s)
            largest_contour_sat_var = np.var(lc_s)
            largest_contour_val_mean = np.mean(lc_v)
            largest_contour_val_var = np.var(lc_v)

            # get smallest bounding rectangle (rotated)
            box = cv2.minAreaRect(largest_contour)
            cnt_w, cnt_h = box[1]

            largest_contour_eccentricity = cnt_w / cnt_h
            if largest_contour_eccentricity > 1:
                largest_contour_eccentricity = 1.0 / largest_contour_eccentricity

            # calculate circularity as (4 * pi * area) / perimeter ^ 2
            largest_contour_circularity = (
                4 * np.pi *
                largest_contour_true_area) / float(largest_contour_peri)**2

            # calculate convexity as convex hull perimeter / contour perimeter
            hull = cv2.convexHull(largest_contour)
            largest_contour_convexity = cv2.arcLength(
                hull, True) / largest_contour_peri

        color_features[color] = {
            'percent': color_percent,
            'contour_count': len(cent_list),
            'distance_mean': dist_mean,
            'distance_variance': dist_var,
            'area_mean': area_mean,
            'area_variance': area_var,
            'perimeter_mean': peri_mean,
            'perimeter_variance': peri_var,
            'har_mean': har_mean,
            'har_variance': har_var,
            'largest_contour_area': largest_contour_area,
            'largest_contour_saturation_mean': largest_contour_sat_mean,
            'largest_contour_saturation_variance': largest_contour_sat_var,
            'largest_contour_value_mean': largest_contour_val_mean,
            'largest_contour_value_variance': largest_contour_val_var,
            'largest_contour_eccentricity': largest_contour_eccentricity,
            'largest_contour_circularity': largest_contour_circularity,
            'largest_contour_convexity': largest_contour_convexity,
            'largest_contour_har': largest_contour_har
        }

    return color_features
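A self-contained sketch of the two-level RETR_CCOMP hierarchy described in the comments above, using a synthetic ring (one outer boundary enclosing one hole); the [Next, Previous, First_Child, Parent] layout is OpenCV's documented convention:

import cv2
import numpy as np

ring = np.zeros((100, 100), dtype=np.uint8)
cv2.circle(ring, (50, 50), 40, 255, -1)  # filled disk
cv2.circle(ring, (50, 50), 20, 0, -1)    # punch a hole in it

contours, hierarchy = cv2.findContours(ring, cv2.RETR_CCOMP,
                                       cv2.CHAIN_APPROX_SIMPLE)
# one root parent (the outer boundary) and one child (the hole), e.g.:
#   [[[-1 -1  1 -1]
#     [-1 -1 -1  0]]]
print(hierarchy)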
Example #3
def get_cropped_pic(img_path, min_area, offsetWidth, offsetHeight,
                    show_binary_image, show_original_image, model):
    """
    @Params
    min_area: if the detected contour is smaller than this, it shouldn't be counted.
    offsetWidth and Height: how many pixes do you want to add to the bounding box.
    show xxx: show the image in opencv.
    @Returns
    1. results_array: all the cropped images
    2. image: original image.
    """
    results = []
    position = []
    origin_img = cv2.imread(img_path)
    img = cv2.cvtColor(origin_img, cv2.COLOR_BGR2GRAY)
    h, w = img.shape

    # use a bilateral filter to blur the image while preserving edges
    gray = cv2.bilateralFilter(img, 9, 75, 75)
    # threshold the image so contours are easier to detect
    _, threshold_img = cv2.threshold(gray, 55, 255, cv2.THRESH_BINARY_INV)
    # show the image which is preprocessed
    if show_binary_image:
        cv2.imshow('Gray image', cv2.resize(threshold_img, (w // 2, h // 2)))
        cv2.waitKey(0)
        cv2.destroyAllWindows()
    # find contours and get the external one
    contours, _ = cv2.findContours(threshold_img, cv2.RETR_EXTERNAL,
                                   cv2.CHAIN_APPROX_SIMPLE)
    # counter of contours
    index = 0
    for c in contours:
        # get the min area rect
        rect = cv2.minAreaRect(c)

        # get the width and height of minRec
        width = int(rect[1][0])
        height = int(rect[1][1])
        # make sure the area of rectangle is big enough
        # test if the contour is a bolt:
        if width * height > min_area:
            _, (length, height3), angle3 = rect
            if height3 > length:
                length = height3
            length = length / 6.6  # scale factor (presumably pixels per physical unit)

            rect_with_offset = make_rect_bigger(rect, offsetWidth,
                                                offsetHeight)
            box = cv2.boxPoints(rect_with_offset)
            # cv2.putText(origin_img, str(index), tuple(box[1]), cv2.FONT_HERSHEY_SIMPLEX,
            #             5, (0, 255, 0), 2, cv2.LINE_AA)

            # convert all coordinates floating point values to int
            box = np.int0(box)
            src_pts = box.astype("float32")

            # get the new width and height
            width = int(rect_with_offset[1][0])
            height = int(rect_with_offset[1][1])
            # coordinates of the box points after the rectangle has been straightened
            dst_pts = np.array(
                [[0, height], [0, 0], [width, 0], [width, height]],
                dtype="float32")

            # the perspective transformation matrix
            M = cv2.getPerspectiveTransform(src_pts, dst_pts)
            # directly warp the rotated rectangle to get the straightened rectangle
            warped = cv2.warpPerspective(img,
                                         M, (width, height),
                                         borderMode=cv2.BORDER_CONSTANT,
                                         borderValue=(170, 170, 170))

            temp_name = config.TEMP_PATH + str(
                datetime.now().strftime('%Y%m%d-%H%M%S')) + '.jpg'
            cv2.imwrite(temp_name, warped)

            if model_predict(temp_name, model) == 0:
                bolt_name = config.BOLT_PATH + str(
                    datetime.now().strftime('%Y%m%d-%H%M%S')) + '.jpg'
                cv2.imwrite(bolt_name, warped)
                # get the box points from original rectangle
                box = cv2.boxPoints(rect)
                # convert all coordinates floating point values to int
                box = np.int0(box)
                src_pts = box.astype("float32")
                # get the new width and height
                width = int(rect[1][0])
                height = int(rect[1][1])
                # coordinate of the points in box points after the rectangle has been straightened
                dst_pts = np.array(
                    [[0, height], [0, 0], [width, 0], [width, height]],
                    dtype='float32')
                # the perspective transformation matrix
                M = cv2.getPerspectiveTransform(src_pts, dst_pts)
                # directly warp the rotated rectangle to get the straightened rectangle
                warped = cv2.warpPerspective(img,
                                             M, (width, height),
                                             borderMode=cv2.BORDER_CONSTANT,
                                             borderValue=(170, 170, 170))
                # get the center and angle.
                angle, center, evec1, evec2, eval = get_orientation(
                    c, origin_img)
                # convert the orientation from radians to degrees
                angle1 = angle * 180.0 / pi
                grasp_point = []
                # from the rect center to the centroid.
                angle2 = atan2(center[1] - int(rect[0][1]),
                               center[0] - int(rect[0][0])) * 180.0 / pi
                direction_point = (center[0] + 0.02 * evec1 * eval,
                                   center[1] + 0.02 * evec2 * eval)
                real_angle = angle1
                # the angle between the two vectors should be smaller than 90 degrees; otherwise the direction is reversed.
                if min((360.0 - abs(angle1 - angle2)),
                       abs(angle1 - angle2)) > 90.0:
                    real_angle = angle1 + 180.0
                    if real_angle > 180.0:
                        real_angle = real_angle - 360.0
                    direction_point = (center[0] - 0.02 * evec1 * eval,
                                       center[1] - 0.02 * evec2 * eval)
                draw_axis(origin_img, center, direction_point, (0, 255, 0), 1)
                # grasp_point.append((center[0] - 58) / 5.76)
                # grasp_point.append((center[1] - 111) / 5.63)
                # cv2.arrowedLine(origin_img, (int(rect[0][0]), int(rect[0][1])), center, (0, 0, 255),
                #                 thickness=1, line_type=8, shift=0, tipLength=5)

                results.append(bolt_name)
                # cv2.imshow("output", cv2.resize(output, (800,800)))
                # cv2.waitKey(0)
                # cv2.destroyAllWindows()
                # draw a green 'nghien' rectangle
                # cv2.drawContours(origin_img, [box], 0, (0, 255, 0))
                index += 1

                # calculate head point
                length = max(height, width)
                angle_radians = radians(real_angle)
                xLen = cos(angle_radians) * length / 2 * 0.7
                yLen = sin(angle_radians) * length / 2 * 0.7
                head = (int(center[0] + xLen), int(center[1] + yLen))
                cv2.circle(origin_img, head, 2, (0, 255, 0), thickness=5)

                real_angle = real_angle % 360  # already in degrees

                position.append([real_angle, center, length, head])

            else:
                cv2.imwrite(
                    config.NOT_BOLT_PATH +
                    str(datetime.now().strftime('%Y%m%d-%H%M%S')) + '.jpg',
                    warped)

    # draw contours of the image
    cv2.drawContours(origin_img, contours, -1, (0, 0, 255), 1)
    # check the image
    if show_original_image:
        cv2.imshow("contours", cv2.resize(origin_img, (w // 2, h // 2)))
        cv2.waitKey(0)
        cv2.destroyAllWindows()
    return position, results
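The rotated-rectangle straightening used twice above can be isolated into a helper; a minimal sketch, assuming a grayscale image and a rect returned by cv2.minAreaRect, with the same box-point-to-corner mapping as in get_cropped_pic:

import cv2
import numpy as np

def straighten_rect(gray_img, rect):
    # warp the rotated rectangle upright via a perspective transform
    src_pts = cv2.boxPoints(rect).astype('float32')
    width, height = int(rect[1][0]), int(rect[1][1])
    dst_pts = np.array([[0, height], [0, 0], [width, 0], [width, height]],
                       dtype='float32')
    M = cv2.getPerspectiveTransform(src_pts, dst_pts)
    return cv2.warpPerspective(gray_img, M, (width, height))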
Example #4
    def getFaceBox(self, expand=False):
        if (self.isUpdated):
            rotation_vector, translation_vector, eulerAngle = self.currentData
            im = self.im
            points = self.currentPoints.astype(int)
            all_marks = self.currentLandmark.parts()
            #all_marks = np.array(all_marks)
            all_points = np.empty((0, 2), dtype="int")

            for mark in all_marks:
                #print(mark)
                all_points = np.append(all_points, [[mark.x, mark.y]], axis=0)

            rect = cv2.minAreaRect(all_points)
            box = cv2.boxPoints(rect)
            box = np.int0(box)
            #print("box: ",box)
            print("deg: ", rect[2])
            deg = rect[2]
            if (rect[2] < -45):
                box_tmp = np.empty((4, 2), dtype="int")
                box_tmp[0] = box[1]
                box_tmp[1] = box[2]
                box_tmp[2] = box[3]
                box_tmp[3] = box[0]
                box = box_tmp
                print(box)

            if (rect[2] < -45):
                height = rect[1][0]
                width = rect[1][1]
            else:
                width = rect[1][0]
                height = rect[1][1]

            if (expand):
                height = height * 3 / 2
                print(height, width)

                exp_point1, exp_point2 = self.expandWidth(
                    box[0], box[3], 20, deg)
                exp_point3, exp_point4 = self.expandHeight(
                    exp_point1, exp_point2, height, deg)
                box_tmp = np.empty((4, 2), int)
                box_tmp[0] = exp_point1
                box_tmp[1:3] = [exp_point3, exp_point4]
                box_tmp[3] = exp_point2
                box = box_tmp

            steady_box = []
            box = box.flatten()

            for i in range(8):
                stb = self.boxStabilizers[i]
                stb.update([box[i]])
                steady_box.append(stb.state[0])

            steady_box = np.int0(np.reshape(steady_box, (4, 2)))
            return True, steady_box
        else:
            return False, None
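The two rect[2] < -45 branches above encode the pre-4.5 OpenCV minAreaRect angle convention, where the reported angle lies in [-90, 0) and width/height swap roles past -45 degrees; a small sketch of that normalization as a standalone helper, under the same assumption:

def upright_width_height(rect):
    # rect = ((cx, cy), (w, h), angle) as returned by cv2.minAreaRect
    (w, h), angle = rect[1], rect[2]
    if angle < -45:  # the box is reported sideways; swap the two sides
        return h, w
    return w, h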
Example #5
    gray = cv2.inRange(hsv, lowerColor,
                       upperColor)  # Generate mask from color range

    # Find contours in the image with OpenCV
    cnts = cv2.findContours(gray.copy(), cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)

    # Draw/print contour information
    for c in cnts:
        x, y, w, h = cv2.boundingRect(c)  # get the bounding box
        area = w * h  # calculate bounding box area
        center = (int(x + w / 2), int(y + h / 2))  # calculate center

        # skip contours with too small area
        if (w * h) < cv2.getTrackbarPos('M Area', 'Targeting Controls'):
            continue

        cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0),
                      1)  # draw green bounding box

        cv2.circle(img, center, 2, (255, 0, 0), 2)  # draw blue centerpoint

        rect = cv2.minAreaRect(c)  # get the min area rectangle
        box = cv2.boxPoints(rect)  # extract box points for drawing
        cv2.drawContours(img, [np.int0(box)], 0,
                         (0, 0, 255))  # draw red min area rectangle

    cv2.imshow('Test Image', img)

cv2.destroyAllWindows()
Example #6
def sentences_segmentate(img):
    # Converting to Gray
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    # Using Sobel Edge Detection to Generate Binary
    sobel = cv2.Sobel(gray, cv2.CV_8U, 1, 0, ksize=3)
    # Binarize with Otsu's threshold
    ret, binary = cv2.threshold(sobel, 0, 255,
                                cv2.THRESH_OTSU + cv2.THRESH_BINARY)

    # Structuring elements for dilation and erosion
    element1 = cv2.getStructuringElement(cv2.MORPH_RECT, (30, 9))
    element2 = cv2.getStructuringElement(cv2.MORPH_RECT, (24, 6))

    # Dilate once to make the outline stand out
    dilation = cv2.dilate(binary, element2, iterations=1)

    # Erode once to remove details
    erosion = cv2.erode(dilation, element1, iterations=1)

    # Dilate again to make the outline more visible
    dilation2 = cv2.dilate(erosion, element2, iterations=2)

    # Find contours and screen for text areas
    region = []
    contours, hierarchy = cv2.findContours(dilation2, cv2.RETR_TREE,
                                           cv2.CHAIN_APPROX_SIMPLE)

    for i in range(len(contours)):
        cnt = contours[i]

        # Calculate contour area and screen out small areas
        area = cv2.contourArea(cnt)
        if (area < 1000):
            continue

        # Find the smallest (rotated) bounding rectangle
        rect = cv2.minAreaRect(cnt)
        #print("Rect: ", rect)

        # box holds the coordinates of the four corner points
        box = cv2.boxPoints(rect)
        box = np.int0(box)

        # Computing height and width
        height = abs(box[0][1] - box[2][1])
        width = abs(box[0][0] - box[2][0])

        # Text regions are wide and flat; skip rectangles that are too tall and thin.
        if (height > width * 1.3):
            continue

        region.append(box)

    # Crop each region into its own image
    img_output_path = 'temp/out_sentences/'
    i = 0
    for box in region:
        x, y, w, h = cv2.boundingRect(box)
        ROI = img[y:y + h, x:x + w]

        path = os.path.join(img_output_path, '{}.jpg'.format(i))
        cv2.imwrite(path, ROI)
        i += 1

    cv2.drawContours(img, region, -1, (0, 255, 0), 3)
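A minimal usage sketch (the input path is hypothetical; the function writes its crops into temp/out_sentences/, which must exist):

import os
import cv2

os.makedirs('temp/out_sentences', exist_ok=True)  # output dir used by the function
page = cv2.imread('page.jpg')                     # hypothetical scanned page
sentences_segmentate(page)                        # writes 0.jpg, 1.jpg, ... crops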
Example #7
# imports assumed by this fragment; `mask` and `image` come from earlier, omitted code
import cv2
import pylab as pl
from PIL import Image
from scipy.ndimage import filters

imx = pl.zeros(mask.shape)
filters.sobel(mask,1,imx)
imy = pl.zeros(mask.shape)
filters.sobel(mask,0,imy)
magnitude = pl.sqrt(imx**2+imy**2)

#convert the float gradient magnitude to an 8-bit grayscale image (findContours needs uint8)
magnitude = Image.fromarray(magnitude)
magnitude = magnitude.convert('L')
magnitude = pl.array(magnitude)

#finding contours
contours,hierarchy = cv2.findContours(magnitude, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)

#getting the points of the minimum area rectangle around two particular contours (indices specific to this image)
rect = cv2.minAreaRect(contours[8])
box1 = cv2.boxPoints(rect)
box1 = pl.int0(box1)

rect = cv2.minAreaRect(contours[14])
box2 = cv2.boxPoints(rect)
box2 = pl.int0(box2)

#combining required points of both contours to form a bigger rectangle
box = [list(box1[0]),list(box1[1]),list(box2[2]),list(box2[3])]
box = pl.array(box)
cv2.drawContours(image,[box],0,(0,255,0),1)

#showing image
cv2.imshow("Image",image)
cv2.waitKey(0)
Example #8
img2 = cv2.GaussianBlur(img, (3, 3), 0)
img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
img2 = cv2.Sobel(img2, cv2.CV_8U, 1, 0, ksize=3)
_, img2 = cv2.threshold(img2, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
element = cv2.getStructuringElement(shape=cv2.MORPH_RECT, ksize=(17, 3))
morph_img_threshold = img2.copy()
cv2.morphologyEx(src=img2,
                 op=cv2.MORPH_CLOSE,
                 kernel=element,
                 dst=morph_img_threshold)
num_contours, hierarchy = cv2.findContours(morph_img_threshold,
                                           mode=cv2.RETR_EXTERNAL,
                                           method=cv2.CHAIN_APPROX_NONE)
cv2.drawContours(img2, num_contours, -1, (0, 255, 0), 1)
for i, cnt in enumerate(num_contours):
    min_rect = cv2.minAreaRect(cnt)
    if ratio_and_rotation(min_rect):
        x, y, w, h = cv2.boundingRect(cnt)
        plate_img = img[y:y + h, x:x + w]
        print("Number  identified number plate...")
        cv2.imshow("num plate image", plate_img)
        if cv2.waitKey(0) & 0xff == ord('q'):
            pass
        if (isMaxWhite(plate_img)):
            clean_plate, rect = clean2_plate(plate_img)
            if rect:
                fg = 0
                x1, y1, w1, h1 = rect
                x, y, w, h = x + x1, y + y1, w1, h1
                # cv2.imwrite("clena.png",clean_plate)
                plate_im = Image.fromarray(clean_plate)
Example #9
                                          minArea=100, filter=4,
                                          cThr=[50,50],draw = False)
 if len(conts) != 0:
     for obj in conts2:
         imgContour = imgContours2.copy()
         imgBlur = cv2.GaussianBlur(imgContours2, (9, 9), 1)
         imgGray = cv2.cvtColor(imgBlur, cv2.COLOR_BGR2GRAY)
         edged = cv2.Canny(imgBlur, 50, 100)
         edged = cv2.dilate(edged, None, iterations=1)
         edged = cv2.erode(edged, None, iterations=1)
         cnts = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
         cnts = imutils.grab_contours(cnts)
         (cnts, _) = contours.sort_contours(cnts)
         cnts = [x for x in cnts if cv2.contourArea(x) > 100]
         ref_object = cnts[0]
         box = cv2.minAreaRect(ref_object)
         box = cv2.boxPoints(box)
         box = np.array(box, dtype="int")
         box = perspective.order_points(box)
         (tl, tr, br, bl) = box
         dist_in_pixel = euclidean(tl, tr)
         dist_in_cm = 25
         pixel_per_cm = dist_in_pixel/dist_in_cm
         threshold1 = 200
         threshold2 = 2
         imgCanny = cv2.Canny(imgGray,threshold1,threshold2)
         kernel = np.ones((5, 5))
         imgDil = cv2.dilate(imgCanny, kernel, iterations=1)
         geContours(imgDil,imgContour)
         for cnt in cnts:
Example #10
    canny = cv.Canny(gray, 0, 20)
    dilated = cv.dilate(canny, (1, 1), iterations=1)
    eroded = cv.erode(dilated, (1, 1), iterations=1)
    # detects contours and draws them in green onto blank canvas
    contours, hierarchies = cv.findContours(
        canny, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_NONE)
    cv.drawContours(blank, contours, -1, (0, 255, 0), 1)
    cnt = contours[0]
    # defines bounding rectangle around crosshair and prints width and height of box
    x, y, w, h = cv.boundingRect(cnt)
    # print('Width = ' + str(w) + ' pixels')
    # print('Height = ' + str(h) + ' pixels')
    # calculates size and thickness from bounding box
    size = h / 2
    thickness = w/1.5
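    # round thickness down to the nearest 0.5-pixel step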
    if thickness < np.floor(thickness) + .5: 
        thickness = np.floor(thickness)
    elif thickness < np.ceil(thickness):
        thickness = np.floor(thickness) + .5
    print('Size = ' + str(size))
    print('Thickness = ' + str(thickness))
    # draws bounding box in red onto blank canvas
    rect = cv.minAreaRect(cnt)
    box = cv.boxPoints(rect)
    box = np.int0(box)
    cv.drawContours(blank, [box], -1, (0, 0, 255), 1)
    cv.imshow('Contours Drawn', blank)

    cv.waitKey(0)
    cv.destroyAllWindows()
Example #11
              'PNG')

#import the image
img = cv2.imread(
    r'C:\Users\Atharva\Desktop\Incture\Output images\BigRockout.png')

#convert the image into grayscale and invert the background and the foreground
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
gray = cv2.bitwise_not(gray, mask=None)
thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
cv2.imshow('thresh', thresh)
cv2.waitKey(0)

#Deskew the image by identifying the angle of rotation and correcting it back to straighten the image
coords = np.column_stack(np.where(thresh > 0))
angle = cv2.minAreaRect(coords)[-1]
if angle < -45:
    angle = -(90 + angle)
else:
    angle = -angle
(h, w) = img.shape[:2]
center = (w // 2, h // 2)
M = cv2.getRotationMatrix2D(center, angle, 1.0)
rotated = cv2.warpAffine(img,
                         M, (w, h),
                         flags=cv2.INTER_CUBIC,
                         borderMode=cv2.BORDER_REPLICATE)

# draw the correction angle on the image so we can validate it
cv2.putText(rotated, "Angle: {:.2f} degrees".format(angle), (10, 30),
            cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 255, 255), 2)
Example #12
def binary_captchar(pathname):
    '''
    Generate the captcha string from the image at the given path
    '''

    img = cv.imread(pathname, 0)

    ret, dst = cv.threshold(img, 10, 255, cv.THRESH_BINARY)  # binarize

    dst = 255 - dst  # invert colors

    ret, labels = cv.connectedComponents(dst)  # find connected components

    unique, counts = np.unique(labels, return_counts=True)
    labels_counts = dict(zip(unique, counts))  # tally pixel counts per label

    ret, bimg = cv.threshold(img, 127, 255, cv.THRESH_BINARY)  # binarize the base image

    for i in range(img.shape[0]):  # denoise: keep only components of 10-80 pixels
        for j in range(img.shape[1]):
            if(labels_counts[labels[i, j]] < 10 or labels_counts[labels[i, j]] > 80):
                bimg[i, j] = 0
            else:
                bimg[i, j] = 255

    contours, hierarchy = cv.findContours(
        bimg, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)

    rects = [cv.minAreaRect(cnt) for cnt in contours]  # min-area rect for each contour

    boxes = [np.int0(cv.boxPoints(box)) for box in rects]  # integer corner points of each box
    leftset = set()
    for box in boxes:
        leftset.add(min([i[0] for i in box]))
    left_list = sorted(leftset)  # sort the detected letter regions from left to right

    captcha = ''

    for left in left_list:
        for box in boxes:
            Xs = [i[0] for i in box]
            Ys = [i[1] for i in box]
            x1 = min(Xs)
            if(x1 != left):
                continue
            x2 = max(Xs)
            y1 = min(Ys)
            y2 = max(Ys)
            height = y2 - y1
            width = x2 - x1
            crop = bimg[y1 - 1:y1 + height + 2, x1 - 1:x1 + width + 2]  # the cropped piece

            tem_folder = os.path.join(os.path.abspath('.'), 'split')
            isExists = os.path.exists(tem_folder)
            if not isExists:
                os.makedirs(tem_folder)

            split_name = os.path.join(os.path.abspath(
                '.'), 'split', str(time.time()) + '.png')
            # print(split_name)

            cv.imwrite(split_name, crop)
            split_img = cv.imread(split_name)
            '''
            if not split_img:
                print("can't load split_img")
                sys.exit()
            '''

            '''
            The crop must be written to disk and read back before it can be used.
            '''
            captcha += get_captcha(split_img)

            os.remove(split_name)  # delete the temporary file

    return captcha
Example #13
for num in range(NumOfFrame):
    try:
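        # note: VideoCapture.read() does not seek; its optional argument is an
        # output buffer, so passing `num` does not select a frame here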
        _, img = vid.read(num)
        gray_img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
        _, im = cv2.threshold(gray_img, 220, 1,
                              cv2.THRESH_BINARY + cv2.THRESH_OTSU)

        if sum(sum(gray_img.astype('double'))) > 1000:
            cnts, hier = cv2.findContours(im, cv2.RETR_TREE,
                                          cv2.CHAIN_APPROX_SIMPLE)
            center = None
            for c in cnts:
                x, y, w, h = cv2.boundingRect(c)
                #cv2.rectangle(img, (x,y),(x+w,y+h),(0,255,0),2)

                rect = cv2.minAreaRect(c)
                box = cv2.boxPoints(rect)

                box = np.int0(box)
                cv2.drawContours(img, [box], 0, (0, 0, 255))
            if len(cnts) > 1:
                c = sorted(cnts, key=cv2.contourArea)[-1]  # The largest area
                ((x, y), radius) = cv2.minEnclosingCircle(c)
                M = cv2.moments(c)
                #deal with division by zero
                if M["m00"] != 0:
                    center = (int(M["m10"] / M["m00"]),
                              int(M["m01"] / M["m00"]))
                else:
                    center = (0, 0)
                CX = center[0]
Example #14
    def _image_prune_getprops(self, orig_img, swtlabelled, minCC_comppx,
                              maxCC_comppx, acceptCC_aspectratio):
        """
        Function to find Prune the Connected Component Labelled image. Based on
        the parameters values the Connected Component mask will be pruned and 
        and certain properties will be calculated against each Component,
        as mentioned below :

        # Note
        *CC :Connected Component
        *bbm : Bounding Box Mininum. This is a result to cv2.minAreaRect function,
               which would return the minimum area rectangle bounding box for a 
               CC
        *sw : Stroke Width within that component

            pixels : Number of pixels whithin a particular CC
            bbm_h : Minimum Bounding Box Height
            bbm_w : Minimum Bounding Box Width
            bbm_cx : Minimum Bounding Box Centre x
            bbm_cy : Minimum Bounding Box Cntre y
            bbm_ar : Minimum Bounding Box Aspect Ratio (bbm_w/bbm_h)
            bbm_bbox : Cooridinates of the bbm vertices
            bbm_anchor : Vertice corresponding to least x coord
            bbm_outline : BBM outline, i.e outer contour
            bbm_ang : Angle of orientation for that BBM. Both bbm_ang and 180-bbm_ang
                      can be valid
            img_color_mean : Mean (R,G,B) tuple values of the original image
                             masked by that component
            img_color_median : Median (R,G,B) tuple values of the original image
                               masked by that component
            sw_countdict : Value Counts for the different stroke widths within that
                           CC
            sw_var : Stroke Width variance within each CC
            sw_median : Median stroke Width within each CC
            sw_mean : Mean stroke Width within each CC

        PRUNE PARAMETERS : minCC_comppx, maxCC_comppx, acceptCC_aspectratio
        OTHER PARAMETERS : orig_img, swtlabelled


        parameters
        --------------------------------------
        orig_img : nd.ndarray, required
            Original Image ndarray.

        swtlabelled : nd.ndarray, required
            Connected Components labelled mask after SWT.

        minCC_comppx : int, optional, default : 50
            Pruning Paramter : Minimum number of pixels to reside within each CC.
        
        minCC_comppx : int, optional, default : 10000
            Pruning Paramter : Maximum number of pixels to reside within each CC.
        
        acceptCC_aspectratio : float, default : 5
            Pruning Paramter : Acceptable Inverse of the Aspect Ratio of each CC.

        returns
        --------------------------------------
        nd.ndarray - swtlabelled_pruned

        Returns Pruned SWT Labelled Image
        """
        swtlabelled_pruned = swtlabelled.copy()
        lc_count = print_valcnts(swtlabelled_pruned, _print=False)
        # Pruning based on min and max number of pixels in a connected component
        for label, count in lc_count.items():
            if count < minCC_comppx or count > maxCC_comppx:
                swtlabelled_pruned[swtlabelled_pruned == label] = 0
        lc_count = print_valcnts(swtlabelled_pruned, _print=False)

        # Pruning based on the aspect ratio
        for label, pixel_count in lc_count.items():

            lmask = (swtlabelled_pruned == label).astype(np.uint8).copy()

            cntrs = cv2.findContours(lmask, cv2.RETR_EXTERNAL,
                                     cv2.CHAIN_APPROX_SIMPLE)
            cntrs = cntrs[0] if len(cntrs) == 2 else cntrs[1]

            rotrect = cv2.minAreaRect(cntrs[0])
            label_height = np.round(max(rotrect[1]), 2)
            label_width = np.round(min(rotrect[1]), 2)
            label_aspectratio = label_width / label_height

            if not ((1 / acceptCC_aspectratio) < label_aspectratio <
                    acceptCC_aspectratio):
                swtlabelled_pruned[swtlabelled_pruned == label] = 0
            else:
                bbm_cx, bbm_cy = np.round(rotrect[0], 2)
                bbm_bbox = cv2.boxPoints(rotrect)

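                # anchor: the box vertex with the smallest x coordinate; the
                # orientation angle is taken along the edge to the vertex at
                # the second-smallest distance from it (the longer box side)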
                anchor_point = bbm_bbox[np.argmax(
                    (bbm_bbox == np.min(bbm_bbox[:, 0])).sum(axis=1))]
                remain_point = np.array(
                    [k for k in bbm_bbox if (k != anchor_point).any()])
                all_lengths = [
                    np.linalg.norm(k - anchor_point) for k in remain_point
                ]
                anchor_armlength_point = remain_point[all_lengths == np.sort(
                    all_lengths)[1]][0]

                bbox_ang = np.arctan(
                    -(anchor_armlength_point[1] - anchor_point[1]) /
                    (anchor_armlength_point[0] - anchor_point[0]))
                bbox_ang = np.rad2deg(bbox_ang)
                if bbox_ang < 0:
                    bbox_ang = 180 + bbox_ang

                self.components_props[label] = COMPONENT_PROPS.copy()
                self.components_props[label]['pixels'] = pixel_count
                self.components_props[label]['bbm_h'] = label_height
                self.components_props[label]['bbm_w'] = label_width
                self.components_props[label]['bbm_cx'] = bbm_cx
                self.components_props[label]['bbm_cy'] = bbm_cy
                self.components_props[label]['bbm_ar'] = label_aspectratio
                self.components_props[label]['bbm_bbox'] = bbm_bbox
                self.components_props[label]['bbm_anchor'] = anchor_point
                self.components_props[label]['bbm_outline'] = cntrs
                self.components_props[label]['bbm_ang'] = bbox_ang

                _iy, _ix = lmask.nonzero()
                mean_rgbcolor = self.orig_img[_iy, _ix].mean(axis=0)
                median_rgbcolor = np.median(self.orig_img[_iy, _ix], axis=0)

                if median_rgbcolor.shape == () and mean_rgbcolor.shape == ():
                    self.components_props[label]['img_color_mean'] = str(
                        [np.floor(mean_rgbcolor)])
                    self.components_props[label]['img_color_median'] = str(
                        [np.floor(median_rgbcolor)])
                else:
                    self.components_props[label]['img_color_mean'] = str(
                        np.floor(mean_rgbcolor).tolist())
                    self.components_props[label]['img_color_median'] = str(
                        np.floor(median_rgbcolor).tolist())

                sw_xyvals = self.swt_mat[_iy, _ix].copy()
                sw_countdict = print_valcnts(sw_xyvals,
                                             _print=False,
                                             remove_0=False)

                self.components_props[label]['sw_countdict'] = str(
                    sw_countdict)
                self.components_props[label]['sw_var'] = np.var(sw_xyvals)
                self.components_props[label]['sw_median'] = np.median(
                    sw_xyvals)
                self.components_props[label]['sw_mean'] = np.mean(sw_xyvals)

        return swtlabelled_pruned
Example #15
    def contourAnalyze(self, contours, pname, **numContours):

        if ('num' in numContours):
            self.numContours = numContours['num']
            #print(self.numContours)

        #takes contours above {minArea} and below {maxArea}
        rcontours = []
        #holds the {# of contours} indices
        indList = []
        #holds the {# of contours} positions
        mxyList = []

        ind1 = -1

        for i in range(len(contours)):
            #------ Basic Declarations ---------------------

            p = contours[i]
            #desired area
            area = cv2.contourArea(contours[i])

            #-------------------------------------------------

            #------- Contour Calculations --------------------

            if area >= self.minArea and area <= self.maxArea:

                #increase the first index for position saving
                ind1 = ind1 + 1
                indList.append(ind1)

                #make rotating boxes around points
                rect = cv2.minAreaRect(p)
                box = cv2.boxPoints(rect)
                box = np.int0(box)

                #put the box onto the original frame
                cv2.drawContours(self.frame, [box], 0, (0, 255, 0), 2)

                #store the position of the contoured objects
                #ASSUMES THAT NEW CONTOURS AREN'T BEING INTRODUCED
                m = cv2.moments(p)
                mx = int(m['m10'] / m['m00'])
                my = int(m['m01'] / m['m00'])
                mxyList.append([mx, my])
                cv2.circle(self.frame, (mx, my), 5, (0, 0, 255), 2)
                cv2.putText(self.frame, 'HEY', (mx, my),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, 255, 2)
                rcontours.append(p)

        if len(rcontours) != self.numContours:
            #print('\nduck\n')
            return False, rcontours

        elif len(rcontours) == self.numContours:
            #print('\nPEEPEE\n')
            self.save(pname, indList, mxyList, pos)  # pos is assumed to be defined at module scope

            #saves newly edited pos to file
            with open(pname, 'w+b') as file:
                pickle.dump(pos, file)

            #returns check and contours that meet area standards
            return True, rcontours