Code example #1
def find_road_area(grey_frame, original_frame, grey_copy):
    lines = cv.HoughLinesP(grey_frame,
                           1,
                           np.pi / 180,
                           35,
                           minLineLength=80,
                           maxLineGap=200)
    # img = np.copy(original_frame)
    # blank_image = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)
    line_left = [0, 0, 0, 0, 0, 0]
    line_right = [0, 0, 0, 0, 0, 0]
    roi_vertices = [(0, 0), (0, 0), (0, 0)]
    found_left = False
    found_right = False
    if lines is not None:
        for line in lines:
            for x1, y1, x2, y2 in line:
                if x2 == x1:
                    continue  # skip vertical segments to avoid division by zero
                m = (y2 - y1) / (x2 - x1)
                if 0.7 > m > 0.3:
                    found_right = True
                    # cv.line(blank_image, (x1, y1), (x2, y2), (0, 255, 0), thickness=5)
                    b = y1 - (m * x1)
                    # line_right = [x1, y1, ((original_frame.shape[0] * 0.94 - b) / m) - 20,
                    #               original_frame.shape[0] * 0.94, m, b]
                    line_right = [x1, y1, x2 + 10, y2, m, b]
                    if found_right and found_left:
                        break
                if -0.7 < m < -0.3:
                    found_left = True
                    # cv.line(blank_image, (x1, y1), (x2, y2), (0, 255, 0), thickness=5)
                    b = y1 - (m * x1)
                    # line_left = [((original_frame.shape[0] * 0.92 - b) / m) + 20,
                    #              original_frame.shape[0] * 0.92, x2, y2, m, b]
                    line_left = [x1 + 20, y1, x2, y2, m, b]
                    if found_right and found_left:
                        break
    # original_frame = cv.addWeighted(img, 0.8, blank_image, 1, 0.0)
    if found_right and found_left:
        if line_left != line_right:  # skip if both sides picked the same segment
            mx = line_right[4] - line_left[4]
            if mx != 0:  # equal slopes means no intersection; skip
                b = line_left[5] - line_right[5]
                if b == 0:
                    cross_x = 0
                else:
                    cross_x = int(b / mx)
                cross_y = int(line_right[4] * cross_x + line_right[5])
                roi_vertices = [(line_left[0], line_left[1]),
                                (cross_x, cross_y),
                                (line_right[2], line_right[3])]
        # roi_vertices = [(line_left[0], line_left[1]), (line_left[2], line_left[3]),
        #                 (line_right[0], line_right[1]), (line_right[2], line_right[3])]
    else:
        cv.putText(original_frame, 'Lane Not Found',
                   (int(original_frame.shape[1] / 2 - 100),
                    int(original_frame.shape[0] / 2)), cv.FONT_HERSHEY_SIMPLEX,
                   1, (0, 255, 0), 2, cv.LINE_AA)
    roi_frame = region_of_interest(grey_copy, np.array([roi_vertices],
                                                       np.int32))
    return roi_frame, original_frame
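Example #1 (and several of the later examples) calls a region_of_interest helper that is not included in the snippet; code example #24 below contains what looks like a hard-coded variant of the same idea. A minimal sketch matching the two-argument call used here, offered only as an illustration of the assumed behaviour (the original project's version may differ):

import cv2 as cv
import numpy as np

def region_of_interest(img, vertices):
    # Build a black mask the same shape as img, paint the polygon white,
    # and keep only the pixels of img that fall inside the polygon.
    mask = np.zeros_like(img)
    cv.fillPoly(mask, vertices, 255)
    return cv.bitwise_and(img, mask)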
Code example #2
File: main.py  Project: bahao113/Xe-tu-hanh-OpenCV
def detect_line_segments(cropped_edges):
    rho = 1
    theta = np.pi / 180
    min_threshold = 10
    line_segments = cv.HoughLinesP(cropped_edges, rho, theta, min_threshold,
                                   np.array([]), minLineLength=20, maxLineGap=0)
    return line_segments
Code example #3
def find_text_angle(dilated_img, org_img):
    """
        org_img - original image
        dilated_img - dilated image
    """
    lines = cv2.HoughLinesP(dilated_img,
                            rho=1,
                            theta=np.pi / 180,
                            threshold=30,
                            minLineLength=5,
                            maxLineGap=20)

    if lines is None or len(lines) == 0:
        # no lines detected: return the inputs unrotated
        return org_img, dilated_img

    nb_lines = len(lines)
    angle = 0

    for line in lines:
        x1, y1, x2, y2 = line[0]
        angle += math.atan2((y2 - y1), (x2 - x1))

    angle /= nb_lines

    rotated = rotate_image(org_img, angle - 1)
    rot_dilated = rotate_image(dilated_img, angle - 1)

    return rotated, rot_dilated
Code example #4
def get_dim(img):
    l, r, t, b = 10000, 0, 10000, 0
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    edges = cv2.Canny(gray, 100, 100)
    # cv2.imshow('edges', edges)
    # cv2.waitKey(0)

    lines = cv2.HoughLinesP(edges, 1, np.pi / 180, 10, minLineLength=50)
    if lines is not None:
        for line in lines:
            for x1, y1, x2, y2 in line:
                # print(x1, y1, x2, y2)
                if abs(x1 - x2) < 20:  # near-vertical edge: candidate left/right border
                    x = int((x1 + x2) / 2)
                    l, r = min(l, x), max(r, x)
                if abs(y1 - y2) < 20:  # near-horizontal edge: candidate top/bottom border
                    y = int((y1 + y2) / 2)
                    t, b = min(t, y), max(b, y)

    # cv2.line(img, (l, t), (l, b), (0, 255, 0), 2)
    # cv2.line(img, (r, t), (r, b), (0, 255, 0), 2)
    # cv2.line(img, (l, t), (r, t), (0, 255, 0), 2)
    # cv2.line(img, (l, b), (r, b), (0, 255, 0), 2)
    # cv2.imshow('img', img)
    # cv2.waitKey(0)

    return 60 * 25, 160, l, r, t, b
Code example #5
def hough_transfer(image):
    """
    Hough transform: find all straight lines in the image
    :param image:
    :return:
    """
    lines = cv2.HoughLinesP(image, 1, np.pi / 180, 180,
                            minLineLength=20, maxLineGap=15)
    return lines
Code example #6
def HouLinep(img, canny_img):
    minLineLength = 100
    maxLineGap = 10
    lines = cv.HoughLinesP(canny_img, 1, np.pi / 180, 120,
                           minLineLength=minLineLength, maxLineGap=maxLineGap)
    if lines is not None:
        for line in lines:
            x1, y1, x2, y2 = line[0]
            cv.line(img, (x1, y1), (x2, y2), (0, 255, 0), 2)

    cv.imshow('houp', img)
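A note that applies to this and a few other examples on this page: in the OpenCV Python binding the fifth positional parameter of HoughLinesP is the optional lines output array, not minLineLength, so the length and gap limits are safest passed as keywords:

# cv.HoughLinesP(image, rho, theta, threshold[, lines[, minLineLength[, maxLineGap]]])
lines = cv.HoughLinesP(canny_img, 1, np.pi / 180, 120,
                       minLineLength=100, maxLineGap=10)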
Code example #7
def hough_lines(image_org, img, rho, theta, threshold, min_line_len, max_line_gap):    # image_org: the original input image, kept for later colour overlay; img is the masked_edges ROI mask
    """
    `img` should be the output of a Canny transform.
    Returns an image with hough lines drawn.
    """
    lines = cv2.HoughLinesP(img, rho, theta, threshold, np.array([]), minLineLength=min_line_len, maxLineGap=max_line_gap)
    line_img = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)

    draw_lines(image_org, line_img, lines)
    return line_img
Code example #8
def upload():
    # file=request.files['temp']
    f = request.files['temp']
    tempname = request.form['tempname']
    temp_path = '../templates/'
    name = f.filename.replace(' ', '_')
    # print(tempname)
    f.save(secure_filename(f.filename))

    inputImage = cv2.imread(name)
    inputImageGray = cv2.cvtColor(inputImage, cv2.COLOR_BGR2GRAY)
    edges = cv2.Canny(inputImageGray, 150, 200, apertureSize=3)

    # print(edges)
    edges = abs(cv2.subtract(255, edges))

    minLineLength = 30
    maxLineGap = 5
    lines = cv2.HoughLinesP(edges, 1, np.pi / 180, 30,
                            minLineLength=minLineLength, maxLineGap=maxLineGap)
    if lines is not None:
        for line in lines:
            for x1, y1, x2, y2 in line:
                pts = np.array([[x1, y1], [x2, y2]], np.int32)
                cv2.polylines(inputImage, [pts], True, (0, 255, 0))

    font = cv2.FONT_HERSHEY_SIMPLEX
    cv2.putText(inputImage, "Tracks Detected", (500, 250), font, 0.5, 255)

    os.remove(name)

    filename = tempname + '.png'

    #Following converts white pixels to transparent
    imagePIL = Image.fromarray(edges)
    imagePIL = imagePIL.convert("RGBA")
    datas = imagePIL.getdata()

    newData = []
    for item in datas:
        if item[0] == 255 and item[1] == 255 and item[2] == 255:
            newData.append((255, 255, 255, 0))
        else:
            if item[0] > 150:
                newData.append((0, 0, 0, 255))
            else:
                newData.append(item)

    imagePIL.putdata(newData)
    imagePIL.save(temp_path + filename, "PNG")

    return send_file(temp_path + filename, mimetype='image/png')
Code example #9
def hough_lines(img, rho, theta, threshold, min_line_len, max_line_gap):
    """
    `img` should be the output of a Canny transform.
    """
    lines = cv2.HoughLinesP(img,
                            rho,
                            theta,
                            threshold,
                            np.array([]),
                            minLineLength=min_line_len,
                            maxLineGap=max_line_gap)
    line_img = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)
    draw_lines(line_img, lines)
    return line_img
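The hough_lines helpers in examples #7 and #9 delegate the drawing to a draw_lines function that is not reproduced here (and whose signature differs between the two examples). A minimal two-argument sketch consistent with the call in example #9, shown only as an illustration:

import cv2

def draw_lines(img, lines, color=(255, 0, 0), thickness=2):
    # Draw every detected segment onto img in place and return it.
    if lines is None:
        return img
    for line in lines:
        x1, y1, x2, y2 = line[0]
        cv2.line(img, (x1, y1), (x2, y2), color, thickness)
    return img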
Code example #10
def get_hough_lines(img,
                    rho=1,
                    theta=np.pi / 180,
                    threshold=20,
                    min_line_len=20,
                    max_line_gap=300):
    lines = cv2.HoughLinesP(img,
                            rho,
                            theta,
                            threshold,
                            np.array([]),
                            minLineLength=min_line_len,
                            maxLineGap=max_line_gap)
    return lines
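A possible way to use the helper above, assuming a BGR frame as input (illustrative only):

edges = cv2.Canny(cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY), 50, 150)
lines = get_hough_lines(edges)
if lines is not None:
    for x1, y1, x2, y2 in lines[:, 0]:
        cv2.line(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)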
Code example #11
def line_detect_possible_demo(image):  # Hough line transform: draw the detected lines automatically
    gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
    edge = cv.Canny(gray, 50, 150, apertureSize=3)
    lines = cv.HoughLinesP(edge,
                           1,
                           np.pi / 180,
                           200,
                           minLineLength=50,
                           maxLineGap=10)  #返回值是线段始末点坐标
    print(lines)
    for line in lines:
        x1, y1, x2, y2 = line[0]
        cv.line(image, (x1, y1), (x2, y2), (0, 0, 255), 2)
    cv.imshow("result", image)
Code example #12
def find_road_area(grey_frame, cut_frame, cut_frame_shape, cut_frame_ratio):
    edges = cv.Canny(grey_frame, 100, 150, apertureSize=3)
    lines = cv.HoughLinesP(edges,
                           1,
                           np.pi / 180,
                           150,
                           minLineLength=100,
                           maxLineGap=450)
    centered_line_left = [
        0, cut_frame_shape[1],
        int(cut_frame_shape[0] / 2),
        int(cut_frame_shape[1] / 3)
    ]
    centered_line_right = [
        cut_frame_shape[0], cut_frame_shape[1],
        int(cut_frame_shape[0] / 2),
        int(cut_frame_shape[1] / 3)
    ]
    most_left_corner = (cut_frame_shape[0] +
                        cut_frame_shape[1]) / (cut_frame_ratio * 2)
    most_right_corner = (cut_frame_shape[0] + cut_frame_shape[1]) - (
        (cut_frame_shape[0] + cut_frame_shape[1]) / (cut_frame_ratio * 2))
    # A line counts as a candidate only if the sum of its start-point axes is
    # within (cut_frame_shape[0] + cut_frame_shape[1]) / (cut_frame_ratio * 2) px
    # of the corresponding bottom corner; keep the candidate closest to each corner,
    # and fall back to the default centered_lines if every line is too far away.
    if lines is not None:
        for line in lines:
            x1, y1, x2, y2 = line[0]
            if x1 + (cut_frame_shape[1] - y1) < most_left_corner:
                if x2 + y2 < cut_frame_shape[0]:
                    most_left_corner = x1 + (cut_frame_shape[1] - y1)
                    centered_line_left = line[0]
            if x2 + y2 > most_right_corner:
                if x1 > y1 * cut_frame_ratio:
                    most_right_corner = x2 + y2
                    centered_line_right = line[0]
            cv.line(cut_frame, (x1, y1), (x2, y2), (0, 175, 255), 2)
    roi_vertices = [(centered_line_left[0], centered_line_left[1]),
                    (centered_line_left[2], centered_line_left[3]),
                    (centered_line_right[2], centered_line_right[3]),
                    (centered_line_right[0], centered_line_right[1])]
    cv.line(cut_frame, (centered_line_left[0], centered_line_left[1]),
            (centered_line_left[2], centered_line_left[3]), (225, 227, 98), 2)
    cv.line(cut_frame, (centered_line_right[0], centered_line_right[1]),
            (centered_line_right[2], centered_line_right[3]), (227, 98, 102),
            2)
    roi = region_of_interest(grey_frame, np.array([roi_vertices], np.int32))
    return roi
Code example #13
def houghline_transform(img):
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    Gray_scaled_image = Gray_image(hsv)
    filtered_image = region_of_interest(img, Gray_scaled_image)
    lines = cv2.HoughLinesP(Gray_scaled_image, 1, np.pi / 180, 50)

    if lines is not None:
        for line in lines:
            # lines is an array of arrays; each inner array holds one
            # segment's start and end points: x1, y1, x2, y2
            x1, y1, x2, y2 = line[0]
            # draw a white line with thickness 6 on the grayscale image
            cv2.line(Gray_scaled_image, (x1, y1), (x2, y2), (255, 255, 255), 6)
            # draw a blue (BGR) line with thickness 6 on the original image
            cv2.line(img, (x1, y1), (x2, y2), (255, 0, 0), 6)

    showImage(img, Gray_scaled_image, filtered_image)

    #showPlottedImage(Gray_scaled_image)
    return lines
Code example #14
def process(image):
    print(image.shape)
    height = image.shape[0]
    width = image.shape[1]
    roi_vertices = [
        (0, height),
        (width/2, height/2),
        (width, height)
    ]

    gray_image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    canny_image = cv2.Canny(gray_image, 100, 120)
    cropped_image = roi(canny_image, np.array([roi_vertices], np.int32))

    lines = cv2.HoughLinesP(cropped_image, rho=6, theta=np.pi/180,
                            threshold=160, lines=np.array([]), minLineLength=40, maxLineGap=25)

    image_with_lines = draw_line(image, lines)
    return image_with_lines
Code example #15
def videocapture():  # process the video
    cap = cv2.VideoCapture("test2.mp4")
    while (cap.isOpened()):
        et, frame = cap.read()
        if not et:
            break  # end of video or read error: stop instead of spinning forever
        canny_image = canny(frame)
        cropped_image = region_of_interest(canny_image)
        lines = cv2.HoughLinesP(cropped_image,
                                2,
                                np.pi / 180,
                                70,
                                np.array([]),
                                minLineLength=40,
                                maxLineGap=5)
        averaged_lines = average_slope_intercept(frame, lines)
        line_image = display_lines(frame, averaged_lines)
        combo_image = cv2.addWeighted(frame, 0.8, line_image, 1, 1)
        cv2.imshow("result", combo_image)
        cv2.waitKey(1)
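Examples #15, #16, #23 and #24 rely on average_slope_intercept and display_lines helpers that are not shown on this page. A rough sketch of the usual lane-averaging approach, under the assumption that the originals follow the common tutorial pattern (details such as the y-range are guesses):

import cv2
import numpy as np

def make_coordinates(image, slope, intercept):
    # Turn a slope/intercept pair into pixel endpoints spanning the
    # lower part of the frame (bottom edge up to ~60% of the height).
    y1 = image.shape[0]
    y2 = int(y1 * 0.6)
    x1 = int((y1 - intercept) / slope)
    x2 = int((y2 - intercept) / slope)
    return np.array([x1, y1, x2, y2])

def average_slope_intercept(image, lines):
    # Split segments into left (negative slope) and right (positive slope)
    # groups, average each group and return one line per side.
    if lines is None:
        return None
    left, right = [], []
    for line in lines:
        x1, y1, x2, y2 = line[0]
        if x1 == x2:
            continue  # ignore vertical segments
        slope, intercept = np.polyfit((x1, x2), (y1, y2), 1)
        if abs(slope) < 1e-3:
            continue  # ignore near-horizontal segments
        (left if slope < 0 else right).append((slope, intercept))
    averaged = []
    if left:
        averaged.append(make_coordinates(image, *np.average(left, axis=0)))
    if right:
        averaged.append(make_coordinates(image, *np.average(right, axis=0)))
    return np.array(averaged)

def display_lines(image, lines):
    # Draw the averaged lines on a black canvas the same size as the frame.
    line_image = np.zeros_like(image)
    if lines is not None:
        for x1, y1, x2, y2 in lines:
            cv2.line(line_image, (x1, y1), (x2, y2), (255, 0, 0), 10)
    return line_image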
Code example #16
def test_image(photo):
    image = cv2.cvtColor((photo), cv2.COLOR_BGR2RGB)
    canny_image = do_canny(cv2.cvtColor(image, cv2.COLOR_RGB2GRAY))
    crop_img = region_interest(canny_image, np.array([vertices], np.int32))
    lines = cv2.HoughLinesP(crop_img,
                            3,
                            np.pi / 180,
                            100,
                            np.array([]),
                            minLineLength=70,
                            maxLineGap=50)
    line_image = drawline(image, average_slope_intercept(photo, lines))
    angle = compute_steering_angle(average_slope_intercept(photo, lines))
    image_with_lines = cv2.addWeighted(photo, 0.8, line_image, 1, 1)
    image_with_direction_line = cv2.addWeighted(
        image_with_lines, 0.8, display_heading_line(image, angle), 1, 1)
    cv2.putText(image_with_direction_line, "steering angle: " + str(angle),
                (10, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 255, 0), 2,
                cv2.LINE_AA)
    return cv2.imshow("combine", image_with_direction_line)
Code example #17
def hough_contours(img):
    """
    Finds contours with Probabilistic Hough Lines
    Lines are drawn on the image

    :param img: Image
    :return: img with lines drawn
    """
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    edges = auto_canny(gray)
    lines = cv2.HoughLinesP(edges, 1, np.pi / 180, 100, np.array([]), 50, 5)
    cv2.imshow("CANNY", edges)
    cv2.waitKey()

    for line in lines:
        for x1, y1, x2, y2 in line:
            cv2.line(img, (x1, y1), (x2, y2), (20, 220, 20), 2)

    cv2.imshow("lines", img)
    cv2.waitKey()
    return img
Code example #18
def hough_lines(img: np.ndarray, min_line_length=100, max_line_gap=10) -> list:
    """
    Detect lines using hough transformation

    Args:
        img: image as numpy array
        min_line_length: minimal length of detected line
        max_line_gap: max gap between lines segments to treat them as single line

    Returns:
        list of lines
    """
    edges = canny(img)
    result = cv2.HoughLinesP(
        edges,
        1,
        np.pi / 180,
        100,
        minLineLength=min_line_length,
        maxLineGap=max_line_gap,
    )
    return result if result is not None else []
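Examples #18 and #20 call a canny helper that is not part of the snippets. A plausible minimal version, assuming the usual grayscale-blur-Canny pipeline with thresholds similar to the ones used elsewhere on this page:

import cv2

def canny(img, low=50, high=150):
    # Grayscale (if the input is colour), smooth, then run the Canny detector.
    if img.ndim == 3:
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    blurred = cv2.GaussianBlur(img, (5, 5), 0)
    return cv2.Canny(blurred, low, high)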
Code example #19
def attempt_hough():

    # get the images
    # img_paths = u.get_images_in_dir('images')
    img_paths = [
        'images/prac03ex01img01.png', 'images/prac03ex03img01.png',
        'images/prac03ex03img02.jpg'
    ]

    for path in img_paths:
        # read in the image as color
        image_col = u.read_color(path)

        # make a grayscale copy
        image = u.bgr_to_gray(image_col)

        # apply a canny edge detector
        canny = u.apply_auto_canny(image, 0.33)

        # apply a hough transform to get the lines
        lines = cv2.HoughLinesP(canny,
                                1,
                                np.pi / 180,
                                30,
                                minLineLength=50,
                                maxLineGap=250)

        # draw the hough lines on the image
        hough_image = u.draw_hough_lines(image_col, lines)

        # put everything into a plot
        plot = u.create_mpl_subplot(
            [canny, image, u.bgr_to_rgb(image_col), hough_image], False)

        # show the plot
        # plot.show()

        u.save_mpl_subplot(plot,
                           'line_output/{}.png'.format(path.split('/')[-1]))
Code example #20
def hough(
    img: np.ndarray,
    min_line_length=100,
    max_line_gap=10,
    source: np.ndarray = None,
    draw=True,
) -> np.ndarray:
    edges = canny(img)
    lines = cv2.HoughLinesP(
        edges,
        1,
        np.pi / 180,
        100,
        minLineLength=min_line_length,
        maxLineGap=max_line_gap,
    )
    if lines is not None:
        if draw:
            if source is not None:
                source = draw_lines(source, lines)
            else:
                img = draw_lines(img, lines)
    return img if source is None else source
Code example #21
File: run.py  Project: aniruddha2000/Codebadge
def upload():
    # file=request.files['temp']
    f = request.files['temp']
    tempname = request.form['tempname']
    temp_path = '../templates/'
    name = f.filename.replace(' ', '_')
    print(tempname)
    f.save(secure_filename(f.filename))

    inputImage = cv2.imread(name)
    inputImageGray = cv2.cvtColor(inputImage, cv2.COLOR_BGR2GRAY)

    edges = cv2.Canny(inputImageGray, 150, 200, apertureSize=3)

    print(edges)
    edges = abs(cv2.subtract(255, edges))

    minLineLength = 30
    maxLineGap = 5
    lines = cv2.HoughLinesP(edges, 1, np.pi / 180, 30,
                            minLineLength=minLineLength, maxLineGap=maxLineGap)
    if lines is not None:
        for line in lines:
            for x1, y1, x2, y2 in line:
                pts = np.array([[x1, y1], [x2, y2]], np.int32)
                cv2.polylines(inputImage, [pts], True, (0, 255, 0))

    font = cv2.FONT_HERSHEY_SIMPLEX
    cv2.putText(inputImage, "Tracks Detected", (500, 250), font, 0.5, 255)

    cv2.imwrite(temp_path + tempname + '.jpeg', edges)
    cv2.waitKey(0)

    os.remove(name)

    filename = tempname + '.jpeg'
    return send_file(temp_path + filename, mimetype='image/jpeg')
Code example #22
def process(image):
    # Define region of interest
    height = image.shape[0]
    width = image.shape[1]
    region_of_interest_vertices = [(0, height), (width / 4, 2 * height / 3),
                                   (3 * width / 4, 2 * height / 3),
                                   (width, height)]

    gray_image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    canny_image = cv2.Canny(gray_image, 100, 200)
    cropped_image = region_of_interest(
        canny_image,
        np.array([region_of_interest_vertices], np.int32),
    )
    lines = cv2.HoughLinesP(cropped_image,
                            rho=6,
                            theta=np.pi / 180,
                            threshold=30,
                            lines=np.array([]),
                            minLineLength=40,
                            maxLineGap=50)

    image_with_lines = draw_the_line(image, lines)
    return image_with_lines
Code example #23

# Path of dataset directory
cap = cv.VideoCapture("datas/videos/roadway_01.mp4")
while (cap.isOpened()):
    _, frame = cap.read()
    if frame is None:
        cv.waitKey(0)
        break
    canny_image = canny_edge_detector(frame)
    cropped_image = region_of_interest(canny_image)

    lines = cv.HoughLinesP(cropped_image,
                           2,
                           np.pi / 180,
                           100,
                           np.array([]),
                           minLineLength=40,
                           maxLineGap=5)
    if lines is not None:
        # print(lines)
        averaged_lines = average_slope_intercept(frame, lines)
        line_image = display_lines(frame, averaged_lines)
        combo_image = cv.addWeighted(frame, 0.8, line_image, 1, 1)
        cv.imshow("results", combo_image)

    # waitKey(1) waits 1 ms between frames so the window stays responsive
    # (waitKey(0) would pause indefinitely on every frame);
    # pressing 'q' breaks out of the loop.
    if cv.waitKey(1) & 0xFF == ord('q'):
        break
Code example #24
def region_of_interest(image):
    polygons = np.array([[(205, 621), (1254, 311), (751, 721),
                          (1672, 429)]])  # creating the polygon
    mask = np.zeros_like(image)  # blank mask array
    cv2.fillPoly(mask, polygons, 255)  # fill the polygon with white
    masked_image = cv2.bitwise_and(image, mask)  # masking the image
    return masked_image


frame = cv2.imread('test_image.jpg')
cv2.imshow("Output", frame)
canny_image = canny(frame)
cropped_canny = region_of_interest(canny_image)
lines = cv2.HoughLinesP(cropped_canny,
                        2,
                        np.pi / 180,
                        100,
                        np.array([]),
                        minLineLength=40,
                        maxLineGap=5)
averaged_lines = average_slope_intercept(frame, lines)
line_image = display_lines(frame, averaged_lines)
combo_image = cv2.addWeighted(frame, 0.8, line_image, 1, 1)
plt.imshow(combo_image)
plt.show()

# # video
# cap = cv2.VideoCapture("nyc.mp4")
# while(cap.isOpened()):
#     ret, frame = cap.read()
#     if ret == True:
#         canny_image = canny(frame)
Code example #25
def main():
    # size of camera output
    cam_size = 600

    # label's config
    font_scale = 1.5
    font = cv2.FONT_HERSHEY_PLAIN
    text_background = (0, 0, 0)
    text_offset_x = 10
    text_offset_y = cam_size - 25

    # getting all class names
    with open('class_names.txt', 'r') as f:
        class_names = f.read().splitlines()

    # loading the model
    model = keras.models.load_model('doodle_model.h5')

    # starting cv2 video capture
    cap = cv2.VideoCapture(0)
    while True:
        # getting middle of cropped camera output
        crop_size = int(cam_size / 2)

        _, frame = cap.read()

        # white background for the detected lines to be drawn onto
        img = 255 * np.ones(shape=frame.shape, dtype=np.uint8)

        # line detection
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        edges = cv2.Canny(gray, 75, 150)
        lines = cv2.HoughLinesP(edges, 1, np.pi / 180, 15, maxLineGap=10)
        if lines is not None:
            for line in lines:
                x1, y1, x2, y2 = line[0]
                cv2.line(img, (x1, y1), (x2, y2), (0, 0, 0), 5)

        # crop the image to the configured output size
        mid_h = int(img.shape[0] / 2)
        mid_w = int(img.shape[1] / 2)
        img = img[mid_h - crop_size:mid_h + crop_size,
                  mid_w - crop_size:mid_w + crop_size]

        # converting and normalizing image to array
        # also expanding dims for further keras prediction
        im = Image.fromarray(img, 'RGB')
        im = im.resize((75, 75))
        img_array = np.array(im) / 255.
        img_array = np.expand_dims(img_array, axis=0)

        # classifying the doodle
        pred = model.predict(img_array) * 100

        # getting class name and score of the best prediction
        index, _ = max(enumerate(pred[0]), key=operator.itemgetter(1))

        # generating output text
        text = '{} {}%'.format(class_names[index], int(pred[0][index]))

        # generating text box
        (text_width, text_height) = cv2.getTextSize(text,
                                                    font,
                                                    fontScale=font_scale,
                                                    thickness=1)[0]
        box_coords = ((text_offset_x, text_offset_y),
                      (text_offset_x + text_width - 2,
                       text_offset_y - text_height - 2))

        # drawing text box, text and showing the lines for better camera adjustment
        cv2.rectangle(img, box_coords[0], box_coords[1], text_background,
                      cv2.FILLED)
        cv2.putText(img,
                    text, (text_offset_x, text_offset_y),
                    font,
                    fontScale=font_scale,
                    color=(255, 255, 255),
                    thickness=1)
        cv2.imshow('AIlias', img)

        key = cv2.waitKey(1)
        if key == 27:
            break

    # ending cv2 cam capture
    cap.release()
    cv2.destroyAllWindows()
Code example #26
pytesseract.pytesseract.tesseract_cmd = r'C:\Program Files\Tesseract-OCR\tesseract'

#####################################

img = cv2.imread(r'images\Passport.png', 0)  # raw string: avoids backslash-escape warnings
(height, width) = img.shape

#####################################

img_copy = img.copy()

img_canny = cv2.Canny(img_copy, 50, 100, apertureSize=3)

img_hough = cv2.HoughLinesP(img_canny,
                            1,
                            math.pi / 180,
                            100,
                            minLineLength=100,
                            maxLineGap=10)

(x, y, w,
 h) = (np.amin(img_hough, axis=0)[0, 0], np.amin(img_hough, axis=0)[0, 1],
       np.amax(img_hough, axis=0)[0, 0] - np.amin(img_hough, axis=0)[0, 0],
       np.amax(img_hough, axis=0)[0, 1] - np.amin(img_hough, axis=0)[0, 1])

img_roi = img_copy[y:y + h, x:x + w]

#####################################

img_roi = cv2.rotate(img_roi, cv2.ROTATE_90_COUNTERCLOCKWISE)

(height, width) = img_roi.shape
Code example #27
# cv.createTrackbar('minLine', 'template', 5, 50, nothing)
# cv.createTrackbar('maxlineGap', 'template', 1, 50, nothing)

while True:
    lower = cv.getTrackbarPos('Canny_thresh1', 'template')
    upper = cv.getTrackbarPos('Canny_thresh2', 'template')
    edges = cv.Canny(img_gray, lower, upper, apertureSize=3)
    # thresh = cv.getTrackbarPos('thresh', 'template')
    # minLineLength = cv.getTrackbarPos('minLine', 'template')
    # maxLineGap = cv.getTrackbarPos('maxlineGap', 'template')
    thresh = 100
    minLineLength = 5
    maxLineGap = 1
    lines = cv.HoughLinesP(
        edges,
        1,
        np.pi / 180,
        thresh,  # the second and third arguments (rho, theta) are the accumulator resolutions
        minLineLength=minLineLength,
        maxLineGap=maxLineGap)
    img_ = img.copy()
    if lines is None:
        lines = []
    for line in lines:  # note: the textbook version is wrong here and draws only a single line
        x1, y1, x2, y2 = line[0]
        cv.line(img_, (x1, y1), (x2, y2), (255, 0, 255), 2)
    cv.imshow('template', img_)
    key = cv.waitKey(10)
    if key == 27:
        break
Code example #28
def get_vp(img_input, config):
    """ 
    Return coordinates of vanishing point calculated as the intersection
    of the two lines with min and max gradients.
    If no suitable vanishing point is found a default will be returned.
    """

    # set default vp TODO: get better estimate
    vp = (config['vp'].getint('default_x'), config['vp'].getint('default_y'))

    # get edges
    img = cv2.cvtColor(img_input, cv2.COLOR_RGB2GRAY)
    kernel_size = config['vp'].getint('kernel_size')
    img = cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)
    img = cv2.Canny(img, config['vp'].getint('low_thr'),
                    config['vp'].getint('high_thr'))

    # mask out upper half of image
    height = img.shape[0]
    width = img.shape[1]
    vertices = np.array([[(0, height), (width, height),
                          (width // 2, height // 2)]])
    mask = np.zeros_like(img)
    cv2.fillPoly(mask, vertices, 255)
    img = cv2.bitwise_and(img, mask)

    # Hough transform to find lines
    hough = cv2.HoughLinesP(img,
                            2,
                            np.pi / 180,
                            100,
                            minLineLength=200,
                            maxLineGap=50)
    if (hough is not None):
        # convert [x1,y1,x2,y2] to y=mx+c (m,c)
        hough_mc = [get_line_from_coordinates(line[0]) for line in hough]
        # remove lines that are unrealistically horizontal
        hough_mc = [line for line in hough_mc if (abs(line[0]) > 0.2)]

        # we need at least two lines to find vp
        if (len(hough_mc) > 1):
            # convert gradients to radians
            hough_radc = [(math.atan(line[0]), line[1]) for line in hough_mc]
            # find index of lines with min and max gradients
            min_index = hough_radc.index(min(hough_radc))
            max_index = hough_radc.index(max(hough_radc))
            # find vp as intersection of two lines
            p_vp = calculate_intercept_from_eqs(hough_mc[min_index],
                                                hough_mc[max_index])

            # set vp to intersection if it is reasonable
            y_min = config['vp'].getint('y_min')
            y_max = config['vp'].getint('y_max')
            if y_min < p_vp[1] < y_max and 0 < p_vp[0] < width:
                vp = p_vp
            else:
                logging.info('Could not find vp.')

    # DEBUG: draw lines
    # x1, y1, x2, y2 = hough[5][0]
    # cv2.line(img_input, (x1, y1), (x2, y2), (0,255,0), 5)
    # x1, y1, x2, y2 = hough[9][0]
    # cv2.line(img_input, (x1, y1), (x2, y2), (0,255,0), 5)
    # for line in hough:
    #     x1, y1, x2, y2 = line[0]
    #     print(line[0])
    #     # Draws lines between two coordinates with green color and 5 thickness
    #     cv2.line(img_input, (x1, y1), (x2, y2), (0,255,0), 5)

    return vp
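get_vp depends on get_line_from_coordinates and calculate_intercept_from_eqs, which are not shown. Judging from their names and how they are used, they presumably look roughly like the sketch below (an assumption, not the original code):

def get_line_from_coordinates(coords):
    # Convert [x1, y1, x2, y2] into slope/intercept form y = m*x + c.
    x1, y1, x2, y2 = coords
    if x2 == x1:
        return (float('inf'), float(x1))  # vertical segment: slope undefined
    m = (y2 - y1) / (x2 - x1)
    c = y1 - m * x1
    return (m, c)

def calculate_intercept_from_eqs(line_a, line_b):
    # Intersection of y = m1*x + c1 and y = m2*x + c2.
    m1, c1 = line_a
    m2, c2 = line_b
    if m1 == m2:
        return (float('inf'), float('inf'))  # parallel lines: no intersection
    x = (c2 - c1) / (m1 - m2)
    y = m1 * x + c1
    return (x, y)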
Code example #29
        veins_only = cv2.bitwise_and(thinned_R2, thinned_R2, mask=erosion_resized)
        cv2.imshow('veins_only', veins_only)
        cv2.waitKey(100)

        cropped_veins = veins_only[((point1[1]//4)-50):((point2[1]//4)+50), ((point1[0]//4)-20):((point2[0]//4)+160)].copy()

        # cv2.rectangle(veins_only, ((point1[0]//4)-20,(point1[1]//4)-50), ((point2[0]//4)+150,(point2[1]//4)+50),(255),2)
        cv2.imshow('cropped_veins', cropped_veins)
        cv2.waitKey(100)

        cv2.imwrite('C:/Users/bryan/Desktop/College/FYP/Python Server/Hough/veins.jpg', cropped_veins)

        cropped_veins_BGR = cv2.merge([cropped_veins,cropped_veins,cropped_veins])

        lines = cv2.HoughLinesP(cropped_veins, rho=1, theta=numpy.pi / 180,
                                threshold=9, minLineLength=3, maxLineGap=6)
        # for line in lines:
        #     rho,theta = line[0]
        #     a = numpy.cos(theta)
        #     b = numpy.sin(theta)
        #     x0 = a*rho
        #     y0 = b*rho
        #     x1 = int(x0 + 1000*(-b))
        #     y1 = int(y0 + 1000*(a))
        #     x2 = int(x0 - 1000*(-b))
        #     y2 = int(y0 - 1000*(a))

        #     cv2.line(cropped_veins_BGR,(x1,y1),(x2,y2),(0,0,255),1)
        lines_polar = []
        for line in lines:
            x1,y1,x2,y2 = line[0]
Code example #30
import cv2
import numpy as np

vid = cv2.VideoCapture("./sources/line.mp4")

while 1:
    ret, frame = vid.read()
    if not ret:
        break  # end of video
    frame = cv2.resize(frame, (640, 480))
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    lower_yellow = np.array([18, 94, 140], np.uint8)
    upper_yellow = np.array([48, 255, 255], np.uint8)

    mask = cv2.inRange(hsv, lower_yellow, upper_yellow)
    edges = cv2.Canny(mask, 75, 250)

    lines = cv2.HoughLinesP(edges, 1, np.pi / 180, 50, maxLineGap=50)
    # maxLineGap is chosen so that small gaps along a line are bridged
    # and each line comes out as one continuous piece

    if lines is not None:
        for i in lines:
            x1, y1, x2, y2 = i[0]
            cv2.line(frame, (x1, y1), (x2, y2), (255, 0, 0), 3)

    cv2.imshow("frame", frame)
    if cv2.waitKey(20) & 0xFF == ord('q'):
        break

vid.release()
cv2.destroyAllWindows()