Example #1
import cv2
import numpy as np

img = cv2.imread(r'/home/siddharth/Desktop/lanes.jpg', 1)
img1 = cv2.GaussianBlur(img, (5, 5), 0)
hsv = cv2.cvtColor(img1, cv2.COLOR_BGR2HSV)
low_yellow = np.array([18, 94, 140])
up_yellow = np.array([48, 255, 255])
mask1 = cv2.inRange(hsv, low_yellow, up_yellow)
edges1 = cv2.Canny(mask1, 75, 150)
lines1 = cv2.HoughLinesP(edges1, 1, np.pi / 180, 50, maxLineGap=50)

low_white = np.array([81, 0, 177])
up_white = np.array([128, 22, 255])
mask2 = cv2.inRange(hsv, low_white, up_white)
edges2 = cv2.Canny(mask2, 75, 150)
lines2 = cv2.HoughLinesP(edges2, 1, np.pi / 180, 50, maxLineGap=200)

if lines1 is not None:
    for line1 in lines1:
        x1, y1, x2, y2 = line1[0]
        cv2.line(img, (x1, y1), (x2, y2), (0, 255, 0), 5)

if lines2 is not None:
    for line2 in lines2:
        x1, y1, x2, y2 = line2[0]
        cv2.line(img, (x1, y1), (x2, y2), (0, 255, 0), 5)

cv2.imshow("img1", img)
cv2.waitKey(0)
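A compact variant (a sketch over the same arrays, not part of the original): OR the two color masks together so Canny and the Hough transform run once for both lane colors.

combined = cv2.bitwise_or(mask1, mask2)
edges = cv2.Canny(combined, 75, 150)
lines = cv2.HoughLinesP(edges, 1, np.pi / 180, 50, maxLineGap=50)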
Example #2
        mask = cv2.bitwise_and(sabi_mask, light_mask)
        cv2.imwrite("../PIPEmask/pipemask" + fn, mask)
        cv2.imwrite("../LIGHTmask/lightmask" + fn, light_mask)

        # Find straight lines
        # seven bilateral-filter passes with identical parameters
        blur = resizeim
        for _ in range(7):
            blur = cv2.bilateralFilter(blur, 9, 35, 35)
        edges = cv2.Canny(blur, 200, 500)
        lines = cv2.HoughLinesP(edges,
                                rho=1,
                                theta=np.pi / 360,
                                threshold=30,
                                minLineLength=150,
                                maxLineGap=12)

        #arr=np.array([0])
        if lines is None:
            red_lines_img = resizeim
            arr = np.array([0])
            np.savetxt("../wideM/" + fn.replace(".jpg", "lineM.csv"),
                       arr.T,
                       delimiter=",")

        else:
            for line in lines:
                x1, y1, x2, y2 = line[0]
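The loop body is cut off at this point; a plausible continuation (an assumption, echoing the red_lines_img name used in the None branch) draws each segment in red:

                cv2.line(resizeim, (x1, y1), (x2, y2), (0, 0, 255), 2)
            red_lines_img = resizeim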
Example #3
def btncmd():  # btncmd function definition
    if line_var.get() == 1 and gaussian_var.get() == 1 and haugh_var.get(
    ) == 1:  # Sobel filter, Gaussian on, Hough on
        # Load the image in grayscale
        src = cv2.imread('solidWhiteCurve.jpg', cv2.IMREAD_GRAYSCALE)

        if src is None:
            print('image load failed!!')
            sys.exit()

        # Apply Gaussian blur
        blur_img = cv2.GaussianBlur(src, (0, 0), 1)

        #Sobel Edge Detection
        Sobel_dx = cv2.Sobel(blur_img, cv2.CV_32F, 1, 0)  # derivative in x
        Sobel_dy = cv2.Sobel(blur_img, cv2.CV_32F, 0, 1)  # derivative in y

        mag = cv2.magnitude(Sobel_dx, Sobel_dy)
        mag = np.clip(mag, 0, 255).astype(np.uint8)  # HoughLinesP needs 8-bit input

        # Hough transform
        lines = cv2.HoughLinesP(mag,
                                1,
                                np.pi / 180.,
                                50,
                                minLineLength=30,
                                maxLineGap=4)
        dst = cv2.cvtColor(mag, cv2.COLOR_GRAY2BGR)

        # Draw the lines in red
        if lines is not None:
            for i in range(lines.shape[0]):
                pt1 = (lines[i][0][0], lines[i][0][1])
                pt2 = (lines[i][0][2], lines[i][0][3])
                cv2.line(dst, pt1, pt2, (0, 0, 255), 2, cv2.LINE_AA)

        # Display
        cv2.imshow('src', src)
        cv2.imshow('dst', dst)

        cv2.waitKey()

        cv2.destroyAllWindows()
    elif line_var.get() == 1 and gaussian_var.get() == 1 and haugh_var.get(
    ) == 2:  # Sobel filter, Gaussian on, Hough off
        img = cv2.imread('solidWhiteCurve.jpg', cv2.IMREAD_GRAYSCALE)
        height, width = img.shape[:2]

        blur_img = cv2.GaussianBlur(img, (0, 0), 1)
        Sobel_dx = cv2.Sobel(blur_img, cv2.CV_32F, 1, 0)  # derivative in x
        Sobel_dy = cv2.Sobel(blur_img, cv2.CV_32F, 0, 1)  # derivative in y

        mag = cv2.magnitude(Sobel_dx, Sobel_dy)
        mag = np.clip(mag, 0, 255).astype(np.uint8)
        cv2.imshow('img', img)
        cv2.imshow('mag', mag)
        cv2.waitKey()

        cv2.destroyAllWindows()
    elif line_var.get() == 1 and gaussian_var.get() == 2 and haugh_var.get(
    ) == 1:  # Sobel filter, Gaussian off, Hough on
        # Load the image in grayscale
        src = cv2.imread('solidWhiteCurve.jpg', cv2.IMREAD_GRAYSCALE)

        if src is None:
            print('image load failed!!')
            sys.exit()

        # Sobel edge detection
        Sobel_dx = cv2.Sobel(src, cv2.CV_32F, 1, 0)  # derivative in x
        Sobel_dy = cv2.Sobel(src, cv2.CV_32F, 0, 1)  # derivative in y

        mag = cv2.magnitude(Sobel_dx, Sobel_dy)
        mag = np.clip(mag, 0, 255).astype(np.uint8)  # HoughLinesP needs 8-bit input

        # Hough transform
        lines = cv2.HoughLinesP(mag,
                                1,
                                np.pi / 180.,
                                50,
                                minLineLength=30,
                                maxLineGap=4)
        dst = cv2.cvtColor(mag, cv2.COLOR_GRAY2BGR)

        # Draw the lines in red
        if lines is not None:
            for i in range(lines.shape[0]):
                pt1 = (lines[i][0][0], lines[i][0][1])
                pt2 = (lines[i][0][2], lines[i][0][3])
                cv2.line(dst, pt1, pt2, (0, 0, 255), 2, cv2.LINE_AA)

        # Display
        cv2.imshow('src', src)
        cv2.imshow('dst', dst)

        cv2.waitKey()

        cv2.destroyAllWindows()
    elif line_var.get() == 1 and gaussian_var.get() == 2 and haugh_var.get(
    ) == 2:  # Sobel filter, Gaussian off, Hough off
        img = cv2.imread('solidWhiteCurve.jpg', cv2.IMREAD_GRAYSCALE)
        height, width = img.shape[:2]

        Sobel_dx = cv2.Sobel(img, cv2.CV_32F, 1, 0)  # derivative in x
        Sobel_dy = cv2.Sobel(img, cv2.CV_32F, 0, 1)  # derivative in y

        mag = cv2.magnitude(Sobel_dx, Sobel_dy)
        mag = np.clip(mag, 0, 255).astype(np.uint8)
        cv2.imshow('img', img)
        cv2.imshow('mag', mag)
        cv2.waitKey()

        cv2.destroyAllWindows()
    elif line_var.get() == 2 and gaussian_var.get() == 1 and haugh_var.get(
    ) == 1:  # Laplacian filter, Gaussian on, Hough on
        src = cv2.imread('solidWhiteCurve.jpg', cv2.IMREAD_GRAYSCALE)

        if src is None:
            print('Image load failed!')
            sys.exit()

        # Apply Gaussian blur
        blur_img = cv2.GaussianBlur(src, (0, 0), 1)

        #edge
        edges = cv2.Laplacian(blur_img, cv2.CV_8U, ksize=5)

        lines = cv2.HoughLinesP(edges,
                                1,
                                np.pi / 180.,
                                160,
                                minLineLength=50,
                                maxLineGap=1)

        dst = cv2.cvtColor(edges, cv2.COLOR_GRAY2BGR)

        if lines is not None:
            for i in range(lines.shape[0]):
                pt1 = (lines[i][0][0], lines[i][0][1])  # start point
                pt2 = (lines[i][0][2], lines[i][0][3])  # end point
                cv2.line(dst, pt1, pt2, (0, 0, 255), 2, cv2.LINE_AA)

        cv2.imshow('src', src)
        cv2.imshow('dst', dst)
        cv2.waitKey()
        cv2.destroyAllWindows()
    elif line_var.get() == 2 and gaussian_var.get() == 1 and haugh_var.get(
    ) == 2:  # Laplacian filter, Gaussian on, Hough off
        img = cv2.imread('solidWhiteCurve.jpg', cv2.IMREAD_GRAYSCALE)
        height, width = img.shape[:2]

        blur_img = cv2.GaussianBlur(img, (0, 0), 1)
        Laplacian_img = cv2.Laplacian(blur_img, cv2.CV_8U, ksize=5)

        cv2.imshow('img', img)
        cv2.imshow('Laplacian_img', Laplacian_img)
        cv2.waitKey()

        cv2.destroyAllWindows()
    elif line_var.get() == 2 and gaussian_var.get() == 2 and haugh_var.get(
    ) == 1:  # Laplacian filter, Gaussian off, Hough on
        src = cv2.imread('solidWhiteCurve.jpg', cv2.IMREAD_GRAYSCALE)

        if src is None:
            print('Image load failed!')
            sys.exit()

        edges = cv2.Laplacian(src, cv2.CV_8U, ksize=5)

        lines = cv2.HoughLinesP(edges,
                                1,
                                np.pi / 180.,
                                160,
                                minLineLength=50,
                                maxLineGap=1)

        dst = cv2.cvtColor(edges, cv2.COLOR_GRAY2BGR)

        if lines is not None:
            for i in range(lines.shape[0]):
                pt1 = (lines[i][0][0], lines[i][0][1])  # start point
                pt2 = (lines[i][0][2], lines[i][0][3])  # end point
                cv2.line(dst, pt1, pt2, (0, 0, 255), 2, cv2.LINE_AA)

        cv2.imshow('src', src)
        cv2.imshow('dst', dst)
        cv2.waitKey()
        cv2.destroyAllWindows()
    elif line_var.get() == 2 and gaussian_var.get() == 2 and haugh_var.get(
    ) == 2:  # Laplacian filter, Gaussian off, Hough off
        img = cv2.imread('solidWhiteCurve.jpg', cv2.IMREAD_GRAYSCALE)
        height, width = img.shape[:2]

        Laplacian_img = cv2.Laplacian(img, cv2.CV_8U, ksize=5)

        cv2.imshow('img', img)
        cv2.imshow('Laplacian_img', Laplacian_img)
        cv2.waitKey()

        cv2.destroyAllWindows()
    elif line_var.get() == 3 and gaussian_var.get() == 1 and haugh_var.get(
    ) == 1:  # Canny filter, Gaussian on, Hough on
        # Load the image
        src = cv2.imread('solidWhiteCurve.jpg')

        if src is None:
            print('image load failed!!')
            sys.exit()

        # Apply Gaussian blur
        blur_img = cv2.GaussianBlur(src, (0, 0), 1)

        #Canny Edge Detection
        canny_img = cv2.Canny(blur_img, 50, 150)

        # Hough transform
        lines = cv2.HoughLinesP(canny_img,
                                1,
                                np.pi / 180.,
                                50,
                                minLineLength=30,
                                maxLineGap=4)
        dst = cv2.cvtColor(canny_img, cv2.COLOR_GRAY2BGR)

        # Draw the lines in red
        if lines is not None:
            for i in range(lines.shape[0]):
                pt1 = (lines[i][0][0], lines[i][0][1])
                pt2 = (lines[i][0][2], lines[i][0][3])
                cv2.line(dst, pt1, pt2, (0, 0, 255), 2, cv2.LINE_AA)

        # Display
        cv2.imshow('src', src)
        cv2.imshow('dst', dst)

        cv2.waitKey()

        cv2.destroyAllWindows()
    elif line_var.get() == 3 and gaussian_var.get() == 1 and haugh_var.get(
    ) == 2:  # Canny filter, Gaussian on, Hough off
        src = cv2.imread('solidWhiteCurve.jpg')

        blur_img = cv2.GaussianBlur(src, (0, 0), 2)

        canny_img = cv2.Canny(blur_img, 30, 90)

        cv2.imshow('solidWhiteCurve.jpg', src)
        cv2.imshow('canny_img', canny_img)
        cv2.waitKey()

        cv2.destroyAllWindows()
    elif line_var.get() == 3 and gaussian_var.get() == 2 and haugh_var.get(
    ) == 1:  # Canny filter, Gaussian off, Hough on
        # Load the image
        src = cv2.imread('solidWhiteCurve.jpg')

        if src is None:
            print('image load failed!!')
            sys.exit()

        #Canny Edge Detection
        canny_img = cv2.Canny(src, 50, 150)

        # Hough transform
        lines = cv2.HoughLinesP(canny_img,
                                1,
                                np.pi / 180.,
                                50,
                                minLineLength=30,
                                maxLineGap=4)
        dst = cv2.cvtColor(canny_img, cv2.COLOR_GRAY2BGR)

        # Draw the lines in red
        if lines is not None:
            for i in range(lines.shape[0]):
                pt1 = (lines[i][0][0], lines[i][0][1])
                pt2 = (lines[i][0][2], lines[i][0][3])
                cv2.line(dst, pt1, pt2, (0, 0, 255), 2, cv2.LINE_AA)

        # Display
        cv2.imshow('src', src)
        cv2.imshow('dst', dst)

        cv2.waitKey()

        cv2.destroyAllWindows()
    elif line_var.get() == 3 and gaussian_var.get() == 2 and haugh_var.get(
    ) == 2:  # Canny filter, Gaussian off, Hough off
        src = cv2.imread('solidWhiteCurve.jpg')

        canny_img = cv2.Canny(src, 30, 90)

        cv2.imshow('solidWhiteCurve.jpg', src)
        cv2.imshow('canny_img', canny_img)
        cv2.waitKey()

        cv2.destroyAllWindows()
    else:  # fallback
        print("img load failed")
Example #4
# Define our parameters for Canny and apply
low_threshold = 50
high_threshold = 150
masked_edges = cv2.Canny(blur_gray, low_threshold, high_threshold)

# Define the Hough transform parameters
# Make a blank the same size as our image to draw on
rho = 1
theta = np.pi/180
threshold = 1
min_line_length = 10
max_line_gap = 1
line_image = np.copy(image) * 0  # creating a blank to draw lines on

# Run Hough on edge detected image
lines = cv2.HoughLinesP(masked_edges, rho, theta, threshold, np.array([]),
                            min_line_length, max_line_gap)

# Iterate over the output "lines" and draw lines on the blank
if lines is not None:
    for line in lines:
        for x1, y1, x2, y2 in line:
            cv2.line(line_image, (x1, y1), (x2, y2), (255, 0, 0), 10)

# Create a "color" binary image to combine with line image
color_edges = np.dstack((masked_edges, masked_edges, masked_edges)) 

# Draw the lines on the edge image
combo = cv2.addWeighted(color_edges, 0.8, line_image, 1, 0) 
plt.imshow(combo)
plt.show()
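This cell assumes image, blur_gray, and plt from earlier in the notebook; a minimal setup sketch (the file name is a placeholder):

import cv2
import numpy as np
import matplotlib.pyplot as plt

image = cv2.cvtColor(cv2.imread('test.jpg'), cv2.COLOR_BGR2RGB)  # placeholder input
gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
blur_gray = cv2.GaussianBlur(gray, (5, 5), 0)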
Example #5
mask = np.zeros_like(canny)

# mask image in 2 halfs to extract the street only
mask_points_left = np.array([[0, 190], [205, 93], [266, 103], [209, 294],
                             [0, 294]])
mask_points_right = np.array([[285, 104], [339, 97], [513, 187], [513, 294],
                              [325, 294]])
cv2.fillPoly(mask, [mask_points_left], 255)
half_masked_image = cv2.bitwise_and(canny, mask)
cv2.fillPoly(mask, [mask_points_right], 255)
masked_image = cv2.bitwise_and(canny, mask)

minLineLength = 3
# minLineLength must be passed by keyword here; positionally it would fill
# the `lines` output argument instead
lines = cv2.HoughLinesP(masked_image,
                        1,
                        np.pi / 180,
                        20,
                        minLineLength=minLineLength,
                        maxLineGap=2)
if lines is not None:
    for line in lines:
        x1, y1, x2, y2 = line[0]
        cv2.line(image, (x1, y1), (x2, y2), (0, 255, 0), 1)

plt.imshow(sobelx, cmap='gray')
plt.imshow(canny, cmap='gray')
cv2.imshow('original with hough lines', image)
plt.show()

# wait for any key to be pressed and then close all windows
cv2.waitKey(0)
cv2.destroyAllWindows()
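Since the same mask array is reused, both polygons can also be filled in a single call (a sketch over the same arrays; cv2.fillPoly accepts a list of polygons):

mask = np.zeros_like(canny)
cv2.fillPoly(mask, [mask_points_left, mask_points_right], 255)
masked_image = cv2.bitwise_and(canny, mask)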
Example #6
### Canny edge extraction
##CannyFilterIm=LimitIm.filter(ImageFilter.FIND_EDGES)

#Compute the Canny filter for two values of sigma
##edges1 = feature.canny(Im)
##edges2 = feature.canny(Im, sigma=1)
##CannyFilterIm=feature.canny(Im,sigma=3)
edges = cv2.Canny(Im, 15, 200, apertureSize=3)

# Hough transform to find straight lines
minLineLength = 100
maxLineGap = 10
# lines = cv2.HoughLines(edges, 1, np.pi / 180, 118)  # standard transform; result was overwritten below
result = Im.copy()
lines = cv2.HoughLinesP(edges, 1, np.pi / 180, 80,
                        minLineLength=minLineLength, maxLineGap=maxLineGap)
if lines is not None:
    for line in lines:
        x1, y1, x2, y2 = line[0]
        cv2.line(Im, (x1, y1), (x2, y2), (0, 255, 0), 2)

# Show results
#im.show()
##LimitIm.show()
#BrightIm.show()
#CannyFilterIm.show()

# display results
##fig, (ax1, ax2, ax3) = plt.subplots(nrows=1, ncols=3, figsize=(8, 3), sharex=True, sharey=True)
##
##ax1.imshow(Im, cmap=plt.cm.jet)
##ax1.axis('off')
##ax1.set_title('noisy image', fontsize=20)
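The standard (non-probabilistic) cv2.HoughLines returns (rho, theta) pairs rather than endpoints; a common sketch for drawing them, kept here since the call appears above:

raw = cv2.HoughLines(edges, 1, np.pi / 180, 118)
if raw is not None:
    for rho, theta in raw[:, 0]:
        a, b = np.cos(theta), np.sin(theta)
        x0, y0 = a * rho, b * rho
        p1 = (int(x0 + 1000 * (-b)), int(y0 + 1000 * a))
        p2 = (int(x0 - 1000 * (-b)), int(y0 - 1000 * a))
        cv2.line(Im, p1, p2, (255, 0, 0), 1)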
Example #7
 def hough_lines(self, img, rho, theta, threshold, min_line_len, max_line_gap):
     lines = cv2.HoughLinesP(img, rho, theta, threshold, np.array(
         []), minLineLength=min_line_len, maxLineGap=max_line_gap)
     line_img = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)
     self.draw_lines(line_img, lines)
     return line_img
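hough_lines passes its result to self.draw_lines, which is not shown; a minimal version (an assumption, in the style of the usual lane-finding helpers) could be:

 def draw_lines(self, img, lines, color=(255, 0, 0), thickness=2):
     # draw each detected segment onto img in place
     if lines is None:
         return
     for line in lines:
         for x1, y1, x2, y2 in line:
             cv2.line(img, (x1, y1), (x2, y2), color, thickness)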
Example #8
image_blur = cv2.GaussianBlur(gray, (5, 5), 0)

a = 0.66 * np.mean(image_blur)
b = 2 * a
image_edges = cv2.Canny(image_blur, a, b)
plt.imshow(image_edges), plt.title(
    'Edges detected by Canny detector'), plt.show()
ymax, xmax, _ = original_image.shape
img = np.zeros([ymax, xmax, 3], dtype=np.uint8)
img.fill(255)
img1 = np.zeros([ymax, xmax, 3], dtype=np.uint8)
img1.fill(255)
#apply probabilistic hough line transform
lines = cv2.HoughLinesP(image_edges,
                        0.5,
                        np.pi / 360,
                        50,
                        maxLineGap=100,
                        minLineLength=10)
#print (lines)
if lines is not None:
    for element in lines:
        line = element[0]
        cv2.line(img, (line[0], line[1]), (line[2], line[3]), (155, 55, 105),
                 2)
plt.imshow(img), plt.title(
    'All line segments detected by Probabilistic Hough Line Transform'
), plt.show()
#apply Shi-Tomasi corner detector with sub-pixel refinement
corners = cv2.goodFeaturesToTrack(gray, 80, 0.01, 10)
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 0.001)
if corners is not None:
    corners2 = cv2.cornerSubPix(gray, corners, (5, 5), (-1, -1), criteria)
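img1 is prepared as a second blank canvas but never used; presumably the refined corners were meant to go there (a guess, consistent with the plot titles above):

if corners is not None:
    for x, y in corners2[:, 0]:
        cv2.circle(img1, (int(x), int(y)), 4, (0, 0, 255), -1)
    plt.imshow(img1), plt.title('Corners refined by cornerSubPix'), plt.show()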
Example #9
k = cv2.waitKey(0)
cv2.destroyAllWindows()

# In[1]:

import cv2
import numpy as np

img = cv2.imread('sudoku.png')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
siyah = cv2.Canny(gray, 50, 150, apertureSize=3)

cv2.imshow('siyah', siyah)
lines = cv2.HoughLinesP(siyah,
                        1,
                        np.pi / 180,
                        100,
                        minLineLength=100,
                        maxLineGap=10)

if lines is not None:
    for line in lines:
        x1, y1, x2, y2 = line[0]
        cv2.line(img, (x1, y1), (x2, y2), (0, 255, 0), 2)

cv2.imshow('image', img)
k = cv2.waitKey(0)
cv2.destroyAllWindows()

# In[24]:

import matplotlib.pylab as plt
import cv2
Example #10
def deskew(im, max_skew=10):
    height, width = im.shape[:2]

    # Create a grayscale image and denoise it
    im_gs = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
    im_gs = cv2.fastNlMeansDenoising(im_gs, h=3)

    # Create an inverted B&W copy using Otsu (automatic) thresholding
    im_bw = cv2.threshold(im_gs, 0, 255,
                          cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]

    # Detect lines in this image. Parameters here mostly arrived at by trial and error.
    lines = cv2.HoughLinesP(im_bw,
                            1,
                            np.pi / 180,
                            200,
                            minLineLength=width / 12,
                            maxLineGap=width / 150)

    if lines is None:
        # Nothing detected; return the image unchanged
        return im

    # Collect the angles of these lines (in radians)
    angles = []
    for line in lines:
        x1, y1, x2, y2 = line[0]
        angles.append(np.arctan2(y2 - y1, x2 - x1))

    # If the majority of our lines are vertical, this is probably a landscape image
    landscape = np.sum([abs(angle) > np.pi / 4
                        for angle in angles]) > len(angles) / 2

    # Filter the angles to remove outliers based on max_skew
    if landscape:
        angles = [
            angle for angle in angles
            if np.deg2rad(90 - max_skew) < abs(angle) < np.deg2rad(90 +
                                                                   max_skew)
        ]
    else:
        angles = [
            angle for angle in angles if abs(angle) < np.deg2rad(max_skew)
        ]

    if len(angles) < 5:
        # Insufficient data to deskew
        return im

    # Average the angles to a degree offset
    angle_deg = np.rad2deg(np.median(angles))
    print(angle_deg)
    # If this is landscape image, rotate the entire canvas appropriately
    if landscape:
        if angle_deg < 0:
            im = cv2.rotate(im, cv2.ROTATE_90_CLOCKWISE)
            angle_deg += 90
        elif angle_deg > 0:
            im = cv2.rotate(im, cv2.ROTATE_90_COUNTERCLOCKWISE)
            angle_deg -= 90

    # Rotate the image by the residual offset
    M = cv2.getRotationMatrix2D((width / 2, height / 2), angle_deg, 1)
    im = cv2.warpAffine(im,
                        M, (width, height),
                        borderMode=cv2.BORDER_REPLICATE)
    return im
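A minimal usage sketch for deskew (file names are placeholders):

im = cv2.imread('scan.jpg')
if im is not None:
    cv2.imwrite('scan_deskewed.jpg', deskew(im))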
Example #11
    frame = cv2.resize(frame, (450, 260))  # resize the video for smooth playback

    canny_frame = cv.Canny(frame, 50, 200, None, 3)  # edge frame from the Canny algorithm

    cdstP_frame = cv.cvtColor(canny_frame, cv.COLOR_GRAY2BGR)  # frame the detected lanes are drawn on

    line_frame = np.copy(frame)  # color frame the detected lines are drawn onto

    flip_frame = cv2.flip(frame, 1)  # horizontally flipped frame

    # ======================= [ lane object ] =======================

    linesP = cv.HoughLinesP(canny_frame, 1, np.pi / 180, 50, None, 50, 10)
    if linesP is not None:
        for i in range(0, len(linesP)):
            l = linesP[i][0]
            cv.line(cdstP_frame, (l[0], l[1]), (l[2], l[3]), (20, 255, 85), 1,
                    cv.LINE_AA)
            cv.line(line_frame, (l[0], l[1]), (l[2], l[3]), (20, 255, 85), 1,
                    cv.LINE_AA)
            cv.line(flip_frame, (l[0], l[1]), (l[2], l[3]), (20, 255, 85), 1,
                    cv.LINE_AA)

    cv.imshow("source frame", frame)
    cv.imshow("only line", cdstP_frame)
    cv.imshow("line frame", line_frame)

    # frame with an HSV filter applied
Example #12
def pipeline(image):
    image = yellow_dectection(image)
    height = image.shape[0]
    width = image.shape[1]

    gray_image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    cannyed_image = cv2.Canny(gray_image, 100, 200)

    lines = cv2.HoughLinesP(cannyed_image,
                            rho=6,
                            theta=np.pi / 60,
                            threshold=200,
                            lines=np.array([]),
                            minLineLength=50,
                            maxLineGap=50)

    if lines is None:
        img_ros = bridge.cv2_to_imgmsg(image, "rgb8")
        return img_ros, 0

    left_line_x = []
    left_line_y = []
    right_line_x = []
    right_line_y = []

    for line in lines:
        for x1, y1, x2, y2 in line:
            if x2 == x1:
                continue  # skip vertical segments to avoid division by zero
            slope = float(y2 - y1) / float(x2 - x1)
            if slope <= 0:
                left_line_x.extend([x1, x2])
                left_line_y.extend([y1, y2])
            else:
                right_line_x.extend([x1, x2])
                right_line_y.extend([y1, y2])

    min_y = 0
    max_y = int(image.shape[0] * 3 / 5)

    if not left_line_x == []:
        poly_left = np.poly1d(np.polyfit(left_line_y, left_line_x, deg=1))

        left_x_start = int(poly_left(max_y))
        left_x_end = int(poly_left(min_y))
    if not right_line_x == []:
        poly_right = np.poly1d(np.polyfit(right_line_y, right_line_x, deg=1))
        right_x_start = int(poly_right(max_y))
        right_x_end = int(poly_right(min_y))
    if not left_line_x == [] and not right_line_x == []:
        mid_x_error = (left_x_end + right_x_end) / 2.0 - width / 2
        line_image = draw_lines(
            image,
            [[
                [left_x_start, max_y, left_x_end, min_y],
                [right_x_start, max_y, right_x_end, min_y],
            ]],
            thickness=5,
        )
        img_ros = bridge.cv2_to_imgmsg(line_image, "rgb8")
    else:
        mid_x_error = 1000
        img_ros = bridge.cv2_to_imgmsg(image, "rgb8")

    return img_ros, mid_x_error
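pipeline leans on names defined elsewhere in the node: yellow_dectection, draw_lines, and bridge. The bridge is presumably a cv_bridge converter; a minimal setup sketch:

from cv_bridge import CvBridge

bridge = CvBridge()  # converts between OpenCV images and ROS sensor_msgs/Image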
Example #13
# cv2.line(im,(cols-1,righty),(0,lefty),(0,255,255),2)

# cv2.imshow('result', im)

# cv2.waitKey(0)
# cv2.destroyAllWindows()

# sample 2
# image = cv2.imread('address-tds.jpg')
# gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# edges = cv2.Canny(gray, 100, 250)
# lines = cv2.HoughLinesP(edges, 1, np.pi/180, 25, minLineLength=100, maxLineGap=50)

# hough = np.zeros(image.shape, np.uint8)

# for line in lines:
#     x1, y1, x2, y2 = line[0]
#     cv2.line(hough, (x1, y1), (x2, y2), (255, 255, 255), 2)

# cv2.imwrite('hough.jpg', hough)

gray = cv2.imread('address-tds.jpg')
edges = cv2.Canny(gray,50,150,apertureSize = 3)
cv2.imwrite('edges-50-150.jpg',edges)
minLineLength = 1
lines = cv2.HoughLinesP(image=edges, rho=1, theta=np.pi / 180, threshold=600,
                        lines=np.array([]), minLineLength=minLineLength,
                        maxLineGap=40)

if lines is not None:
    a, b, c = lines.shape
    for i in range(a):
        cv2.line(gray, (lines[i][0][0], lines[i][0][1]),
                 (lines[i][0][2], lines[i][0][3]), (0, 0, 255), 3, cv2.LINE_AA)
cv2.imwrite('hough.jpg', gray)  # write once, after all lines are drawn
Example #14
def extractPuzzle(gray):
    '''
    Main Image processing function to achieve grayscale puzzle without gridlines 
    '''

    if gray.ndim >= 3: gray = cv2.cvtColor(gray, cv2.COLOR_BGR2GRAY)

    print("Begin ExtractPuzzle")

    # Begin Preprocess
    close = preprocess(gray, 19)
    '''Isolate the puzzle'''
    mask = np.zeros_like(close)

    # NOTE: 3-value unpacking matches OpenCV 3.x; OpenCV 2.x and 4.x return
    # (contours, hierarchy) instead
    _, contour, hier = cv2.findContours(close, cv2.RETR_TREE,
                                        cv2.CHAIN_APPROX_SIMPLE)

    # Largest contour
    best_cnt = sorted(contour, key=cv2.contourArea, reverse=True)[0]
    print('Largest Contour FOUND')

    cv2.drawContours(mask, [best_cnt], 0, 255, -1)
    cv2.drawContours(mask, [best_cnt], 0, 0, 2)

    # Isolate the puzzle to a black background
    res = cv2.bitwise_and(gray, mask)

    # Enlarge Puzzle to fit entire image
    warped = four_point_transform(res, best_cnt)
    print('Unwarping DONE!')

    # Find the sweet spot for thresholding
    new = repetitiveThreshold(warped, 15)
    print('Thresholding DONE!')
    view(new, (400, 400))
    # Dilate to enlarge the grids (Easier detection and removal)
    kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))
    dilated = cv2.dilate(new, kernel)

    # Find all vertical and horizontal lines
    dst = cv2.Canny(dilated, 50, 150, None, 3)
    mask = np.zeros_like(dst)
    lines = cv2.HoughLinesP(dst,
                            1,
                            np.pi / 180,
                            100,
                            minLineLength=100,
                            maxLineGap=80)
    if lines is not None:
        a, b, c = lines.shape
        for i in range(a):
            cv2.line(mask, (lines[i][0][0], lines[i][0][1]),
                     (lines[i][0][2], lines[i][0][3]), 255, 3, cv2.LINE_AA)

    overlapped = cv2.bitwise_and(mask, dilated)

    # Perform floodfill on to cover where those lines are
    coords = np.argwhere(overlapped != 0)
    repetitiveFloodfill(dilated, overlapped, coords, 0)
    print('Floodfill DONE!')

    # Slice image in case borders are not properly removed
    dilated = dilated[5:-5, 5:-5]
    resized = cv2.resize(dilated, (1800, 1800))

    print('Complete Puzzle Extraction')
    return resized
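The findContours unpacking above pins the code to OpenCV 3.x; a small version-agnostic wrapper (a sketch) smooths that over:

def find_contours_compat(img, mode, method):
    # OpenCV 3.x returns (image, contours, hierarchy); 2.x and 4.x return
    # (contours, hierarchy), so the last two elements are what we want
    res = cv2.findContours(img, mode, method)
    return res[-2], res[-1]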
Example #15
def detect_lane(rgb_array=None, filename=None, show='rgb'):
    # reading in an image
    if rgb_array is None and filename is None:
        return

    if filename:
        image = np.load(filename)
    else:
        image = rgb_array

    # print(is_pit(rgb_array, 100, 200, 699, 399))

    # plt.figure()
    # plt.imshow(image)
    # plt.show()
    # exit()

    # image = image.transpose(1, 0, 2)

    # printing out some stats and plotting the image

    # print('This image is:', type(image), 'with dimensions:', image.shape)

    height = image.shape[0]
    width = image.shape[1]

    region_of_interest_vertice_sets = [[[0, 599], [0, 400], [400, 100],
                                        [799, 400], [799, 599]],
                                       [[0, 599], [0, 300], [400, 200],
                                        [799, 300], [799, 599]],
                                       [[0, height - 1],
                                        [0, int(2 * height / 3)],
                                        [int(2 * height / 3), 100],
                                        [width - 1,
                                         int(2 * height / 3)],
                                        [width - 1, height - 1]]]

    # Convert to grayscale here.
    gray_image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)

    # Call Canny Edge Detection here.
    cannyed_image = cv2.Canny(gray_image, 100, 200)

    cropped_image = region_of_interest(
        cannyed_image, np.array([region_of_interest_vertice_sets[0]],
                                np.int32))

    lines_raw = cv2.HoughLinesP(cropped_image,
                                rho=6,
                                theta=np.pi / 200,
                                threshold=150,
                                lines=np.array([]),
                                minLineLength=10,
                                maxLineGap=25)

    # print('lines_raw: ', lines_raw)
    if lines_raw is None:
        return [], None, None

    lines = []
    pos_slope = []
    neg_slope = []
    for line in lines_raw:
        skip = False
        for x0, y0, x1, y1 in line:
            if (x0 == x1) or (y0 == y1) or (y0 < 195
                                            and y1 < 195) or is_line_on_road(
                                                image, x0, y0, x1, y1):
                skip = True
            else:
                # print((x0, y0, x1, y1), image[y0][x0], image[y1][x1])

                slope = (y0 - y1) / (x1 - x0)
                skip = abs(slope) < 0.05

        if not skip:
            lines.append(line)
            if slope > 0:
                pos_slope.append(slope)
            else:
                neg_slope.append(slope)

            # print("--Slope", slope)

    pos_slope_avg, neg_slope_avg = None, None

    if len(pos_slope) > 0:
        pos_slope_avg = sum(pos_slope) / len(pos_slope)

    if len(neg_slope) > 0:
        neg_slope_avg = sum(neg_slope) / len(neg_slope)

    # print("---Avg slopes: ", pos_slope_avg, neg_slope_avg)
    # print(inspect_box(image, 0, 300, 399, 599))
    # print(inspect_box(image, 400, 300, 799, 599))

    lines = np.array(lines)

    # print(has_red_bar(image))

    # line_image_cannyed = draw_lines(cannyed_image, lines, thickness=5, itype='gray')

    # line_image_rgb = draw_lines(image, lines, thickness=5, itype='rgb')

    # to_show = line_image_rgb if show == 'rgb' else line_image_cannyed

    # plt.figure()

    # plt.imshow(to_show)
    # plt.show()

    return lines, pos_slope_avg, neg_slope_avg
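A quick usage sketch for detect_lane (the .npy file name is a placeholder; region_of_interest and is_line_on_road are assumed defined elsewhere in the module):

lines, pos_avg, neg_avg = detect_lane(filename='frame0.npy')
print(len(lines), pos_avg, neg_avg)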
Example #16
    def handle_camera_data(self, msg):
        global perr, ptime, serr, dt
        image = self.bridge.imgmsg_to_cv2(msg, desired_encoding='bgr8')
        #transformation
        height = image.shape[0]
        width = image.shape[1]
        region_of_interest_vertices = [
            (0, height),
            (0, height / 1.9),
            (width, height / 1.9),
            (width, height),
        ]

        mask = np.zeros_like(image)
        match_mask_color = [255, 255, 255]  # fill color for the 3-channel mask
        cv2.fillPoly(mask, np.int32([region_of_interest_vertices]),
                     match_mask_color)
        cropped_image = cv2.bitwise_and(image, mask)

        ret, thresh_image = cv2.threshold(cropped_image, 130, 145,
                                          cv2.THRESH_BINARY)

        cannyed_image = cv2.Canny(thresh_image, 50, 150)

        lines = cv2.HoughLinesP(cannyed_image,
                                1,
                                np.pi / 180,
                                30,
                                maxLineGap=100)

        left_line_x = []
        left_line_y = []
        right_line_x = []
        right_line_y = []

        if lines is None:
            return  # no lines detected in this frame
        for line in lines:
            for x1, y1, x2, y2 in line:
                if (x2 - x1) == 0:
                    slope = (y2 - y1)
                else:
                    slope = (y2 - y1) / (x2 - x1)  # <-- Calculating the slope.
                #if math.fabs(slope) < 0.3: # <-- Only consider extreme slope
                #    continue
                if slope <= 0:  # <-- If the slope is negative, left group.
                    left_line_x.extend([x1, x2])
                    left_line_y.extend([y1, y2])
                else:  # <-- Otherwise, right group.
                    right_line_x.extend([x1, x2])
                    right_line_y.extend([y1, y2])

        #line_image = self.draw_lines(image, lines) # <---- Add this call.

        min_y = int(image.shape[0] * (3 / 5))  # <-- Just below the horizon
        max_y = int(image.shape[0])  # <-- The bottom of the image
        poly_left = np.poly1d(np.polyfit(left_line_y, left_line_x, deg=1))
        left_x_start = int(poly_left(max_y))
        left_x_end = int(poly_left(min_y))
        poly_right = np.poly1d(np.polyfit(right_line_y, right_line_x, deg=1))
        right_x_start = int(poly_right(max_y))
        right_x_end = int(poly_right(min_y))

        extrapolated_line_image = self.draw_lines(
            image,
            [[
                [left_x_start, max_y, left_x_end, min_y],
                [right_x_start, max_y, right_x_end, min_y],
            ]],
            thickness=5,
        )

        imgTl = [0, 0]
        imgTr = [width, 0]
        imgBr = [width, height]
        imgBl = [0, height]
        img_params = np.float32([imgTl, imgTr, imgBr, imgBl])

        tl = [left_x_end, min_y]
        tr = [right_x_end, min_y]
        br = [right_x_start, max_y]
        bl = [left_x_start, max_y]
        corner_points_array = np.float32([tl, tr, br, bl])
        print(corner_points_array)
        time.sleep(2)

        matrix = cv2.getPerspectiveTransform(corner_points_array, img_params)
        transformed_image = cv2.warpPerspective(extrapolated_line_image,
                                                matrix, (width, height))

        #Uncomment below for updated lane detection

        # og_image = self.bridge.imgmsg_to_cv2(msg,desired_encoding='bgr8')
        # #Extract dimensions and create the ROI corners before cropping image to remove horizon line
        # height = og_image.shape[0]
        # width = og_image.shape[1]

        # #Calculating Points for Birds-Eye Transform
        # imgTl = [0,0]
        # imgTr = [width,0]
        # imgBr = [width,height]
        # imgBl = [0,height]
        # img_params = np.float32([imgTl,imgTr,imgBr,imgBl])

        # tl = [0,height/1.8]
        # bl = [0, height/1.2]
        # tr = [width, height/1.8]
        # br = [width, height/1.2]
        # corner_points_array = np.float32([tl,tr,br,bl])

        # #Calculating transformation matrix and applying it to original image
        # matrix = cv2.getPerspectiveTransform(corner_points_array,img_params)
        # og_image = cv2.warpPerspective(og_image,matrix,(width,height))

        # #Calculate thresholds and apply to Canny Edge Detection
        # image = cv2.blur(og_image,(5,5))
        # lower = int(max(0,0.7*np.median(image)))
        # upper = int(min(255,1.3*np.median(image)))
        # image = cv2.Canny(image, lower, upper)

        # kernel = np.ones((3,3),np.uint8)
        # image = cv2.dilate(image,kernel,iterations = 2)

        # #Calculate Hough Transform to generate lines for each lane
        # lines = cv2.HoughLinesP(
        # image,
        # rho=10,
        # theta=np.pi / 60,
        # threshold=150,
        # lines=np.array([]),
        # minLineLength=10,
        # maxLineGap=15
        # )

        # #Group lines into left and right and remove any flat ones
        # left_line_x = []
        # left_line_y = []
        # right_line_x = []
        # right_line_y = []
        # for line in lines:
        #     for x1, y1, x2, y2 in line:
        #         slope = (y2 - y1) / (x2 - x1)  # <-- Calculating the slope.
        #         if math.fabs(slope) < 0.3:  # <-- Only consider extreme slope
        #             continue
        #         if slope <= 0:  # <-- If the slope is negative, left group.
        #             left_line_x.extend([x1, x2])
        #             left_line_y.extend([y1, y2])
        #         else:  # <-- Otherwise, right group.
        #             right_line_x.extend([x1, x2])
        #             right_line_y.extend([y1, y2])

        # #Use utility function to draw Hough Tranform lines onto original image
        # #line_image = draw_lines(image, lines) # <---- Add this call.
        # try:
        #     poly_left = np.poly1d(np.polyfit(
        #         left_line_y,
        #         left_line_x,
        #         deg=2  # Change to 1 if you want a linear fit
        #     ))
        #     poly_right = np.poly1d(np.polyfit(
        #         right_line_y,
        #         right_line_x,
        #         deg=2  # Change to 1 if you want a linear fit
        #     ))
        # except:
        #     print("No lanes detected")
        #     return

        # y = np.linspace(image.shape[0]/6, image.shape[0])

        # #Formatting x and y for use with polylines to display lane overlay
        # left_line = np.array(list(zip(np.polyval(poly_left, y),y)), np.int32)
        # right_line = np.array(list(zip(np.polyval(poly_right, y),y)), np.int32)
        # left_line = left_line.reshape((-1, 1, 2))
        # right_line = right_line.reshape((-1, 1, 2))

        # #extrap_image = cv2.polylines(cv2.cvtColor(og_image.copy(),cv2.COLOR_GRAY2RGB),[left_line], False, [255,0,0], 5)
        # extrap_image = cv2.polylines(og_image.copy(),[left_line], False, [255,0,0], 5)
        # extrap_image = cv2.polylines(extrap_image,[right_line], False, [255,0,0], 5)
        # cv2.imshow("transformed_image", extrap_image)
        # cv2.waitKey(2)

        #Uncomment above for updated lane detection

        #Use transformed image for PID calculations and steering
        self.steer_robot(transformed_image)

        #Resize large images
        def resizeImage(img):
            width = int(img.shape[1] * 0.3)
            height = int(img.shape[0] * 0.3)
            dim = (width, height)
            resized = cv2.resize(img, dim, interpolation=cv2.INTER_AREA)
            return resized

        cv2.imshow("original_image", resizeImage(image))
        cv2.imshow("transformed_image", resizeImage(transformed_image))
        cv2.waitKey(2)
Example #17
def main():
    src=cv2.imread('./imagesRGB/RGB01.png')
    #src=cv2.imread('stair4.jpg')
    gray= cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)
    #cv2.imshow('gray',gray)
    #gray=cv2.bitwise_not(gray)
    gray=cv2.Sobel(gray, cv2.CV_8U, 0, 1, ksize=5)
    #cv2.imshow("biwise",gray)
    bw=cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 15, -2)
    #bw= cv2.inRange(gray,230,255)
    #cv2.imshow('binary', bw)
    horizontal=np.copy(bw)
    cols = horizontal.shape[1]
    horizontal_size = cols // 30
    # Create structure element for extracting horizontal lines through morphology operations
    horizontalStructure = cv2.getStructuringElement(cv2.MORPH_RECT, (horizontal_size, 1))
    # Apply morphology operations
    horizontal = cv2.erode(horizontal, horizontalStructure)
    horizontal = cv2.dilate(horizontal, horizontalStructure)
    kernel = np.ones((5,5),np.uint8)
    horizontal= cv2.dilate(horizontal,kernel,iterations=1)
    horizontal= cv2.erode(horizontal,horizontalStructure,iterations=2)
    # Show extracted horizontal lines
    #cv2.imshow('horizontal',horizontal)
    rhofilter=np.copy(src)
    '''
    lines= cv2.HoughLinesP(horizontal,1,math.pi/180,1, 150, 50)

    cv2.imshow("horizontal", horizontal)
    for line in lines:
        for x1,y1,x2,y2 in line:
            cv2.line(src, (x1,y1),(x2,y2),(0,255,0),2)
    '''
    lines=[]
    lineP=cv2.HoughLinesP(horizontal,1,math.pi/120,threshold=10,minLineLength=300, maxLineGap=20)
    #print(lineP)
    for line in lineP:
        for x1,y1,x2,y2 in line:
            cv2.line(src,(x1,y1),(x2,y2),(0,255,0),2)
            theta=math.pi-math.atan2((x2-x1),(y2-y1))
            rho=math.sqrt((((x1-x2)**2)*math.sin(theta)**2)+(((y1-y2)**2)*math.cos(theta)**2))
            lines.append([rho,theta])
            #print(rho,theta)
    cv2.imshow('with',src)

    lines_points = []
    rho_window=11

    x1_arr=[]
    y1_arr=[]
    x2_arr=[]
    y2_arr=[]
    line_arr=[]
    #print(lines.shape)
    if lines is not None:
        lines= np.reshape(lines, [-1,2])
        lineP= np.reshape(lineP, [-1,4])
        lines_sorted=lines[lines[:,0].argsort()]
        lineP_sorted=lineP[lines[:,0].argsort()]
        #print(lineP_sorted)

        pos=0
        for i in range(0,len(lines_sorted)-1):
            if abs(lines_sorted[i][0] - lines_sorted[i+1][0]) <= rho_window:
                line_arr.append(lineP_sorted[i])
            else:
                line_arr.append(lineP_sorted[i])
                if len(line_arr) %2 == 0:
                    index = len(line_arr)//2 -1
                else:
                    index = len(line_arr)//2
                x1_arr.append(line_arr[index][0])
                y1_arr.append(line_arr[index][1])
                x2_arr.append(line_arr[index][2])
                y2_arr.append(line_arr[index][3])
                del line_arr[:]  # reset the cluster before collecting the next group
        i = len(lines_sorted)-1
        line_arr.append(lineP_sorted[i])
        if len(line_arr) %2 == 0:
            index = len(line_arr)//2 -1
        else:
            index = len(line_arr)//2

        x1_arr.append(line_arr[index][0])
        y1_arr.append(line_arr[index][1])
        x2_arr.append(line_arr[index][2])
        y2_arr.append(line_arr[index][3])

        for i in range(len(x1_arr)):
            x1= x1_arr[i]
            y1= y1_arr[i]
            x2= x2_arr[i]
            y2= y2_arr[i]
            lines_points.append([(x1,y1),(x2,y2)])
            cv2.line(rhofilter,(x1,y1),(x2,y2),(0,0,255),2)
            #print(rho,theta)

    cv2.imshow('With filter',rhofilter)
    cv2.waitKey(0)
    return 0
Example #18
import cv2
import numpy as np

img = cv2.imread('../img/sudoku.jpg')
img2 = img.copy()
# Convert to grayscale and detect edges ---①
imgray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(imgray, 50, 200)

# Apply the probabilistic Hough transform ---②
lines = cv2.HoughLinesP(edges, 1, np.pi / 180, 10, None, 20, 2)
for line in lines:
    # Draw each detected line ---③
    x1, y1, x2, y2 = line[0]
    cv2.line(img2, (x1, y1), (x2, y2), (0, 255, 0), 1)

merged = np.hstack((img, img2))
cv2.imshow('Probability hough line', merged)
cv2.waitKey()
cv2.destroyAllWindows()
Example #19
    curr_img_lines = np.copy(curr_img)
    curr_img_gray = cv2.bilateralFilter(curr_img_gray, 3, 10, 10)  # keep the result; the filter is not in-place
    curr_img_gray = cv2.GaussianBlur(curr_img_gray, (3, 3), 0)
    binImg = cv2.adaptiveThreshold(curr_img_gray, 255,
                                   cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                   cv2.THRESH_BINARY, 3, -7)
    #image thresholding
    #cv2.adaptiveThreshold(src, maxvalue, adaptive method, threshold type, bock size, Constant subtracted from the mean or weighted mean )
    #binImg = cv2.adaptiveThreshold(curr_img_gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 3, -3 )
    #ret,binImg = cv2.threshold(curr_img_gray,127,255,cv2.THRESH_BINARY)

    # line detection
    lines = cv2.HoughLinesP(image=binImg,
                            rho=1,
                            theta=np.pi / 180,
                            threshold=20,
                            lines=np.array([]),
                            minLineLength=minLineLength,
                            maxLineGap=maxLineGap)
    lines_number = 0
    if lines is not None:
        a, b, c = lines.shape
        for i in range(a):
            angle = math.atan2(lines[i][0][3] - lines[i][0][1],
                               lines[i][0][2] - lines[i][0][0])
            theta = (angle * 180) / np.pi

            if math.fabs(theta) > 85 and math.fabs(theta) < 95:
                print('theta :', theta)
                cv2.line(curr_img_lines, (lines[i][0][0], lines[i][0][1]),
                         (lines[i][0][2], lines[i][0][3]), (0, 0, 255), 3,
                         cv2.LINE_AA)  # assumed closing of the truncated call
Example #20
def detect_center_point(grab_image):
    cX, cY = 0, 0
    kernel = np.ones((3, 3), np.uint8)
    grab_gray = cv.cvtColor(grab_image, cv.COLOR_BGR2GRAY)
    blurred = cv.GaussianBlur(grab_gray, (5, 5), 0)
    thresh = cv.threshold(blurred, 20, 255, cv.THRESH_BINARY)[1]
    result = cv.erode(thresh, kernel, iterations=10)
    result = cv.dilate(result, kernel, iterations=11)
    cnts = cv.findContours(result.copy(), cv.RETR_EXTERNAL,
                           cv.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)

    for c in cnts:
        M = cv.moments(c)
        if M["m00"] == 0:
            continue  # skip degenerate contours to avoid division by zero
        cX = int(M["m10"] / M["m00"])
        cY = int(M["m01"] / M["m00"])
        cv.circle(grab_image.copy(), (cX, cY), 7, (0, 0, 255), -1)
        cv.putText(grab_image.copy(), "center", (cX - 20, cY - 20),
                   cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)

    edges = cv.Canny(result, 50, 150)
    # FIXME: need to tighten the line-error tolerance
    lines = cv.HoughLinesP(edges,
                           1,
                           np.pi / 180,
                           20,
                           minLineLength=0,
                           maxLineGap=300)

    if lines is None:
        return -1, -1, -1, -1

    top_line_p1_x1, top_line_p1_y1, top_line_p1_x2, top_line_p1_y2, bottom_line_p1_x1, bottom_line_p1_y1, bottom_line_p1_x2, bottom_line_p1_y2 = select_houghlines(
        lines, cX, cY)

    # print(top_line_p1_x1, top_line_p1_y1, top_line_p1_x2, top_line_p1_y2)
    cv.line(grab_image, (bottom_line_p1_x1, bottom_line_p1_y1),
            (bottom_line_p1_x2, bottom_line_p1_y2), (0, 255, 255), 2)
    cv.line(grab_image, (top_line_p1_x1, top_line_p1_y1),
            (top_line_p1_x2, top_line_p1_y2), (255, 255, 0), 2)
    cv.line(grab_image, (cX, 0), (cX, 1008), (0, 255, 0), 2)

    # top intersection point detection
    a1 = top_line_p1_y2 - top_line_p1_y1
    b1 = top_line_p1_x1 - top_line_p1_x2
    c1 = a1 * top_line_p1_x1 + b1 * top_line_p1_y1

    a2 = cY - 0
    b2 = cX - cX
    c2 = a2 * cX + b2 * 0

    determinant = a1 * b2 - a2 * b1
    top_X = (b2 * c1 - b1 * c2) / determinant
    top_Y = (a1 * c2 - a2 * c1) / determinant

    # bottom intersection point detection
    bottom_a1 = bottom_line_p1_y2 - bottom_line_p1_y1
    bottom_b1 = bottom_line_p1_x1 - bottom_line_p1_x2
    bottom_c1 = bottom_a1 * bottom_line_p1_x1 + bottom_b1 * bottom_line_p1_y1

    bottom_a2 = 1008 - cY
    bottom_b2 = cX - cX
    bottom_c2 = bottom_a2 * cX + bottom_b2 * cY

    lower_determinant = bottom_a1 * bottom_b2 - bottom_a2 * bottom_b1
    bottom_X = (bottom_b2 * bottom_c1 -
                bottom_b1 * bottom_c2) / lower_determinant
    bottom_Y = (bottom_a1 * bottom_c2 -
                bottom_a2 * bottom_c1) / lower_determinant

    # [top info]
    # center: cX, cY
    # cross: top_X, top_Y
    # min_distance_point: line_p1_x2, line_p1_y2
    # [bottom info]
    # center: cX, cY
    # cross: bottom_X, bottom_Y
    # min_distance_point: bottom_center_x, bottom_center_y

    top_angle_height = math.atan2((cX - top_X), (0 - top_Y))
    top_angle_bottom = math.atan2((top_line_p1_x2 - top_X),
                                  (top_line_p1_y2 - top_Y))
    upper_angle = (top_angle_height - top_angle_bottom) * 180 / math.pi

    bottom_angle_height = math.atan2((cX - bottom_X), (1008 - bottom_Y))
    bottom_angle_width = math.atan2((bottom_line_p1_x2 - bottom_X),
                                    (bottom_line_p1_y2 - bottom_Y))
    bottom_angle = (bottom_angle_height - bottom_angle_width) * 180 / math.pi

    check_rotate = abs(bottom_angle) + abs(upper_angle)
    distance = abs(top_Y - bottom_Y)

    cv.waitKey()
    cv.destroyAllWindows()
    return check_rotate, distance, upper_angle, bottom_angle
Example #21
    def process_image(self, image):
        self.out1.write(image)
        cv2.imshow("image window11", image)
        cv2.waitKey(3)
    # grayscale the image
        gray = self.grayscale(image)

    # apply gaussian blur
        gray = self.gaussian_blur(gray, 5)
        ret, thresh = cv2.threshold(
            gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
        kernel = np.ones((3, 3), np.uint8)
        opening = cv2.morphologyEx(
            thresh, cv2.MORPH_OPEN, kernel, iterations=2)
        gray = opening

        cv2.imshow("image window1", gray)
        cv2.waitKey(3)
    # return opening

    # using canny edge detector
        low_threshold = 50
        high_threshold = 150
        edges = self.canny(gray, low_threshold, high_threshold)

    # use fillPoly to mask out the region of interest
        iShape = image.shape
        xoffs = 70
        yoffs = 0
        ybottom = int(iShape[0] / 2 + yoffs)  # integral pixel coordinate for cv2.line
        vertices = np.array([[(iShape[1] / 2 + xoffs, ybottom), (iShape[1] / 2 - xoffs,
              ybottom), (0, 200), (0, 0), (iShape[1], 0), (iShape[1], 200)]], dtype=np.int32)
        edges = self.region_of_interest(edges, vertices)

        cv2.imshow("image window2", edges)
        cv2.waitKey(3)
    # return edges

    # hough transform
        rho = 2
        theta = np.pi / 180
        threshold = 15
        min_line_len = 50
        max_line_gap = 30
        lines = cv2.HoughLinesP(edges, rho, theta, threshold, np.array(
            []), min_line_len, max_line_gap)

    # find the slopes of left and right lines of the bounding box (region of
    # interest)
        pf = np.polyfit((0, iShape[1] / 2 - xoffs), (200, ybottom), 1)
        nf = np.polyfit((iShape[1], iShape[1] / 2 + xoffs), (200, ybottom), 1)
        print(pf)
        print(nf)
        r = 0.4

    # Go through all the detected lines, filter those that are have slopes within r of the left or right lines
    # of the bounding box. This is to exclude, as much as possible, horizontal
    # lines.
        pslopx = []
        pslopy = []
        nslopx = []
        nslopy = []
        m1 = np.zeros_like(image)
        m2 = np.zeros_like(image)
        if lines is None:
            lines = []
        for line in lines:
            for x1, y1, x2, y2 in line:
                cv2.line(m2, (x1, y1), (x2, y2), [255, 0, 0], 2)
                if x2 == x1:
                    continue  # avoid division by zero on vertical segments
                slop = (y2 - y1) / (x2 - x1)
                if (slop > 0 and slop >= pf[0] - r and slop <= pf[0] + r):
                    pslopx.extend((x1, x2))
                    pslopy.extend((y1, y2))
                    cv2.line(m1, (x1, y1), (x2, y2), [0, 255, 0], 2)
                elif (slop < 0 and slop >= nf[0] - r and slop <= nf[0] + r):
                    nslopx.extend((x1, x2))
                    nslopy.extend((y1, y2))
                    cv2.line(m1, (x1, y1), (x2, y2), [0, 0, 255], 2)

        cv2.line(m2, (0, 200), (iShape[1] // 2 -
                                xoffs, ybottom), [255, 255, 0], 2)
        cv2.line(m2, (iShape[1], 300), (iShape[1] //
                                        2 + xoffs, ybottom), [255, 0, 255], 5)
        cv2.imshow("image window3", m2)
        cv2.waitKey(3)

        cv2.imshow("image window4", m1)
        cv2.waitKey(3)
    # return m1

        mask = np.zeros_like(image)

    # draw the bounding box left and right lines
    # cv2.line(mask, (vertices[0,1,0], vertices[0,1,1]), (vertices[0,2,0], vertices[0,2,1]), [0,255, 0], 2)
    # cv2.line(mask, (vertices[0,5,0], vertices[0,5,1]), (vertices[0,0,0], vertices[0,0,1]), [0,255, 0], 2)

    # find the line fits for both pos and neg slopes and draw the estimated
    # lines in the pic.
        if pslopx and len(pslopx) >= 2:
            p = np.poly1d(np.polyfit(pslopx, pslopy, 1))
            y_lt = p(0)
            y_lb = p(iShape[1] / 2 - xoffs)
            cv2.line(mask, (0, int(y_lt)),
                     (iShape[1] // 2 - xoffs, int(y_lb)), [0, 255, 0], 6)

        if nslopx and len(nslopx) >= 2:
            n = np.poly1d(np.polyfit(nslopx, nslopy, 1))
            y_rt = n(iShape[1])
            y_rb = n(iShape[1] / 2 + xoffs)
            cv2.line(mask, (iShape[1], int(y_rt)),
                     (iShape[1] // 2 + xoffs, int(y_rb)), [0, 255, 0], 6)

        result = self.weighted_img(mask, image)
        self.out2.write(result)

        return result
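process_image calls helper methods (grayscale, gaussian_blur, canny, region_of_interest, weighted_img) that are not shown; weighted_img is presumably the usual overlay blend, roughly:

    def weighted_img(self, overlay, initial_img, alpha=0.8, beta=1.0, gamma=0.0):
        # blend the line overlay onto the original frame (an assumed helper,
        # matching the common lane-finding implementation)
        return cv2.addWeighted(initial_img, alpha, overlay, beta, gamma)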
Example #22
def houghLine(img, threshold=150, minLength=150, maxGap=15):
    imgW = img.shape[1]
    imgH = img.shape[0]
    image = copy.copy(img)
    # brand
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    edges = cv2.Canny(gray, 50, 200)
    kernel = np.ones((5, 5), np.uint8)
    edges = cv2.dilate(edges, kernel, iterations=3)
    edges = cv2.Canny(edges, 100, 200)
    kernel = np.ones((3, 3), np.uint8)
    edges = cv2.dilate(edges, kernel, iterations=1)
    lines = cv2.HoughLinesP(edges,
                            1,
                            np.pi / 180,
                            threshold=threshold,
                            minLineLength=minLength,
                            maxLineGap=maxGap)
    edges = cv2.cvtColor(edges, cv2.COLOR_GRAY2BGR)
    # drawLineP(lines, edges)
    if lines is None:
        print('No lines detected; cannot continue')
        return img, None
    print("line num =", len(lines))  # modern OpenCV returns shape (N, 1, 4)

    # Find the vertical and horizontal lines
    VecLineL = None
    VecLineR = None
    HorLineU = None
    HorLineD = None
    angThresh = 15.0 / 180.0 * np.pi
    lenScaleThresh = 0.2
    veclines = []
    horlines = []
    for line in lines:
        myline = LineParam(line[0])
        if myline.line[1] < angThresh or math.fabs(
                myline.line[1] - math.pi) < angThresh:  # near-vertical filter
            if myline.length >= lenScaleThresh * imgH:
                veclines.append(myline)
        elif math.fabs(myline.line[1] - math.pi / 2) < angThresh:  # near-horizontal filter
            if myline.length >= lenScaleThresh * imgW:
                horlines.append(myline)

    # Draw the lines
    for line in veclines:
        cv2.line(edges, tuple(line.p1), tuple(line.p2), (255, 0, 0), 2)
    for line in horlines:
        cv2.line(edges, tuple(line.p1), tuple(line.p2), (255, 2, 2), 2)

    # Sort lines by length, longest first
    veclines.sort(key=lambda x: x.length, reverse=True)
    horlines.sort(key=lambda x: x.length, reverse=True)

    # Vertical lines are easier to find: fix them first, then filter the
    # horizontal lines by the vertical lines' endpoints
    if len(veclines) == 0:
        print('No vertical edges found; cannot continue!')
        return img, None
    for line in veclines:
        if VecLineL is None and math.fabs(line.line[0]) < float(imgW) / 2:
            VecLineL = line
        if VecLineR is None and math.fabs(line.line[0]) > float(imgW) / 2:
            VecLineR = line

    # Find horizontal lines near the vertical lines' endpoints; they should
    # lie between the left and right vertical lines
    if len(horlines) == 0:
        print('No horizontal lines found; cannot continue!')
        return img, None
    p1s = []  # upper endpoints of the vertical lines
    p2s = []  # lower endpoints of the vertical lines
    VecLine = []
    if VecLineL is not None:
        VecLine.append(VecLineL)
    if VecLineR is not None:
        VecLine.append(VecLineR)
    VecLine.sort(key=lambda x: x.length, reverse=True)
    if len(VecLine) < 1:
        print('No vertical boundary line found on either side; aborting!')
        return edges, image
    else:
        for vecline in VecLine:
            if vecline.y1 <= vecline.y2:
                p1s.append(vecline.p1)
                p2s.append(vecline.p2)
            else:
                p2s.append(vecline.p1)
                p1s.append(vecline.p2)

    p1s.sort(key=lambda x: x[1])
    p2s.sort(key=lambda x: x[1], reverse=True)
    uplineThreshY = p2s[0][1]
    downlineThreshY = p1s[0][1]

    upLines = []
    downLines = []
    maxLength = horlines[0].length
    for line in horlines:
        # if line.line[0]<(float)(imgH/2) and line.length>0.3*maxLength:
        if line.line[0] < (
                float)(uplineThreshY) and line.length > 0.3 * maxLength:
            # line.distance = math.fabs(line.getDistPtToLine(p1[0],p1[1]))
            distanceTmp = 0
            numTmp = 0
            for i in range((len(p1s))):
                distanceTmp += math.fabs(
                    line.getDistPtToLine(p1s[i][0], p1s[i][1]))
                numTmp += 1
            line.distanceU = distanceTmp / numTmp
            upLines.append(line)
        # if line.line[0]>(float)(imgH/2) and line.length>0.3*maxLength:
        if line.line[0] > (
                float)(downlineThreshY) and line.length > 0.3 * maxLength:
            # line.distance = math.fabs(line.getDistPtToLine(p2[0], p2[1]))
            distanceTmp = 0
            numTmp = 0
            for i in range((len(p2s))):
                distanceTmp += math.fabs(
                    line.getDistPtToLine(p2s[i][0], p2s[i][1]))
                numTmp += 1
            line.distanceD = distanceTmp / numTmp
            downLines.append(line)

    # Discard segments that do not lie between the left and right vertical lines
    UpLines = []
    DownLines = []
    deltpixel = 10
    if len(VecLine) == 2:
        for line in upLines:
            if min(line.p1[0],line.p2[0])>=max(0, (min(p1s[0][0],p1s[1][0])-deltpixel)) \
                    and max(line.p1[0],line.p2[0])<=min(imgW,(max(p1s[0][0],p1s[1][0])+deltpixel)):
                UpLines.append(line)
        for line in downLines:
            if min(line.p1[0],line.p2[0])>=max(0,(min(p2s[0][0],p2s[1][0])-deltpixel)) \
                    and max(line.p1[0],line.p2[0])<=min(imgW,(max(p2s[0][0],p2s[1][0])+deltpixel)):
                DownLines.append(line)
    elif VecLineL is None:
        for line in upLines:
            if max(line.p1[0], line.p2[0]) <= min(imgW,
                                                  (p1s[0][0] + deltpixel)):
                UpLines.append(line)
        for line in downLines:
            if max(line.p1[0], line.p2[0]) <= min(imgW,
                                                  (p2s[0][0] + deltpixel)):
                DownLines.append(line)
    elif VecLineR is None:
        for line in upLines:
            if min(line.p1[0], line.p2[0]) >= max(0, (p1s[0][0] - deltpixel)):
                UpLines.append(line)
        for line in downLines:
            if min(line.p1[0], line.p2[0]) >= max(0, (p2s[0][0] - deltpixel)):
                DownLines.append(line)

    UpLines.sort(key=lambda x: x.distanceU)
    DownLines.sort(key=lambda x: x.distanceD)
    if len(upLines) > 0:
        HorLineU = UpLines[0]
    if len(downLines) > 0:
        HorLineD = DownLines[0]

    # Find the line intersections
    Lines = []  # order: left, right, top, bottom
    if VecLineL is not None:
        Lines.append(VecLineL)
    if VecLineR is not None:
        Lines.append(VecLineR)
    if HorLineU is not None:
        Lines.append(HorLineU)
    if HorLineD is not None:
        Lines.append(HorLineD)

    # If one boundary line is missing, complete it automatically ==============
    if len(Lines) == 3:
        # vertical boundaries are completed by symmetry
        if VecLineL is None:
            print 'Auto-completing the left boundary'
            pts = [HorLineD.p1, HorLineD.p2, HorLineU.p1, HorLineU.p2]
            pts.sort(key=lambda x: x[0])
            VecLineL = getLine(VecLineR.line[1], pts[0])
            Lines.append(VecLineL)
        if VecLineR is None:
            print 'Auto-completing the right boundary'
            pts = [HorLineD.p1, HorLineD.p2, HorLineU.p1, HorLineU.p2]
            pts.sort(key=lambda x: x[0], reverse=True)
            VecLineR = getLine(VecLineL.line[1], pts[0])
            Lines.append(VecLineR)

        # horizontal boundaries are completed from the endpoints of the two vertical ones
        deltScale = 0.067  # 40/600
        if HorLineU is None:
            print 'Auto-completing the top boundary'
            pts = [VecLineL.p1, VecLineL.p2, VecLineR.p1, VecLineR.p2]
            pts.sort(key=lambda x: x[1])
            if math.fabs(pts[0][1] - pts[1][1]) <= deltScale * imgH:
                HorLineU = LineParam(
                    [pts[0][0], pts[0][1], pts[1][0], pts[1][1]])
                # append inside the check so a still-missing line is never added as None
                Lines.append(HorLineU)
        if HorLineD is None:
            print 'Auto-completing the bottom boundary'
            pts = [VecLineL.p1, VecLineL.p2, VecLineR.p1, VecLineR.p2]
            pts.sort(key=lambda x: x[1], reverse=True)
            if math.fabs(pts[0][1] - pts[1][1]) <= deltScale * imgH:
                HorLineD = LineParam(
                    [pts[0][0], pts[0][1], pts[1][0], pts[1][1]])
                Lines.append(HorLineD)

    # Line completion done =================================================

    # Draw the extracted lines for saving the result
    for line in Lines:
        edges = line.drawLineP(edges)

    # Compute the pairwise line intersections
    crossPoint = []
    for twolines in list(combinations(Lines, 2)):
        pt = lineCrossPoint(twolines[0], twolines[1])
        # If a point falls outside the image, enlarge it (only to the right and bottom, so line parameters stay valid)
        if len(pt) > 0 and pt[0] > imgW and pt[0] < 1.5 * imgW and pt[
                1] >= 0 and pt[1] < 1.5 * imgH:
            edgesTmp = np.zeros((imgH, pt[0]), np.uint8)
            edgesTmp[0:imgH, 0:imgW] = edges
            edges = edgesTmp
            imageTmp = np.zeros((imgH, pt[0], 3), np.uint8)
            imageTmp[0:imgH, 0:imgW] = image
            image = imageTmp
            imgW = pt[0]
        if len(pt) > 0 and pt[1] > imgH and pt[1] < 1.5 * imgH and pt[
                0] >= 0 and pt[0] < 1.5 * imgW:
            edgesTmp = np.zeros((pt[1], imgW), np.uint8)  # edges is single-channel, matching the widening branch above
            edgesTmp[0:imgH, 0:imgW] = edges
            edges = edgesTmp
            imageTmp = np.zeros((pt[1], imgW, 3), np.uint8)
            imageTmp[0:imgH, 0:imgW] = image
            image = imageTmp
            imgH = pt[1]
        if len(pt) > 0 and pt[0] <= imgW and pt[0] >= 0 and pt[1] >= 0 and pt[
                1] <= imgH:
            crossPoint.append(pt)
            cv2.circle(edges, (pt[0], pt[1]), 8, (0, 255, 255), 3)

    # Rectify the image
    cornerPoint = []

    if len(crossPoint) < 4:
        print 'Not enough corner points:', len(crossPoint)
        return edges, image
    crossPoint.sort(key=lambda x: x[1])  # sort by y, ascending
    temp = [crossPoint[0], crossPoint[1]]
    temp.sort()  # sort by x, ascending
    cornerPoint.append(temp[0])  # top-left corner
    cornerPoint.append(temp[1])  # top-right corner
    temp = [crossPoint[2], crossPoint[3]]
    temp.sort()  # sort by x, ascending
    cornerPoint.append(temp[1])  # bottom-right corner
    cornerPoint.append(temp[0])  # bottom-left corner
    X1 = int((cornerPoint[0][0] + cornerPoint[3][0]) / 2)
    X2 = int((cornerPoint[1][0] + cornerPoint[2][0]) / 2)
    Y1 = int((cornerPoint[0][1] + cornerPoint[1][1]) / 2)
    Y2 = int((cornerPoint[2][1] + cornerPoint[3][1]) / 2)
    desPoint = []
    desPoint.append([X1, Y1])
    desPoint.append([X2, Y1])
    desPoint.append([X2, Y2])
    desPoint.append([X1, Y2])

    # Perspective transform: warp cornerPoint onto desPoint
    cornerPoint = np.float32(cornerPoint)
    desPoint = np.float32(desPoint)
    M = cv2.getPerspectiveTransform(cornerPoint, desPoint)
    newImg = cv2.warpPerspective(image, M, (imgW, imgH))
    # Crop (note: cropImg is computed but newImg is what gets returned)
    delt = 25
    cropImg = newImg[max(0, Y1 - delt):min(imgH, Y2 + delt),
                     max(0, X1 - delt):min(imgW, X2 + delt)]

    return edges, newImg
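
The snippet above also calls a lineCrossPoint helper that is not shown (LineParam and getLine are likewise defined elsewhere). A minimal sketch of what lineCrossPoint might look like, assuming each line object exposes its endpoints as p1/p2 and the intersection is taken between the infinite lines through them:

def lineCrossPoint(lineA, lineB):
    # Intersection of the infinite lines through the endpoints of two
    # segments; returns [x, y] rounded to ints, or [] when near-parallel.
    x1, y1 = lineA.p1
    x2, y2 = lineA.p2
    x3, y3 = lineB.p1
    x4, y4 = lineB.p2
    d = float((x1 - x2) * (y3 - y4) - (y1 - y2) * (x3 - x4))
    if abs(d) < 1e-9:
        return []
    a = x1 * y2 - y1 * x2
    b = x3 * y4 - y3 * x4
    px = (a * (x3 - x4) - (x1 - x2) * b) / d
    py = (a * (y3 - y4) - (y1 - y2) * b) / d
    return [int(round(px)), int(round(py))]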
Пример #23
0
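# The code below is the body of a video-capture loop; the surrounding setup is
# not part of the original fragment. A minimal sketch of what it likely looks
# like (the file name and the Canny/HoughLines parameters are assumptions):
import math

import cv2 as cv
import numpy as np

cap = cv.VideoCapture('test.mp4')  # hypothetical video source
while cap.isOpened():
    ret, src = cap.read()
    if not ret:
        break
    gray = cv.cvtColor(src, cv.COLOR_BGR2GRAY)
    dst = cv.Canny(gray, 50, 200, None, 3)
    cdst = cv.cvtColor(dst, cv.COLOR_GRAY2BGR)
    cdstP = np.copy(cdst)
    lines = cv.HoughLines(dst, 1, np.pi / 180, 150)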
    if lines is not None:  # run only when lines were found

        for i in range(0, len(lines)):  # recover two points on each line via trigonometry
            rho = lines[i][0][0]
            theta = lines[i][0][1]
            a = math.cos(theta)
            b = math.sin(theta)
            x0 = a * rho
            y0 = b * rho
            pt1 = (int(x0 + 1000 * (-b)), int(y0 + 1000 * (a)))  # tuples, as cv.line expects
            pt2 = (int(x0 - 1000 * (-b)), int(y0 - 1000 * (a)))

            # test-only line drawing, commented out: cv.line(cdst, pt1, pt2, (0, 255, 255), 2)
            # test-only variable dump, commented out: print(rho, " ", theta, " ", a, " ", b, " ", x0, " ", y0, " ", pt1, " ", pt2, "\n")

    linesP = cv.HoughLinesP(dst, 1, np.pi / 180, 50, None, 50, 10)
    if linesP is not None:
        for i in range(0, len(linesP)):
            l = linesP[i][0]
            cv.line(cdstP, (l[0], l[1]), (l[2], l[3]), (180, 120, 255), 3,
                    cv.LINE_AA)

    cv.imshow("오리지널", src)
    cv.imshow("Source", dst)
    cv.imshow("Detected Lines (in red) - Standard Hough Line Transform", cdst)
    cv.imshow("Detected Lines (in red) - Probabilistic Line Transform", cdstP)

    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
Пример #24
0
# Earlier single-image experiment, kept for reference:
# canny_image = canny(lane_image)
# lines = cv2.HoughLinesP(processed_image, 2, np.pi / 180, 100, np.array([]), minLineLength=40, maxLineGap=5)
# averaged_lines = average_slope_intercept(lane_image, lines)
# lineImage = display_lines(lane_image, averaged_lines)
# final_result_image = cv2.addWeighted(lane_image, 0.8, lineImage, 1, 1)
# cv2.imshow("result", region_of_interest(canny_image))
# cv2.waitKey(0)
# plt.imshow(canny_image)
# plt.show()
# cv2.imshow("result", final_result_image)
# cv2.waitKey(0)

# to identify the lane, we will draw a triangle with coordinates (200,700), (1100,700), (550,250)
# refer to def region_of_interest -- a sketch of it and the other helpers follows below
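# The helpers used below (canny, region_of_interest, average_slope_intercept,
# display_lines) are defined elsewhere in the original project. This is a
# minimal sketch of what they might look like, built around the triangle
# described above; the blur and Canny parameters are assumptions.
import cv2
import numpy as np


def canny(frame):
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(gray, (5, 5), 0)
    return cv2.Canny(blur, 50, 150)


def region_of_interest(image):
    # keep only the triangular lane region described above
    polygons = np.array([[(200, 700), (1100, 700), (550, 250)]])
    mask = np.zeros_like(image)
    cv2.fillPoly(mask, polygons, 255)
    return cv2.bitwise_and(image, mask)


def display_lines(image, lines):
    # expects flat [x1, y1, x2, y2] rows, as produced by average_slope_intercept
    line_image = np.zeros_like(image)
    if lines is not None:
        for x1, y1, x2, y2 in lines:
            cv2.line(line_image, (int(x1), int(y1)), (int(x2), int(y2)),
                     (255, 0, 0), 10)
    return line_image


def average_slope_intercept(image, lines):
    # average the Hough segments into one left and one right lane line
    if lines is None:
        return None
    left, right = [], []
    for line in lines:
        x1, y1, x2, y2 = line.reshape(4)
        if x1 == x2:
            continue  # skip vertical segments
        slope, intercept = np.polyfit((x1, x2), (y1, y2), 1)
        (left if slope < 0 else right).append((slope, intercept))
    lanes = []
    for side in (left, right):
        if side:
            slope, intercept = np.average(side, axis=0)
            y1, y2 = image.shape[0], int(image.shape[0] * 3 / 5)
            lanes.append([(y1 - intercept) / slope, y1,
                          (y2 - intercept) / slope, y2])
    return np.array(lanes)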

capture = cv2.VideoCapture(
    r"C:\Users\ssrev\PycharmProjects_Self_Driving_Car_Virtual_Implementation\Finding_Lanes\test2.mp4"
)
while capture.isOpened():
    ret, frame = capture.read()
    if not ret:  # stop cleanly at the end of the video
        break
    canny_image = canny(frame)
    processed_image = region_of_interest(canny_image)
    lines = cv2.HoughLinesP(processed_image,
                            2,
                            np.pi / 180,
                            100,
                            np.array([]),
                            minLineLength=40,
                            maxLineGap=5)
    averaged_lines = average_slope_intercept(frame, lines)
    lineImage = display_lines(frame, averaged_lines)
    final_result_image = cv2.addWeighted(frame, 0.8, lineImage, 1, 1)
    cv2.imshow("final_result", final_result_image)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
capture.release()
cv2.destroyAllWindows()
Пример #25
0
def get_page_rotate_angle(bw_img, min_line):
    img = cv2.medianBlur(bw_img, 3)

    #fix rotation
    dilate = cv2.cvtColor(bw_img, cv2.COLOR_BGR2GRAY)
    (_, dilate) = cv2.threshold(dilate, 0, 255,
                                cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)

    dilate = cv2.dilate(dilate,
                        cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3)),
                        iterations=1)

    #crop
    cropBox = get_image_cropBox(dilate)

    dilate = dilate[cropBox[0]:cropBox[1] + 1, cropBox[2]:cropBox[3] + 1]

    #lines= cv2.HoughLines(edges,1,np.pi/180,200);
    lines = cv2.HoughLinesP(image=dilate,
                            rho=1,
                            theta=np.pi / 1440,
                            threshold=600,
                            minLineLength=min_line,
                            maxLineGap=7)
    if lines is None:
        print "get_angle: Looks like empty/invalid page -- no long lines here"
        return [0.0, [], cropBox]

    print "get_angle: Hough lines: %d" % (len(lines[0]))

    avgangle = 0.0
    avgcnt = 0

    angleG = {}
    for n in range(0, len(lines)):
        for x1, y1, x2, y2 in lines[n]:
            cv2.line(img, (x1, y1), (x2, y2), (0, 0, 255), 2)

            phi = math.atan2(x1 - x2, y1 - y2) * 180 / np.pi
            if (phi < 0):
                phi += 180.0

            #horizontal lines (disabled filter):
            #if (phi > 70 and phi < 110):
            qphi = int((phi + 2.5) / 5.0) * 5  # quantize the angle into 5-degree bins
            angleG.setdefault(str(qphi), []).append(phi)

            print "phi: %.1f (%d %d),(%d,%d)" % (phi, x1, y1, x2, y2)

    #  print angleG
    # pick the 5-degree bin that holds the most lines
    maxlenQphi = -1
    for qphi, lst in angleG.iteritems():
        if (maxlenQphi == -1):
            maxlenQphi = qphi
            continue
        if (len(angleG[maxlenQphi]) < len(lst)):
            maxlenQphi = qphi

    #  print "maxlenQphi: ", maxlenQphi
    avgcnt = len(angleG[maxlenQphi])
    avgangle = sum(angleG[maxlenQphi])

    if (avgcnt == 0):
        print "get_angle: Looks like empty/invalid page -- no long lines here"
        return [0.0, [], cropBox]

    angle = -(avgangle / avgcnt) + 90.0

    if (angle > 45): angle = angle - 90.0

    print "get_angle: avg: cnt=%d anglediff = %0.4f" % (avgcnt, angle)

    #  cv2.imshow("dilate", cv2.resize(dilate, None, fx=0.5,fy=0.5, interpolation=cv2.INTER_NEAREST));
    #  cv2.waitKey(0);

    return [angle, lines, cropBox]
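
get_page_rotate_angle also calls get_image_cropBox, which is not shown in the snippet. A plausible sketch, assuming it returns the [top, bottom, left, right] bounds of the non-zero (ink) region:

import numpy as np


def get_image_cropBox(binary_img):
    # bounds of the non-zero region as [top, bottom, left, right]
    ys, xs = np.nonzero(binary_img)
    if len(ys) == 0:
        return [0, binary_img.shape[0] - 1, 0, binary_img.shape[1] - 1]
    return [ys.min(), ys.max(), xs.min(), xs.max()]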
Пример #26
0
import cv2
import numpy as np

img = cv2.imread("example_02.jpg")
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(gray, 150, 200)
lines = cv2.HoughLinesP(edges, 1, np.pi / 180, 70, maxLineGap=450)
if lines is not None:  # HoughLinesP returns None when nothing is found
    for line in lines:
        x1, y1, x2, y2 = line[0]
        cv2.line(img, (x1, y1), (x2, y2), (0, 0, 128), 1)
#cv2.imshow("linesEdges", edges)
cv2.imshow("linesDetected", img)
cv2.waitKey(0)
cv2.destroyAllWindows()
Пример #27
0
	def run(self):

		rate = rospy.Rate(20)  # 20 Hz

		steering_list = np.array([])
		x_min_list = np.array([])
		x_max_list = np.array([])
		y_min_list = np.array([])
		y_max_list = np.array([])

		m = 0.0
		prev_sbus_steering = 1024
		last_front_stop_time = time.time()
		last_back_stop_time = time.time()
		while not rospy.is_shutdown():
			# startTime = time.time()
			## reset the lists if the direction changes
			if self.ROBOT_MODE != self.prev_ROBOT_MODE:
				x_min_list = np.array([])
				x_max_list = np.array([])
				y_min_list = np.array([])
				y_max_list = np.array([])
				steering_list = np.array([])

			if (self.CONSOLE_ARM == 1):

				## Use PX and PY to bitmap image
				for px,py in zip(self.PX, self.PY):
					self.image_bitmap(int(px),int(py))
			
				## Line detection
				processed_img = cv2.cvtColor(self.amap, cv2.COLOR_BGR2GRAY)
				processed_img = cv2.Canny(processed_img, self.cannyMinVal, self.cannyMaxVal)
				
				## In the pig-farm case, having a long-range view is better than cropping front/back
				# if (self.ROBOT_MODE == "FRONT"):
				# 	## roi on processed image to filter undesired area out
				# 	processed_img = self.roi(processed_img, self.front_vertices)
				# 	## this roi is for visualizing, how much we filter the side area out
				# 	## can be commented out
				# 	self.amap = self.roi(self.amap, self.front_vertices)
				# elif (self.ROBOT_MODE == "BACK"):
				# 	## roi on processed image to filter undesired area out
				# 	processed_img = self.roi(processed_img, self.back_vertices)
				# 	## this roi is for visualizing, how much we filter the side area out
				# 	## can be commented out
				# 	self.amap = self.roi(self.amap, self.back_vertices)

				if (self.ROBOT_MODE == "FRONT") or (self.ROBOT_MODE == "BACK"):
					processed_img = self.roi(processed_img, self.sides_vertices)
					self.amap = self.roi(self.amap, self.sides_vertices)
	

				## If HoughLinesP can find line features in the image
				linesP = cv2.HoughLinesP(processed_img, 1, pi/180, self.houghThresh, minLineLength=self.houghMinLength, maxLineGap=self.houghMaxGap)
				if linesP is not None:
					slope = np.array([])

					for i in range(0, len(linesP)):
						l = linesP[i][0]
						x1 = l[0]
						y1 = l[1]
						x2 = l[2]
						y2 = l[3]

						## Perfect vertical line
						if (x1==x2):
							m = None
							x = x1

							## We are trying to rearrange the order of x1,y1 as min pixel and x2,y2 as max pixel
							if x < (self.frame_width/2):
								if y2 < y1:
									self.left_line_list.append([y2,y1,x2,x1])
								else:
									# put the lowest y value first, together with its matching x
									self.left_line_list.append([y1,y2,x1,x2])
							else:
								if y2 < y1:
									self.right_line_list.append([y2,y1,x2,x1])
								else:
									# put the lowest y value first, together with its matching x
									self.right_line_list.append([y1,y2,x1,x2])

						else:
							m = (float(y2)-float(y1))/(float(x2)-float(x1))
							x_ave = (x2+x1)/2.0

							x = x_ave

							## Horizontal line with slope
							## discard slopes that are nearly flat (|m| < 1) or extremely steep (|m| > 15)
							if (abs(m) < 1) or (abs(m) > 15): 
								pass
								# msg = "NO_LINE"
								# cv2.line(self.amap, (x1, y1), (x2, y2), (150,150,150), 1, cv2.LINE_AA)

							## Vertical line with slope
							else:
								## We are trying to rearrange the order of x1,y1 as min pixel and x2,y2 as max pixel
								## same as perfect vertical line above case
								if x < (self.frame_width/2):
									if y1 > y2:
										self.left_line_list.append([y2,y1,x2,x1])
									else:
										# put the lowest y value first, together with its matching x
										self.left_line_list.append([y1,y2,x1,x2])
								else:
									if y1 > y2:
										self.right_line_list.append([y2,y1,x2,x1])
									else:
										self.right_line_list.append([y1,y2,x1,x2])

						# print("i: %d, m: %s, %d %d %d %d" %(i,str(m),y2,y1,x2,x1))

				## HoughLinesP cannot find any lines
				else:
					msg = "NO_LINE"
					self.left_line_list = []
					self.right_line_list = []
					self.y_L_dist = np.array([])
					self.y_R_dist = np.array([])


				

				## Draw a dot at the middle of the image
				cv2.circle(self.amap, (self.frame_width_half,self.frame_height_half), 4, (0,0,0), -1)

				## Find the best candidate line on each left and right side and plot
				## Left line
				if len(self.left_line_list) > 0:
					## Calculate the vertical extent of each line: simply ymax - ymin
					for left_line_elem in self.left_line_list:
						self.y_L_dist = np.append(self.y_L_dist, (left_line_elem[1] - left_line_elem[0]))

					## Using argmax to find the longest line in the y_L_dist list
					idx_L = np.argmax(self.y_L_dist)

					## Use that longest line as a candidate of left side line
					y_min_plot_L = self.left_line_list[idx_L][0]
					y_max_plot_L = self.left_line_list[idx_L][1]
					x_min_plot_L = self.left_line_list[idx_L][2]
					x_max_plot_L = self.left_line_list[idx_L][3]

					## Draw candidate left side line
					cv2.line(self.amap, (x_min_plot_L, y_min_plot_L), (x_max_plot_L, y_max_plot_L), (255,0,0), 1, cv2.LINE_AA)
					x_mid_L = (x_min_plot_L + x_max_plot_L)/2
					y_mid_L = (y_min_plot_L + y_max_plot_L)/2
					# clear array out
					self.y_L_dist = np.array([])
				else:
					x_mid_L = 0

				## Right line
				if len(self.right_line_list) > 0:
					## Calculate the vertical extent of each line: simply ymax - ymin
					for right_line_elem in self.right_line_list:
						self.y_R_dist = np.append(self.y_R_dist, (right_line_elem[1] - right_line_elem[0]))

					## Using argmax to find the longest line in the y_R_dist list
					idx_R = np.argmax(self.y_R_dist)

					## Use that longest line as a candidate of right side line
					y_min_plot_R = self.right_line_list[idx_R][0]
					y_max_plot_R = self.right_line_list[idx_R][1]
					x_min_plot_R = self.right_line_list[idx_R][2]
					x_max_plot_R = self.right_line_list[idx_R][3]
					cv2.line(self.amap, (x_min_plot_R, y_min_plot_R), (x_max_plot_R, y_max_plot_R), (0,255,0), 1, cv2.LINE_AA)
					x_mid_R = (x_min_plot_R + x_max_plot_R)/2
					y_mid_R = (y_min_plot_R + y_max_plot_R)/2
					# clear array out
					self.y_R_dist = np.array([])
				else:
					x_mid_R = 0

				## If two lines of left and right sides are existing
				if x_mid_L != 0 and x_mid_R != 0:

					## We calculate the average of start and end point between left and right lines
					x_min_plot_MID = (x_min_plot_L + x_min_plot_R)/2
					y_min_plot_MID = ((y_min_plot_L + y_min_plot_R)/2) 
					x_max_plot_MID = (x_max_plot_L + x_max_plot_R)/2
					y_max_plot_MID = ((y_max_plot_L + y_max_plot_R)/2) 
					case = "LR_"

				elif x_mid_L != 0 and x_mid_R == 0:
					x_min_plot_MID = x_min_plot_L + self.half_lane_width_pixel
					y_min_plot_MID = y_min_plot_L
					x_max_plot_MID = x_max_plot_L + self.half_lane_width_pixel
					y_max_plot_MID = y_max_plot_L
					case = "L_"

				elif x_mid_L == 0 and x_mid_R != 0:
					x_min_plot_MID = x_min_plot_R - self.half_lane_width_pixel
					y_min_plot_MID = y_min_plot_R
					x_max_plot_MID = x_max_plot_R - self.half_lane_width_pixel
					y_max_plot_MID = y_max_plot_R
					case = "R_"

				else:
					# print("Some lines are missing...")
					sbus_steering = 1024	#prev_sbus_steering#
					if self.ROBOT_MODE == "FRONT":
						sbus_throttle = self.sbus_throttle_fwd_const
					elif self.ROBOT_MODE == "BACK":
						sbus_throttle = self.sbus_throttle_bwd_const
					else:
						sbus_throttle = self.sbus_throttle_mid				

					x_min_plot_MID = 0
					x_max_plot_MID = 0
					case = "NO_"


				## Care only if it found a line
				if case != "NO_":
					## Filter noisy points
					x_min_plot_MID, x_min_list = self.MovingAverage(x_min_plot_MID, x_min_list, self.line_list_length)
					x_max_plot_MID, x_max_list = self.MovingAverage(x_max_plot_MID, x_max_list, self.line_list_length)
					y_min_plot_MID, y_min_list = self.MovingAverage(y_min_plot_MID, y_min_list, self.line_list_length)
					y_max_plot_MID, y_max_list = self.MovingAverage(y_max_plot_MID, y_max_list, self.line_list_length)

					cv2.line(self.amap, (int(x_min_plot_MID), int(y_min_plot_MID)), (int(x_max_plot_MID), int(y_max_plot_MID)), (0,0,255), 1, cv2.LINE_AA)
					cv2.line(self.amap, (self.frame_width_half, int(y_min_plot_MID)), (self.frame_width_half, int(y_max_plot_MID)), (0,0,0), 1, cv2.LINE_AA)

					if self.ROBOT_MODE == 'FRONT':
						if (x_min_plot_MID < self.center_min_DB):
							sbus_steering = int(self.map_with_limit(x_min_plot_MID, self.x_mid_lane_MIN, self.center_min_DB , self.sbus_steering_max, self.sbus_steering_max_DB))
							# sbus_throttle = self.sbus_throttle_const
							sbus_throttle = int(self.map_with_limit(x_min_plot_MID, self.x_mid_lane_MIN, self.center_thr_adj_min, self.sbus_throttle_fwd_min, self.sbus_throttle_fwd_const))
							case = case + "FWD_STR_L"
						elif (x_min_plot_MID > self.center_max_DB):
							sbus_steering = int(self.map_with_limit(x_min_plot_MID, self.center_max_DB, self.x_mid_lane_MAX, self.sbus_steering_min_DB, self.sbus_steering_min))
							# sbus_throttle = self.sbus_throttle_const
							sbus_throttle = int(self.map_with_limit(x_min_plot_MID, self.center_thr_adj_max, self.x_mid_lane_MAX, self.sbus_throttle_fwd_const, self.sbus_throttle_fwd_min))
							case = case + "FWD_STR_R"
						else:
							# sbus_steering = 1024
							sbus_steering = int(self.map_with_limit(x_min_plot_MID, self.center_min_DB, self.center_max_DB, (self.sbus_steering_max_DB - 1), (self.sbus_steering_min_DB + 1)))
							sbus_throttle = self.sbus_throttle_fwd_const
							case = case + "FWD_THR"

							
					elif self.ROBOT_MODE == 'BACK':
						if (x_max_plot_MID < self.center_min_DB):
							sbus_steering = int(self.map_with_limit(x_max_plot_MID, self.x_mid_lane_MIN, self.center_min_DB , self.sbus_steering_BWD_max, self.sbus_steering_BWD_max_DB))
							# sbus_throttle = self.sbus_throttle_const
							sbus_throttle = int(self.map_with_limit(x_max_plot_MID, self.x_mid_lane_MIN, self.center_thr_adj_min, self.sbus_throttle_bwd_min, self.sbus_throttle_bwd_const))
							case = case + "BWD_STR_L"
						elif (x_max_plot_MID > self.center_max_DB):
							sbus_steering = int(self.map_with_limit(x_max_plot_MID, self.center_max_DB, self.x_mid_lane_MAX, self.sbus_steering_BWD_min_DB, self.sbus_steering_BWD_min))
							# sbus_throttle = self.sbus_throttle_const
							sbus_throttle = int(self.map_with_limit(x_max_plot_MID, self.center_thr_adj_max, self.x_mid_lane_MAX, self.sbus_throttle_bwd_const, self.sbus_throttle_bwd_min))
							case = case + "BWD_STR_R"
						else:
							# sbus_steering = 1024
							sbus_steering = int(self.map_with_limit(x_max_plot_MID, self.center_min_DB, self.center_max_DB, (self.sbus_steering_BWD_max_DB - 1), (self.sbus_steering_BWD_min_DB + 1)))
							sbus_throttle = self.sbus_throttle_bwd_const
							case = case + "BWD_THR"

							
					else:
						sbus_steering = self.sbus_steering_mid
						sbus_throttle = self.sbus_throttle_mid

				cv2.circle(self.amap, (int(self.x_mid_lane_MIN), 0), 3, (255,0,0), -1)
				cv2.circle(self.amap, (int(self.x_mid_lane_MAX), 0), 3, (0,255,0), -1)
				cv2.circle(self.amap, (int(self.center_min_DB), 0), 3, (255,255,0), -1)
				cv2.circle(self.amap, (int(self.center_max_DB), 0), 3, (0,255,255), -1)
				cv2.circle(self.amap, (int(self.center_thr_adj_min), 0), 3, (0,100,255), -1)
				cv2.circle(self.amap, (int(self.center_thr_adj_max), 0), 3, (100,0,255), -1)

				cv2.circle(self.amap, (int(self.x_mid_lane_MIN), self.frame_height), 3, (255,0,0), -1)
				cv2.circle(self.amap, (int(self.x_mid_lane_MAX), self.frame_height), 3, (0,255,0), -1)
				cv2.circle(self.amap, (int(self.center_min_DB), self.frame_height), 3, (255,255,0), -1)
				cv2.circle(self.amap, (int(self.center_max_DB), self.frame_height), 3, (0,255,255), -1)
				cv2.circle(self.amap, (int(self.center_thr_adj_min), self.frame_height), 3, (0,100,255), -1)
				cv2.circle(self.amap, (int(self.center_thr_adj_max), self.frame_height), 3, (100,0,255), -1)

				

				## Filter steering data
				steering_ave, steering_list = self.MovingAverage(sbus_steering, steering_list, self.steering_list_length) #10
				steering_ave = int(steering_ave)

				if self.FRONT_STOP or self.BACK_STOP:
					steering_ave = self.sbus_steering_mid
					sbus_throttle = self.sbus_throttle_mid


				self.sbus_cmd.data = [steering_ave, sbus_throttle]
				
				print("case: %s x_min_plot_MID: %d , str: %d, ave: %d, thr: %d" %(case, x_min_plot_MID, sbus_steering, steering_ave, sbus_throttle))

				# clear list out
				self.left_line_list = []
				self.right_line_list = []


				## publish image topic
				try:
					image_message = self.bridge.cv2_to_imgmsg(self.amap, encoding="bgr8") #bgr8 #8UC1
					# bw_image_message = self.bridge.cv2_to_imgmsg(processed_img, encoding="8UC1")
					self.image_pub.publish(image_message)
					# self.bw_image_pub.publish(bw_image_message)
				except CvBridgeError as e:
					print(e)

				self.amap = np.copy(self._map)
				processed_img = np.copy(self._map)

				## this takes 0.06 ms
				prev_sbus_steering = sbus_steering

			else:
				self.sbus_cmd.data = [self.sbus_steering_mid, self.sbus_throttle_mid]

			## Drawing Polygons ##
			# footprint
			self.pg.header.stamp = rospy.Time.now()
			self.pg.header.frame_id = "base_footprint"
			self.pg.polygon.points = [
								Point32(x=A[0], y=A[1]),
								Point32(x=B[0], y=B[1]),
								Point32(x=D[0], y=D[1]),
								Point32(x=C[0], y=C[1])]

			# front stop zone
			self.pg_front_stop.header.stamp = rospy.Time.now()
			self.pg_front_stop.header.frame_id = "base_footprint"
			self.pg_front_stop.polygon.points = [
								Point32(x=B[0], y=B[1]),
								Point32(x=T[0], y=T[1]),
								Point32(x=D[0], y=D[1]),
								Point32(x=B[0], y=B[1])]

			# back stop zone
			self.pg_back_stop.header.stamp = rospy.Time.now()
			self.pg_back_stop.header.frame_id = "base_footprint"
			self.pg_back_stop.polygon.points = [
									Point32(x=A[0], y=A[1]),
									Point32(x=C[0], y=C[1]),
									Point32(x=W[0], y=W[1]),
									Point32(x=A[0], y=A[1])]

			# construct tf
			self.t.header.frame_id = "base_footprint" 
			self.t.header.stamp = rospy.Time.now()
			self.t.child_frame_id = "base_link"
			self.t.transform.translation.x = 0.0
			self.t.transform.translation.y = 0.0
			self.t.transform.translation.z = 0.0

			self.t.transform.rotation.x = 0.0
			self.t.transform.rotation.y = 0.0
			self.t.transform.rotation.z = 0.0
			self.t.transform.rotation.w = 1.0
			self.br.sendTransform(self.t)

			# laser_pub.publish(new_scan)
			self.foot_pub.publish(self.pg)
			self.front_stop_pub.publish(self.pg_front_stop)
			self.back_stop_pub.publish(self.pg_back_stop)


			# self.x_left_array = np.array([])
			# self.y_left_array = np.array([])

			self.prev_ROBOT_MODE = self.ROBOT_MODE


			self.sbus_cmd_pub.publish(self.sbus_cmd)
			# period = time.time() - startTime
			# print(period)
			rate.sleep()
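
Example #27 relies on two small helpers, self.MovingAverage and self.map_with_limit, that are not part of the snippet. A sketch of what they might look like (written here as plain functions; in the original they are class methods), inferred from how they are called:

import numpy as np


def MovingAverage(new_value, value_list, list_length):
    # append the new sample, keep only the last list_length samples,
    # and return (mean, updated list)
    value_list = np.append(value_list, new_value)
    if len(value_list) > list_length:
        value_list = value_list[-list_length:]
    return float(np.mean(value_list)), value_list


def map_with_limit(val, in_min, in_max, out_min, out_max):
    # linear map of val from [in_min, in_max] to [out_min, out_max],
    # clamped to the output range; the snippet sometimes passes a reversed
    # output range, so clamp against min/max of the pair
    out = (val - in_min) * (out_max - out_min) / float(in_max - in_min) + out_min
    lo, hi = min(out_min, out_max), max(out_min, out_max)
    return max(lo, min(hi, out))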
Пример #28
0
im = cv2.imread("tick_test02.jpg")

im = cv2.resize(im, (0, 0), fx=0.2, fy=0.2, interpolation=cv2.INTER_CUBIC)
edge = cv2.Canny(im, 50, 180, 0)
#cv2.imwrite("edge1.jpg", edge)
#cv2.namedWindow("EDGE", cv2.WINDOW_NORMAL)

kernel = np.ones((3, 3), np.uint8)
#erosion = cv2.erode(edge, kernel, iterations=1)
dilate = cv2.dilate(edge, kernel, iterations=1)

#cv2.imshow("EDGE", dilate)
#cv2.waitKey(0)
#cv2.destroyAllWindows()

# The original positional call passed 70 and 60 into the optional `lines`
# and `minLineLength` slots of HoughLinesP; they were presumably meant as
# minLineLength and maxLineGap, so keyword arguments are used here.
lines = cv2.HoughLinesP(dilate, 1, np.pi / 180, 31, minLineLength=70, maxLineGap=60)
if lines is None:
    raise SystemExit("No lines detected")
print("The number of lines detected by HoughTransform: %d" % len(lines))

h, w, _ = im.shape
line = lines[:, 0, :]
filter_line = []
for x1, y1, x2, y2 in line:
    if x1 == x2:
        continue  # skip vertical lines to avoid division by zero
    k = abs(float(y2 - y1) / (x2 - x1))
    if tan(20 * np.pi / 180) <= k <= tan(30 * np.pi / 180):
        filter_line.append([x1, y1, x2, y2])
print("The number of filter lines detected by HoughTransform: %d" %
      len(filter_line))
Пример #29
0
# imports assumed by this fragment
import cv2
import numpy as np
from numpy import linalg as LA


def find_arrowdirection(img):

    # this function returns the direction of the arrow
    # if no direction is found it returns 1000 (the original comment said -1,
    # but the code below signals failure with 1000)

    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)

    ## FINDING RED
    lower = np.array([0, 100, 100])  #hsv
    upper = np.array([10, 255, 255])
    mask = cv2.inRange(hsv, lower, upper)

    ### BLUR
    kernel = np.ones((6, 6), np.float32) / 25
    dst = cv2.filter2D(mask, -1, kernel)
    cv2.imwrite('0blur.jpg', dst)

    ####ERODE
    se1 = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
    se2 = cv2.getStructuringElement(cv2.MORPH_RECT, (2, 2))
    erosion = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, se1)
    erosion = cv2.morphologyEx(erosion, cv2.MORPH_OPEN, se2)

    se1 = cv2.getStructuringElement(cv2.MORPH_CROSS, (5, 5))  #5
    se2 = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))  #3
    erosion2 = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, se1)
    erosion2 = cv2.morphologyEx(erosion2, cv2.MORPH_OPEN, se2)
    se3 = cv2.getStructuringElement(cv2.MORPH_CROSS, (4, 4))  #5
    se4 = cv2.getStructuringElement(cv2.MORPH_CROSS, (2, 2))  #3
    erosion2 = cv2.morphologyEx(erosion2, cv2.MORPH_CLOSE, se3)
    cv2.imwrite('0erosion2ter.jpg', erosion2)
    erosion2 = cv2.morphologyEx(erosion2, cv2.MORPH_OPEN, se4)

    kernel1 = np.ones((2, 2), np.uint8)
    erosion3 = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel1)
    cv2.imwrite('0erosion1.jpg', erosion)
    cv2.imwrite('0erosion2.jpg', erosion2)
    cv2.imwrite('0erosion3.jpg', erosion3)

    erosion = erosion2

    ####EDGE
    edges = cv2.Canny(erosion, 130, 150, apertureSize=3)  #130 150 3
    cv2.imwrite('0canny.jpg', edges)

    #### HOUGH
    minLineLength = 20  # number of pixels forming a line: TO BE TUNED
    # cv2.HoughLinesP(edges, rho accuracy, theta accuracy in rad, threshold, minLineLength, maxLineGap)
    #lines = cv2.HoughLinesP(edges,rho = 1,theta = 1*np.pi/180,threshold = 10,minLineLength = 10,maxLineGap = 20)
    lines = cv2.HoughLinesP(edges,
                            rho=1,
                            theta=1 * np.pi / 180,
                            threshold=10,
                            minLineLength=10,
                            maxLineGap=20)

    i = 0  # start at 0 so the first detected segment is not skipped
    rho = []
    th = []

    if lines is None or len(lines) == 0:
        print 'no lines detected '
        theta = 1000
        return theta

    while i <= len(lines) - 1:
        for x1, y1, x2, y2 in lines[i]:
            th_compute = round(
                np.arctan((0.0 + x2 - x1) / (y2 - y1 + 0.01)) * 180 / 3.14, 2)
            rho_compute = round(LA.norm([x2 - x1, y2 - y1]), 2)
            if rho_compute > 30.0:  # keep only segments longer than 30 px
                if th_compute == 0.0:
                    th_compute = 90.0
                th.append(th_compute)
                rho.append(rho_compute)
                cv2.line(img, (x1, y1), (x2, y2), (255, 255, 0), 2)
        i = i + 1

    if (len(rho) == 0):
        print 'no lines detected '
        theta = 1000
        return theta

    print 'lines detected : ', len(rho)

    # indices of rho sorted from the longest segment to the shortest
    idx_rho = np.argsort(rho)
    idx_rho = idx_rho[::-1]  # reverse into descending order

    # guard against fewer than three detected segments
    topk = min(3, len(idx_rho))
    print 'main direction is (in deg) : ', [th[idx_rho[k]] for k in range(topk)]
    print 'max line length : ', [rho[idx_rho[k]] for k in range(topk)]

    # FIND BARYCENTRE
    npImg = np.asarray(hsv)
    coordList = np.argwhere(npImg == [0, 255, 255])
    ##### X and Y are swapped (numpy uses row, column order)

    sum_x = 0
    sum_y = 0
    x_array = []
    y_array = []
    for y, x, i in coordList:
        cv2.circle(img, (x, y), 1, (255, 255, 255), -1)
        sum_x = sum_x + x
        sum_y = sum_y + y
        x_array.append(x)
        y_array.append(y)

    if len(coordList) == 0:
        print 'no red pixels found'
        theta = 1000
        return theta
    cog_x = sum_x / len(coordList)
    cog_y = sum_y / len(coordList)

    cv2.circle(img, (cog_x, cog_y), 5, (100, 255, 255), -1)
    cv2.circle(img, (int(np.median(x_array)), int(np.median(y_array))), 5,
               (100, 100, 100), -1)

    print 'COG : ', [cog_x, cog_y]
    print 'MED : ', [np.median(x_array), np.median(y_array)]
    print 'COG to find : ', [466, 226]
    mask = cv2.inRange(hsv, lower, upper)

    # NOTE: OpenCV 3.x findContours returns (image, contours, hierarchy),
    # so index [1] picks the contour list under that version
    contours = cv2.findContours(erosion, 1, 2)
    contours = contours[1]
    cnt = contours[0]

    print 'cnt : ', cnt

    area = cv2.contourArea(cnt)
    print 'arrow area : ', area

    perimeter = cv2.arcLength(cnt, True)
    print 'arrow perimeter : ', perimeter
    x_d, y_d, w_d, h_d = cv2.boundingRect(cnt)
    cv2.rectangle(img, (x_d, y_d), (x_d + w_d, y_d + h_d), [100, 255, 100], 3)

    # compute the angle of the diagonal:
    th_diag = round(np.arctan((0.0 + w_d) / (h_d + 0.001)) * 180 / 3.14, 2)
    print 'diagonale angle is (in deg) :', th_diag

    # center of the bounding-box diagonals
    cod_x = int(x_d + w_d / 2)
    cod_y = int(y_d + h_d / 2)
    cv2.circle(img, (cod_x, cod_y), 5, [100, 255, 100], 3)

    # vector u of the line through the COD, perpendicular to the arrow axis
    u_x = int(cod_x + 100 * np.sin((th[idx_rho[0]] + 90) * np.pi / 180))
    u_y = int(cod_y + 100 * np.cos((th[idx_rho[0]] + 90) * np.pi / 180))

    # dot product between u and the COD->COG vector
    scalar = np.vdot([u_x - cod_x, u_y - cod_y],
                     [cog_x - cod_x, cog_y - cod_y])

    if scalar > 0:
        print 'arrow pointing right'

    else:
        print 'arrow pointing left'

    # the original returned theta here without ever assigning it on the
    # success path; the dominant line angle is the most plausible intent
    theta = th[idx_rho[0]]
    return theta
Пример #30
0
    def update_output(*args):
        if interactive:
            canny_threshold = cv2.getTrackbarPos("Canny Threshold",
                                                 window_name)
            erosion_element = cv2.getTrackbarPos("Erosion Element",
                                                 window_name)
            erosion_size = cv2.getTrackbarPos("Erosion Size", window_name)
            dilation_element = cv2.getTrackbarPos("Dilation Element",
                                                  window_name)
            dilation_size = cv2.getTrackbarPos("Dilation Size", window_name)
        else:
            # FIXME: hack around Python 2 nested scoping craziness:
            canny_threshold = 0
            erosion_element = 2
            erosion_size = 0
            dilation_element = 2
            dilation_size = 1

        output_image = output_base_image.copy()

        label = []

        def add_label(s):
            print "\t%s" % s
            label.append(s)

        if erosion_size > 0:
            element_name = MORPH_TYPE_KEYS[erosion_element]
            element = MORPH_TYPES[element_name]

            structuring_element = cv2.getStructuringElement(
                element, (2 * erosion_size + 1, 2 * erosion_size + 1))
            add_label("Erosion %s at %d" % (element_name, erosion_size))
            output_image = cv2.erode(output_image, structuring_element)

        if dilation_size > 0:
            element_name = MORPH_TYPE_KEYS[dilation_element]
            element = MORPH_TYPES[element_name]

            structuring_element = cv2.getStructuringElement(
                element, (2 * dilation_size + 1, 2 * dilation_size + 1))
            add_label("Dilation %s at %d" % (element_name, dilation_size))
            output_image = cv2.dilate(output_image, structuring_element)

        if canny_threshold > 0:
            add_label("Canny at %d" % canny_threshold)
            output_image = cv2.Canny(output_image, canny_threshold,
                                     canny_threshold * 3, 12)

        print "\tFinding contours...",
        contours, hierarchy = cv2.findContours(output_image, cv2.RETR_TREE,
                                               cv2.CHAIN_APPROX_SIMPLE)
        print " %d" % len(contours)

        output = cv2.cvtColor(output_image, cv.CV_GRAY2RGB)

        if False:  # disabled: experimental Hough-based line overlay
            lines = cv2.HoughLinesP(output_base_image,
                                    rho=1,
                                    theta=cv.CV_PI / 180,
                                    threshold=160,
                                    minLineLength=80,
                                    maxLineGap=10)

            for line in lines[0]:
                cv2.line(output, (line[0], line[1]), (line[2], line[3]),
                         (0, 0, 255), 2, 4)

        # TODO: confirm that the min area check buys us anything over the bounding box min/max filtering
        # TODO: make minimum contour area configurable
        min_area_pct = 0.01
        min_area = min_area_pct * source_image.size

        # TODO: more robust algorithm for detecting likely scan edge artifacts which can handle cropped scans of large images (e.g. http://dl.wdl.org/107_1_1.png)
        max_height = int(round(0.9 * source_image.shape[0]))
        max_width = int(round(0.9 * source_image.shape[1]))
        min_height = int(round(0.1 * source_image.shape[0]))
        min_width = int(round(0.1 * source_image.shape[1]))

        print "\tContour length & area (min area: %d pixels, min/max box: height = %d, %d, width = %d, %d)" % (
            min_area, min_height, max_height, min_width, max_width)

        bboxes = []
        for i, contour in enumerate(contours):
            length = cv2.arcLength(contours[i], False)
            area = cv2.contourArea(contours[i], False)

            if area < min_area:
                continue

            poly = cv2.approxPolyDP(contour,
                                    0.01 * cv2.arcLength(contour, False),
                                    False)
            x, y, w, h = cv2.boundingRect(poly)
            bbox = ((x, y), (x + w, y + h))

            if w > max_width or w < min_width or h > max_height or h < min_height:
                print "\t\t%4d: failed min/max check: %s" % (i, bbox)
                continue

            bboxes.append(bbox)
            print "\t\t%4d: %16.2f%16.2f bounding box=%s" % (i, length, area,
                                                             bbox)

            if interactive:
                #cv2.imshow(extract_name, extracted)
                cv2.polylines(output,
                              contour,
                              True, (32, 192, 32),
                              thickness=3)
                cv2.rectangle(output, bbox[0], bbox[1], color=(32, 192, 192))
                cv2.drawContours(output,
                                 contours,
                                 i, (32, 192, 32),
                                 hierarchy=hierarchy,
                                 maxLevel=0)

        # Merge overlapping bboxes
        i = 0
        while i < len(bboxes) - 1:
            j = i + 1
            while j < len(bboxes):
                if bboxes[i][0][0] < bboxes[j][1][0] and bboxes[i][0][1] < bboxes[j][1][1] and \
                   bboxes[i][1][0] > bboxes[j][0][0] and bboxes[i][1][1] > bboxes[j][0][1]:
                    x1 = min(bboxes[i][0][0], bboxes[j][0][0])
                    y1 = min(bboxes[i][0][1], bboxes[j][0][1])
                    x2 = max(bboxes[i][1][0], bboxes[j][1][0])
                    y2 = max(bboxes[i][1][1], bboxes[j][1][1])
                    bboxes[i] = ((x1, y1), (x2, y2))
                    # do not advance j after a merge: the next box shifted into slot j
                    del bboxes[j]
                else:
                    j = j + 1
            i = i + 1

        # save/display remaining bboxes
        for i, bbox in enumerate(bboxes):
            extract_name = "%s extract %d" % (window_name, i)
            extracted = source_image[bbox[0][1]:bbox[1][1],
                                     bbox[0][0]:bbox[1][0]]
            #extracted = source_image[y:y + h, x:x + w]

            if output_dir:
                cv2.imwrite(os.path.join(output_dir, "%s.jpg" % extract_name),
                            extracted)

            if interactive:
                cv2.rectangle(output, bbox[0], bbox[1], color=(192, 192, 32))

        label.append("%d contours" % len(contours))

        if interactive:
            output = cv2.copyMakeBorder(output, 40, 0, 0, 0,
                                        cv2.BORDER_CONSTANT, None,
                                        (32, 32, 32))
            cv2.putText(output, ", ".join(label), (10, 30),
                        cv2.FONT_HERSHEY_SIMPLEX, 1.0, (192, 192, 192))
            cv2.imshow(window_name, output)
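
Example #30 references MORPH_TYPES and MORPH_TYPE_KEYS, which are defined elsewhere in the original module. A plausible definition, consistent with the trackbar indices used above (index 2 selecting a rectangle under sorted keys):

import cv2

MORPH_TYPES = {
    "cross": cv2.MORPH_CROSS,
    "ellipse": cv2.MORPH_ELLIPSE,
    "rectangle": cv2.MORPH_RECT,
}
MORPH_TYPE_KEYS = sorted(MORPH_TYPES.keys())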