Exemplo n.º 1
0
def process_img(image):
    """Detect lane lines in a BGR frame and draw them.

    Returns a 4-tuple ``(processed_img, original_image, m1, m2)``:
    ``processed_img`` is the edge/Hough debug view, ``original_image`` has
    the two fitted lane lines drawn on it in green, and ``m1``/``m2`` are
    the lane slopes from ``draw_lanes`` (0 when no lanes were found).
    """
    original_image = image
    # BUG FIX: the original blurred AFTER Canny, which smears the binary
    # edge map and weakens the peaks HoughLinesP votes on.  Smooth the raw
    # frame first, then detect edges.
    processed_img = cv2.GaussianBlur(original_image, (5, 5), 0)
    processed_img = cv2.Canny(processed_img, threshold1=200, threshold2=300)
    # NOTE(review): `vertices` is not defined in this function — it must be
    # a module-level constant; confirm it is in scope at call time.
    processed_img = roi(processed_img, [vertices])

    # rho=1px, theta=1°, threshold=180 votes, minLineLength=100, maxLineGap=5
    lines = cv2.HoughLinesP(processed_img, 1, np.pi / 180, 180, np.array([]),
                            100, 5)
    m1 = 0
    m2 = 0
    try:
        l1, l2, m1, m2 = draw_lanes(original_image, lines)
        cv2.line(original_image, (l1[0], l1[1]), (l1[2], l1[3]), [0, 255, 0], 30)
        cv2.line(original_image, (l2[0], l2[1]), (l2[2], l2[3]), [0, 255, 0], 30)
    except Exception as e:
        # draw_lanes fails when it cannot fit two lanes; report and keep the
        # default slopes rather than aborting the frame.
        print(str(e))
    # HoughLinesP returns None when no segments are found; the original hid
    # that behind a bare try/except pass — test for it explicitly instead.
    if lines is not None:
        for coords in lines:
            coords = coords[0]
            try:
                cv2.line(processed_img, (coords[0], coords[1]),
                         (coords[2], coords[3]), [255, 0, 0], 3)
            except Exception as e:
                print(str(e))
    return processed_img, original_image, m1, m2
Exemplo n.º 2
0
def process_img(image):
    """Detect lane lines in a BGR frame and draw them.

    Returns a 4-tuple ``(processed_img, original_image, m1, m2)``:
    ``processed_img`` is the edge/Hough debug view, ``original_image`` has
    the two fitted lane lines drawn on it in green, and ``m1``/``m2`` are
    the lane slopes from ``draw_lanes`` (0 when no lanes were found).
    """
    original_image = image
    # convert to gray
    processed_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # edge detection
    processed_img = cv2.Canny(processed_img, threshold1=200, threshold2=300)

    processed_img = cv2.GaussianBlur(processed_img, (5, 5), 0)

    # Trapezoidal region of interest for an 800x600-ish game window.
    vertices = np.array([
        [10, 500],
        [10, 300],
        [300, 200],
        [500, 200],
        [800, 300],
        [800, 500],
    ], np.int32)

    processed_img = roi(processed_img, [vertices])

    # more info: http://docs.opencv.org/3.0-beta/doc/py_tutorials/py_imgproc/py_houghlines/py_houghlines.html
    # BUG FIX: in the cv2 Python binding the 5th positional argument is the
    # optional `lines` output array, so the original call fed 20 into
    # `lines` and 15 into `minLineLength`, and never set `maxLineGap` at
    # all.  Keyword arguments put 20/15 on the intended parameters.
    lines = cv2.HoughLinesP(processed_img, 1, np.pi / 180, 180,
                            minLineLength=20, maxLineGap=15)
    m1 = 0
    m2 = 0
    try:
        l1, l2, m1, m2 = draw_lanes(original_image, lines)
        cv2.line(original_image, (l1[0], l1[1]), (l1[2], l1[3]), [0, 255, 0],
                 30)
        cv2.line(original_image, (l2[0], l2[1]), (l2[2], l2[3]), [0, 255, 0],
                 30)
    except Exception as e:
        # draw_lanes fails when it cannot fit two lanes; report and keep the
        # default slopes rather than aborting the frame.
        print(str(e))
    # HoughLinesP returns None when nothing is found; check explicitly
    # instead of swallowing the TypeError with a bare except/pass.
    if lines is not None:
        for coords in lines:
            coords = coords[0]
            try:
                cv2.line(processed_img, (coords[0], coords[1]),
                         (coords[2], coords[3]), [255, 0, 0], 3)
            except Exception as e:
                print(str(e))

    return processed_img, original_image, m1, m2
Exemplo n.º 3
0
def process_img(image):
    """Detect lane lines in a BGR frame and draw them.

    Returns a 4-tuple ``(processed_img, original_image, m1, m2)``:
    ``processed_img`` is the edge/Hough debug view, ``original_image`` has
    the two fitted lane lines drawn on it in green, and ``m1``/``m2`` are
    the lane slopes from ``draw_lanes`` (0 when no lanes were found).
    """
    original_image = image
    # convert to gray
    processed_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # edge detection
    processed_img = cv2.Canny(processed_img, threshold1=200, threshold2=300)

    processed_img = cv2.GaussianBlur(processed_img, (5, 5), 0)

    # Trapezoidal region of interest for an 800x600-ish game window.
    vertices = np.array([[10, 500], [10, 300], [300, 200], [500, 200],
                         [800, 300], [800, 500]], np.int32)

    processed_img = roi(processed_img, [vertices])

    # more info: http://docs.opencv.org/3.0-beta/doc/py_tutorials/py_imgproc/py_houghlines/py_houghlines.html
    # BUG FIX: in the cv2 Python binding the 5th positional argument is the
    # optional `lines` output array, so the original call fed 20 into
    # `lines` and 15 into `minLineLength`, and never set `maxLineGap` at
    # all.  Keyword arguments put 20/15 on the intended parameters.
    lines = cv2.HoughLinesP(processed_img, 1, np.pi / 180, 180,
                            minLineLength=20, maxLineGap=15)
    m1 = 0
    m2 = 0
    try:
        l1, l2, m1, m2 = draw_lanes(original_image, lines)
        cv2.line(original_image, (l1[0], l1[1]), (l1[2], l1[3]), [0, 255, 0], 30)
        cv2.line(original_image, (l2[0], l2[1]), (l2[2], l2[3]), [0, 255, 0], 30)
    except Exception as e:
        # draw_lanes fails when it cannot fit two lanes; report and keep the
        # default slopes rather than aborting the frame.
        print(str(e))
    # HoughLinesP returns None when nothing is found; check explicitly
    # instead of swallowing the TypeError with a bare except/pass.
    if lines is not None:
        for coords in lines:
            coords = coords[0]
            try:
                cv2.line(processed_img, (coords[0], coords[1]),
                         (coords[2], coords[3]), [255, 0, 0], 3)
            except Exception as e:
                print(str(e))

    return processed_img, original_image, m1, m2
Exemplo n.º 4
0
    processed_img =  cv2.Canny(processed_img, threshold1 = 200, threshold2=300)

    processed_img = cv2.GaussianBlur(processed_img,(5,5),0)

    vertices = np.array([[10,500],[10,300],[300,200],[500,200],[800,300],[800,500],
                         ], np.int32)

    processed_img = roi(processed_img, [vertices])

    # more info: http://docs.opencv.org/3.0-beta/doc/py_tutorials/py_imgproc/py_houghlines/py_houghlines.html
    #                                     rho   theta   thresh  min length, max gap:
    lines = cv2.HoughLinesP(processed_img, 1, np.pi/180, 180,      20,       15)
    m1 = 0
    m2 = 0
    try:
        l1, l2, m1,m2 = draw_lanes(original_image,lines)
        cv2.line(original_image, (l1[0], l1[1]), (l1[2], l1[3]), [0,255,0], 30)
        cv2.line(original_image, (l2[0], l2[1]), (l2[2], l2[3]), [0,255,0], 30)
    except Exception as e:
        print(str(e))
        pass
    try:
        for coords in lines:
            coords = coords[0]
            try:
                cv2.line(processed_img, (coords[0], coords[1]), (coords[2], coords[3]), [255,0,0], 3)


            except Exception as e:
                print(str(e))
    except Exception as e:
Exemplo n.º 5
0

# Main capture/display loop: grab frames from the game window, run the lane
# pipeline, and show every intermediate stage until 'q' is pressed.

# Capture region based on resolution: 40px allows for the window title bar,
# bottom = 480 + 40 = 520.  Hoisted out of the loop — it never changes.
capture_region = (0, 40, 640, 520)

while True:
    tic = time.time()

    # First try used PIL's ImageGrab (~0.15 s/frame at 480x640):
    # orig_screengrab = np.array(ImageGrab.grab(bbox=(0,40,640,520)))
    # grab_screen brings that down to ~0.03 s/frame.
    orig_screengrab = grab_screen(capture_region)
    # processed_image, original_image = process_screengrab(orig_screengrab, capture_region)
    processed_image, image_gray, image_with_lanes_bgr = draw_lanes(
        orig_screengrab, capture_region)

    lane_masked_image = find_lane_markers(orig_screengrab)
    # FIX: window-title typo "ScreeGrab" -> "ScreenGrab".
    cv2.imshow('Original ScreenGrab',
               cv2.cvtColor(orig_screengrab, cv2.COLOR_RGB2BGR))
    # output of find_lane_markers
    cv2.imshow('Lane Detection', lane_masked_image)
    # Draw Lane outputs
    cv2.imshow('Grayscale Image ', image_gray)
    cv2.imshow('Processed Screengrab', processed_image)
    cv2.imshow('Processed with lanes', image_with_lanes_bgr)
    print('Grabbing + Displaying took {} seconds'.format(time.time() - tic))
    if cv2.waitKey(25) & 0xFF == ord('q'):
        cv2.destroyAllWindows()
        break
Exemplo n.º 6
0
import cv2
import sys
import os
import time
from draw_lanes import draw_lanes
from get_curve import get_curve

sys.path.append("/Lane_Detection")
from lane_detection import lane_detection
#, get_curve, draw_lanes
import matplotlib.pyplot as plt

# Demo script: load one test frame, run the lane-detection pipeline, and
# plot the annotated result while timing draw_lanes.
img = cv2.imread('/home/hackathon/Curved-Lane-Lines/test_images/test3.jpg')
# cv2.imread returns None (no exception) for a missing/unreadable file;
# fail fast here instead of crashing later inside lane_detection.
if img is None:
    raise FileNotFoundError(
        'cannot read /home/hackathon/Curved-Lane-Lines/test_images/test3.jpg')
#plt.imshow(img)
size = (100, 100)
curves, lanes, ploty, out_img = lane_detection(img, size)
#curverad =get_curve(out_img, curves[0], curves[2])
#lane_curve = np.mean([curverad[0], curverad[1]])
startx = time.time()
# Reuse `size` instead of repeating the (100, 100) literal so the resize
# stays in sync with what lane_detection was given.
img_resized = cv2.resize(img, size)
img_n = draw_lanes(img_resized, curves[0], curves[2])
print("\n draw_lanes time is : " + str(time.time() - startx))
f, (ax1) = plt.subplots(1, 1, figsize=(20, 10))
ax1.imshow(img_n)
ax1.set_title('Original Image', fontsize=30)