Example #1
def get_lane_image(img, ratio, debug=False):
    # Reference slopes/intercepts of the expected left and right lane lines
    # (intercepts scaled by the image ratio) and the maximum deviation a
    # detected segment may have from them to still count as a lane line.
    base_left_m, base_left_c = -0.32785089597153977, 446.0614062879984 * ratio
    base_right_m, base_right_c = 0.2911316010810115, 79.62376483613377 * ratio
    max_m_diff, max_c_diff = 0.13, 50

    test_img = np.array(img)
    width, height = test_img.shape[1], test_img.shape[0]

    # Detect candidate line segments in the frame.
    detector = lane_detection.LaneDetector()
    lines = detector.process(cv2.cvtColor(test_img, cv2.COLOR_BGR2RGB), True,
                             0.4, 0.5, debug)

    # Keep only the segments close enough to the reference lane lines.
    left_lane_lines, right_lane_lines = lane_detection.get_lane_lines(
        lines, base_left_m, base_left_c, base_right_m, base_right_c,
        max_m_diff, max_c_diff)
    if not left_lane_lines or not right_lane_lines:
        outOfLane = True
        # font, fontScale, fontColor and lineType are module-level text
        # settings defined elsewhere in the source file.
        cv2.putText(test_img, "Not inside lane", (int(width / 2 - 150), 50),
                    font, fontScale, fontColor, lineType)
    else:
        outOfLane = False
        # Average the surviving segments into one (slope, intercept) pair per
        # side and draw each lane line from the bottom of the frame up to
        # 60% of its height.
        left_lane_line, right_lane_line = get_lines_mean(
            left_lane_lines), get_lines_mean(right_lane_lines)
        low_y, high_y = height - 1, int(height * 0.6)
        left_low = (int((low_y - left_lane_line[1]) / left_lane_line[0]), low_y)
        left_high = (int((high_y - left_lane_line[1]) / left_lane_line[0]), high_y)
        right_low = (int((low_y - right_lane_line[1]) / right_lane_line[0]), low_y)
        right_high = (int((high_y - right_lane_line[1]) / right_lane_line[0]), high_y)
        cv2.line(test_img, left_low, left_high, (0, 0, 255), 5)
        cv2.line(test_img, right_low, right_high, (0, 0, 255), 5)

    return test_img, outOfLane
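
The helper get_lines_mean is called above but not included in this listing. Judging only from how its result is used (index 0 as a slope, index 1 as an intercept), a minimal sketch could average the (m, c) pairs of the accepted segments; this is an assumption about the original helper, not its actual source:

import numpy as np

def get_lines_mean(lane_lines):
    # Assumed input: an iterable of (slope, intercept) pairs for one lane side.
    # Averaging each component yields a single representative lane line.
    lane_lines = np.asarray(lane_lines, dtype=float)
    return lane_lines[:, 0].mean(), lane_lines[:, 1].mean()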
Example #2
def driver_perspective_transform(img_BGR, debug=False):
    # Undistortion is disabled here; the raw frame is used directly.
    # ud_img_BGR = cv2.undistort(img_BGR, mtx, dist, None, mtx)
    ud_img_BGR = img_BGR
    ud_img_RGB = cv2.cvtColor(ud_img_BGR, cv2.COLOR_BGR2RGB)
    if debug:
        show_images([ud_img_RGB])

    # Detect line segments, estimate the vanishing point from them, and
    # derive a bird's-eye perspective transform of the road.
    detector = lane_detection.LaneDetector()
    lines = detector.process(ud_img_RGB, True, 0.5, 0.16, debug)

    vp = vanishing_point.calculate_vanishing_point(lines, ud_img_BGR, debug)

    H, H_inv, warped = perspective_transform.perspective_transform(
        vp, ud_img_BGR, debug)

    # Estimate how many warped-image pixels correspond to one meter along
    # each axis (the lane endpoints returned here are not used further).
    x_pixels_per_meter, y_pixels_per_meter, left_low, left_high, right_low, right_high = \
        perspective_transform.get_ratio(H, H_inv, warped, mtx, debug)

    return H, x_pixels_per_meter, y_pixels_per_meter
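
Once driver_perspective_transform has produced a homography and the pixel-per-meter ratios, the frame can be warped to the bird's-eye view with cv2.warpPerspective and pixel distances converted to meters. A minimal usage sketch, assuming a hypothetical input file and that the module-level calibration matrix mtx used inside the function is already loaded:

import cv2

frame = cv2.imread("road_frame.jpg")  # hypothetical frame, not from this listing
H, x_ppm, y_ppm = driver_perspective_transform(frame, debug=False)

# Warp the frame to the bird's-eye view described by H.
h, w = frame.shape[:2]
birds_eye = cv2.warpPerspective(frame, H, (w, h))

# Convert a horizontal pixel distance measured in the warped view to meters.
pixel_gap = 250
print(pixel_gap / x_ppm, "meters")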
Example #3
def __init__(self, camera_calibrator):
    self.camera_calibrator = camera_calibrator
    self.lane_detector = ld.LaneDetector()
Example #4
def get_ratio(H, H_inv, orig_warped, mtx, debug):
    # 1/3.28084 converts feet to meters (the variable name is kept from the
    # original code); a standard lane width of 12 ft is assumed further down.
    meter_to_feet = 1 / 3.28084
    # Canny thresholds for the alternative edge-based approach kept in the
    # commented-out code below.
    low_thresh = 50
    high_thresh = 100

    if debug:
        warped = np.array(orig_warped)

    # Alternative approach: detect lane edges with Canny + Hough directly.
    # gray = cv2.cvtColor(warped, cv2.COLOR_BGR2GRAY)
    # gray = cv2.GaussianBlur(gray, (9, 9), 0)
    # edges = cv2.Canny(gray, low_thresh, high_thresh, apertureSize=3)
    # utilities.show_images([edges])
    # lines = cv2.HoughLines(edges, 1, np.pi / 180, 100)
    detector = lane_detection.LaneDetector()
    lines = detector.process(cv2.cvtColor(orig_warped, cv2.COLOR_BGR2RGB), False, 0.02, debug)
    # lines = cv2.HoughLinesP(
    #     edges,
    #     rho=2,
    #     theta=np.pi / 180,
    #     threshold=50,
    #     lines=np.array([]),
    #     minLineLength=40,
    #     maxLineGap=120
    # )

    # Only used by the commented-out min/max alternative below.
    x_min = 1000000
    x_max = 0

    # Accumulate the mean x coordinate of segments on the left and right
    # halves of the warped (bird's-eye) image.
    tot_xlft, cnt_xlft = 0, 0
    tot_xrit, cnt_xrit = 0, 0

    for line in lines:
        x1, y1, x2, y2 = utilities.get2pts(line[0], True)

        # if x1 < x_min:
        #     x_min = x1
        # if x1 > x_max:
        #     x_max = x1

        if min(x1, x2) < 0.5 * orig_warped.shape[1]:
            tot_xlft += float(x1 + x2) / 2
            cnt_xlft += 1
        else:
            tot_xrit += float(x1 + x2) / 2
            cnt_xrit += 1

        if debug:
            cv2.line(warped, (x1, y1), (x2, y2), (0, 0, 255), 2)

    warped_height = orig_warped.shape[0]
    # left_low, left_high, right_low, right_high = get_mapped_pxl([x_min, warped_height - 1], H_inv),\
    # get_mapped_pxl([x_min, 0], H_inv), get_mapped_pxl([x_max, warped_height - 1], H_inv),\
    # get_mapped_pxl([x_max, 0], H_inv)

    # Average x position of the left and right lane markings in the warped
    # image (assumes at least one segment was found on each side).
    avg_xlft, avg_xrit = int(float(tot_xlft) / cnt_xlft), int(float(tot_xrit) / cnt_xrit)

    # Map the warped-image lane endpoints back to the original image.
    left_low, left_high, right_low, right_high = get_mapped_pxl([avg_xlft, warped_height - 1], H_inv),\
        get_mapped_pxl([avg_xlft, 0], H_inv), get_mapped_pxl([avg_xrit, warped_height - 1], H_inv),\
        get_mapped_pxl([avg_xrit, 0], H_inv)

    if debug:
        # lanes on warped
        utilities.show_images([cv2.cvtColor(warped, cv2.COLOR_BGR2RGB)])

    # The pixel distance between the two lane markings corresponds to one
    # lane width (12 ft = 3.6576 m), giving pixels per meter along x.
    approx_dist = avg_xrit - avg_xlft
    x_pixels_per_meter = approx_dist / (12 * meter_to_feet)

    # The ratio of the column norms of inv(H * mtx) estimates how the warp
    # scales x relative to y, which converts the x ratio into a y ratio.
    inv_H_x_mtx = np.linalg.inv(np.matmul(H, mtx))
    x_norm = np.linalg.norm(inv_H_x_mtx[:, 0])
    y_norm = np.linalg.norm(inv_H_x_mtx[:, 1])
    scale = x_norm / y_norm
    y_pixels_per_meter = x_pixels_per_meter * scale

    return x_pixels_per_meter, y_pixels_per_meter, left_low, left_high, right_low, right_high
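
get_mapped_pxl is used above but not included in this listing. Mapping a warped-view pixel back to the original image through H_inv is a standard homogeneous-coordinate transform, so a plausible sketch (an assumption about the original helper, not its source) is:

import numpy as np

def get_mapped_pxl(pxl, H_inv):
    # Assumed behavior: apply the inverse homography to an [x, y] pixel and
    # divide by the homogeneous coordinate to return image coordinates.
    p = np.array([pxl[0], pxl[1], 1.0])
    q = H_inv.dot(p)
    return int(q[0] / q[2]), int(q[1] / q[2])

Equivalently, cv2.perspectiveTransform could be applied to a float32 array of points shaped (N, 1, 2).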