def pipeline_video(image, camera_matrix, dist_coeffs, prev_left, prev_right):
    # 2- Apply a distortion correction to raw images.
    undist = correct_distortion(image, camera_matrix, dist_coeffs)

    # 3- Binary image (thresholding w/ color transforms, gradients, etc.)
    binary_image = apply_thresholds(undist)

    # 4- Perspective transform (birds-eye view)
    src = np.float32([[560, 475], [725, 475], [1010, 660], [295, 660]])
    M, binary_warped = warp_image(binary_image, src)

    # 5- Detect lane pixels.
    if prev_left.detected and prev_right.detected:
        left_fitx, right_fitx, ploty, img_res, left_fit, right_fit = search_around_poly(
            binary_warped, prev_left.best_fit, prev_right.best_fit)
        detected = True
    else:
        detected, left_fitx, right_fitx, ploty, img_res, left_fit, right_fit = find_lane_pixels(binary_warped)

    left_line = Line(prev_left.n, detected, prev_left.n_undetected, prev_left.n_last_fits, left_fit)
    right_line = Line(prev_right.n, detected, prev_right.n_undetected, prev_right.n_last_fits, right_fit)

    if detected:
        # 6- Determine curvature and position with respect to center.
        curvature = measure_curvature_real(left_fitx, right_fitx, ploty)

        # 7- Warp lane boundaries back onto the original image.
        # Create an image to draw the lines on
        warp_zero = np.zeros_like(binary_warped).astype(np.uint8)
        color_warp = np.dstack((warp_zero, warp_zero, warp_zero))

        # Recast the x and y points into usable format for cv2.fillPoly()
        pts_left = np.array([np.transpose(np.vstack([left_fitx, ploty]))])
        pts_right = np.array([np.flipud(np.transpose(np.vstack([right_fitx, ploty])))])
        pts = np.hstack((pts_left, pts_right))

        # Draw the lane onto the warped blank image
        cv2.fillPoly(color_warp, np.int_([pts]), (0, 255, 0))

        # Warp the blank back to original image space using the inverse perspective matrix (Minv)
        Minv = np.linalg.inv(M)
        newwarp = cv2.warpPerspective(color_warp, Minv, (image.shape[1], image.shape[0]))

        # 8- Output visual display of the lane boundaries and numerical estimation
        #    of lane curvature and vehicle position.
        # Combine the result with the original image
        result = cv2.addWeighted(undist, 1, newwarp, 0.3, 0)

        # Curvature and offset
        center_x = np.mean(pts[:, :, 0])
        offset = np.abs((image.shape[1] // 2) - center_x)
        result = cv2.putText(result, "Curvature: {}".format(curvature), (10, 40),
                             cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
        result = cv2.putText(result, "Offset: {}".format(offset), (10, 80),
                             cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
        # plt.imshow(result)
    else:
        result = undist
        result = cv2.putText(result, "Lines not detected", (10, 80),
                             cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)

    return result, left_line, right_line
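# The helper below is a minimal sketch of what warp_image, called above, could look like.
# It assumes the destination rectangle spans the full image height with fixed x-offsets
# (the 300-px margins are an assumption, not taken from this file) and returns both the
# perspective matrix and the warped binary image.
import cv2
import numpy as np

def warp_image(binary_image, src):
    """Warp a thresholded image to a birds-eye view given four source points
    ordered top-left, top-right, bottom-right, bottom-left."""
    h, w = binary_image.shape[:2]
    # Hypothetical destination rectangle; the real offsets depend on the project's tuning.
    dst = np.float32([[300, 0], [w - 300, 0], [w - 300, h], [300, h]])
    M = cv2.getPerspectiveTransform(src, dst)
    warped = cv2.warpPerspective(binary_image, M, (w, h), flags=cv2.INTER_LINEAR)
    return M, warped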
def pipeline(img):
    # Camera calibration and image undistortion
    undist = line.camera_calibration_undistortion(img)

    # Color and gradient threshold
    binary_output = color_gradient_thresh.color_gradient_thresh(
        undist, s_thresh=(130, 255), l_thresh=(225, 255), sobel_kernel=3,
        mag_thresh=(100, 255), graddir_thresh=(0, np.pi / 2))

    # Perspective transform
    srcpoints = np.float32([[585, 460], [203, 720], [1127, 720], [695, 460]])
    dstpoints = np.float32([[320, 0], [320, 720], [960, 720], [960, 0]])
    binary_warped = perspective_transform.corners_unwarp(binary_output, srcpoints, dstpoints)

    # Choose either sliding window or search from prior, based on whether lines were detected before
    if not line.detected:
        # Sliding window to find the initial polynomial parameters
        left_fit, right_fit, left_fitx, right_fitx = sliding_window.fit_polynomial(binary_warped)
        line.detected = True
        # Refine with a search from prior around the initial fit
        left_fit, right_fit, left_fitx, right_fitx = search_from_prior.search_around_poly(
            binary_warped, left_fit, right_fit)
    else:
        # Search from prior around the averaged fit
        left_fit, right_fit, left_fitx, right_fitx = search_from_prior.search_around_poly(
            binary_warped, line.avg_left_fit, line.avg_right_fit)

    # Measure curvature
    y_eval = undist.shape[0]
    left_radius, right_radius = curvature.measure_curvature_real(left_fit, right_fit, y_eval)
    radius = (left_radius + right_radius) / 2.0
    line.update(left_fitx, right_fitx, left_fit, right_fit, radius)

    # Draw lane mark
    lane_marked_img = draw_lane.draw_lane_mark(binary_warped, line.avg_leftx, line.avg_rightx)

    # Perspective transform back to the original perspective
    binary_original = perspective_transform.corners_unwarp(lane_marked_img, dstpoints, srcpoints)

    # Combine the original image with the lane-marked image
    result = cv2.addWeighted(undist, 1, binary_original, 0.3, 0)

    # Measure the car center relative to the lane center
    xm_per_pix = 3.7 / 378  # meters per pixel in the x dimension
    line.line_base_pos = ((left_fitx[-1] + right_fitx[-1]) / 2.0 - undist.shape[1] / 2.0) * xm_per_pix
    direction = 'left' if line.line_base_pos > 0 else 'right'

    # Print radius and distance information
    font = cv2.FONT_HERSHEY_SIMPLEX
    str1 = str('Radius of curvature ' + '{:04.2f}'.format(line.avg_curvature / 1000) + 'km')
    cv2.putText(result, str1, (400, 50), font, 1, (255, 255, 255), 2, cv2.LINE_AA)
    str2 = str('{:04.2f}'.format(abs(line.line_base_pos)) + 'm ' + direction + ' of center')
    cv2.putText(result, str2, (400, 80), font, 1, (255, 255, 255), 2, cv2.LINE_AA)

    return result
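# A minimal sketch of the curvature helper called above (curvature.measure_curvature_real),
# assuming left_fit/right_fit are second-order polynomials x = A*y**2 + B*y + C fitted in
# pixel space and that the usual scalings of roughly 30/720 m per pixel in y and
# 3.7/700 m per pixel in x apply. Both conversion factors are assumptions; this project
# uses 3.7/378 in x elsewhere, so the real module may differ.
import numpy as np

YM_PER_PIX = 30 / 720   # meters per pixel in the y dimension (assumed)
XM_PER_PIX = 3.7 / 700  # meters per pixel in the x dimension (assumed)

def measure_curvature_real(left_fit, right_fit, y_eval):
    """Return the left/right radii of curvature in meters at the given pixel row."""
    radii = []
    for fit in (left_fit, right_fit):
        # Rescale the pixel-space coefficients into meter space.
        a = fit[0] * XM_PER_PIX / (YM_PER_PIX ** 2)
        b = fit[1] * XM_PER_PIX / YM_PER_PIX
        y_m = y_eval * YM_PER_PIX
        radii.append((1 + (2 * a * y_m + b) ** 2) ** 1.5 / abs(2 * a))
    return radii[0], radii[1]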
def process_image(img):
    """
    This function finds the lane lines in each image and stores their information in the global line objects
    :param img: One frame of the highway video
    :return: Annotated image
    """
    # Correct for distortion based on calibration parameters
    undistorted = cv2.undistort(img, mtx, dist, None, mtx)

    # Apply filter to detect lines in image
    line_image = thresholds(undistorted, s_thresh=(150, 255), sx_thresh=(30, 100))

    # Warp the image to achieve a birds-eye view
    warped, M = perspective_transform(line_image)

    # Locate lines using moving boxes if lane lines were not detected in the previous frame
    if not left_line.detected or len(left_line.all_fit) < left_line.n:
        print('Attempting Box algorithm')
        out_img, left_fit, right_fit, ploty, left_fitx, right_fitx, lefty, leftx, righty, rightx = \
            fit_polynomial(warped)
    # Locate lines by searching near previously found lane lines
    else:
        print('Attempting Search around Polynomial')
        out_img, left_fit, right_fit, ploty, left_fitx, right_fitx, lefty, leftx, righty, rightx = \
            search_around_poly(warped, left_line.best_fit, right_line.best_fit)
        # If this algorithm fails, use the Box algorithm instead
        if out_img is None:
            print('Search around Polynomial was NOT successful.')
            out_img, left_fit, right_fit, ploty, left_fitx, right_fitx, lefty, leftx, righty, rightx = \
                fit_polynomial(warped)

    # Calculate curvature for the found lane line polynomials at the bottom of the image
    left_curv, right_curv, left_fit_cr, right_fit_cr = measure_curvature_real(
        warped, copy.copy(left_fit), copy.copy(right_fit))

    # The following if statements check whether the found curves are reasonable by verifying:
    # 1. That the distance between the two curves is reasonable
    # 2. That both polynomials exist
    # 3. That their curvatures are similar (curvatures are allowed to vary by 50%)
    # 4. That the curves are similar (c1 and c2 are allowed to vary by 50%)
    valid = True
    right_base = poly_result(right_fit, 720)
    left_base = poly_result(left_fit, 720)
    if not ((right_base - left_base) > 2.5 / XM_PER_PIX and (right_base - left_base) < 4.5 / XM_PER_PIX):
        print(right_base)
        print(left_base)
        print("Invalid Distance")
        print("Distance: {}".format((right_base - left_base) * XM_PER_PIX))
        valid = False
    if not (left_fit is not None and right_fit is not None):
        print("Invalid Data")
        valid = False
    if not ((left_curv / right_curv) < 3 and (left_curv / right_curv) > 0.33
            and left_curv > 800 and right_curv > 800):
        print("Invalid Curvature")
        print("Left Line Curvature {}".format(left_curv))
        print("Right Line Curvature {}".format(right_curv))
        valid = False
    if not ((left_fit[0] / right_fit[0]) < 3 and (left_fit[0] / right_fit[0]) > 0.33
            and (left_fit[1] / right_fit[1]) < 3 and (left_fit[1] / right_fit[1]) > 0.33):
        print("Lines are not parallel.")
        valid = False

    # The counter avoids the algorithm getting stuck with stale fits for too long
    if valid or left_line.cnt > 3 or left_line.recent_xfitted == []:
        # Update lane lines
        left_line.update(detected=True, radius_of_curvature=left_curv, allx=None, ally=None,
                         current_fit=left_fit, fitx=left_fitx)
        right_line.update(detected=True, radius_of_curvature=right_curv, allx=None, ally=None,
                          current_fit=right_fit, fitx=right_fitx)
        # print("Curvature of left lane line: {}".format(left_curv))
        print("Curvature of right lane line: {}".format(right_curv))
    else:
        print("Warning: Lines were not detected!")
        left_line.detected = False
        right_line.detected = False
        left_line.cnt += 1
        right_line.cnt += 1
        print("Curvature of right lane line: {}".format(right_curv))

    lane_curvature = measure_lane_curvature_real(
        warped, copy.copy(left_line.best_fit), copy.copy(right_line.best_fit))
    offset = lane_center_offset(left_line.line_base_pos, right_line.line_base_pos)
    print("Lane Center Offset: {}".format(offset))

    output = draw_image(warped, left_line.recent_xfitted, right_line.recent_xfitted, ploty, img, inv(M), undistorted)

    # I used this as an example to understand how to use cv2.putText:
    # https://www.programcreek.com/python/example/83399/cv2.putText
    cv2.putText(output, "Radius of curvature: {:.2f}m".format(lane_curvature), org=(200, 75),
                fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=1, color=(255, 255, 255),
                thickness=2, lineType=cv2.LINE_AA)
    if offset < 0:
        offset_string = "Vehicle is {:.2f}m left of center.".format(abs(offset))
    else:
        offset_string = "Vehicle is {:.2f}m right of center.".format(offset)
    cv2.putText(output, offset_string, org=(200, 150),
                fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=1, color=(255, 255, 255),
                thickness=2, lineType=cv2.LINE_AA)
    # cv2.imshow("Display Corners", output)
    # cv2.waitKey(0)
    return output
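# Minimal sketches of the two small helpers used in process_image above. poly_result simply
# evaluates a second-order fit at a given pixel row; lane_center_offset is written here under
# the assumption that both base positions are already in meters, that the camera sits at the
# image center (640 px of a 1280-px-wide frame), and that negative values mean the vehicle is
# left of center, matching the text overlay above. The 3.7/700 m-per-pixel factor and the
# sign convention are assumptions, not taken from this file.
XM_PER_PIX = 3.7 / 700  # assumed meters per pixel in the x dimension

def poly_result(fit, y):
    """Evaluate x = A*y**2 + B*y + C at pixel row y."""
    return fit[0] * y ** 2 + fit[1] * y + fit[2]

def lane_center_offset(left_base, right_base):
    """Signed offset of the vehicle (image center) from the lane center, in meters."""
    lane_center = (left_base + right_base) / 2.0
    vehicle_center = 640 * XM_PER_PIX  # image center in meters (assumed 1280-px frames)
    return vehicle_center - lane_center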
def pipeline(write_images, show_images, show_images2, write_images2, show_images3, write_images3,
             show_images4, write_images4):
    """
    This is the pipeline used to develop the image annotation algorithm used in main.py
    See main.py for more information regarding the code
    :return: None
    """
    # Best guess initial polynomial
    left_fit = np.array([0, 0, 217])
    right_fit = np.array([0, 0, 950])

    # Calibration
    do_calibration = False
    if do_calibration:
        calibration_images = "camera_cal/"
        dimension_list = [[5, 9], [6, 9], [6, 9], [6, 9], [6, 9], [6, 9], [6, 9], [6, 9], [6, 9], [6, 9],
                          [6, 9], [6, 9], [6, 9], [6, 9], [6, 8], [6, 7], [6, 9], [6, 9], [6, 9], [6, 9]]
        mtx, dist = camera_calibration(calibration_images, dimension_list, display_corners=False)
    else:
        mtx = np.array([[1.16067642e+03, 0.00000000, 6.65724920e+02],
                        [0.00000000, 1.15796966e+03, 3.88971790e+02],
                        [0.00000000, 0.00000000, 1.00000000]])
        dist = np.array([[-0.25427498, 0.04987807, -0.00043039, 0.00027334, -0.1336389]])

    # Load test images
    test_images = "test_images/"
    all_files = os.listdir(test_images)
    image_names = [file for file in all_files if file[-4:] == '.jpg']

    # The init flag decides if the box algorithm or the search near polynomial is used to find lane lines
    init = True
    for image in image_names:
        print(test_images + image)
        img = cv2.imread(test_images + image)
        undistorted = cv2.undistort(img, mtx, dist, None, mtx)
        line_image = thresholds(undistorted, s_thresh=(150, 255), sx_thresh=(30, 100))
        if write_images:
            cv2.imwrite(test_images + "filtered/" + image, line_image * 255)
        if show_images:
            compare_images(undistorted, line_image, False, True, 'Undistorted Image', 'Filtered Image')

        warped, M = perspective_transform(line_image)
        if show_images2:
            compare_images(line_image, warped, True, True, 'Filtered Image', 'Warped Image')
        if write_images2:
            cv2.imwrite(test_images + "warped/" + image, warped * 255)

        if init:
            out_img, left_fit, right_fit, ploty, left_fitx, right_fitx, lefty, leftx, righty, rightx = \
                fit_polynomial(warped)
            if show_images3:
                # print(out_img)
                compare_images(undistorted, out_img, True, True, 'Undistorted Image', 'Annotated Image')
            if write_images3:
                cv2.imwrite(test_images + "fitted/" + image, out_img)
        else:
            out_img, left_fit, right_fit, ploty, left_fitx, right_fitx, lefty, leftx, righty, rightx = \
                search_around_poly(warped, left_fit, right_fit)
            if show_images3:
                # print(out_img)
                compare_images(undistorted, out_img, True, True, 'Undistorted Image', 'Annotated Image')
            if write_images3:
                cv2.imwrite(test_images + "fitted/" + image, out_img)

        left_curv, right_curv, left_fit_cr, right_fit_cr = measure_curvature_real(
            warped, copy.copy(left_fit), copy.copy(right_fit))
        # print("Left Fit: {}".format(left_fit))
        # print("Right Fit: {}".format(right_fit))
        print("Left curvature: {}".format(left_curv))
        print("Right curvature: {}".format(right_curv))

        right_base = poly_result(right_fit, 720) * XM_PER_PIX
        left_base = poly_result(left_fit, 720) * XM_PER_PIX
        print("Left Base: {}".format(left_base))
        print("Right Base: {}".format(right_base))
        offset = lane_center_offset(left_base, right_base)
        print("Lane Center Offset: {}".format(offset))

        # Sanity checks on the two fits:
        # - they have similar curvature
        # - they are separated by approximately the right distance horizontally
        # - they are roughly parallel (?)
        if (left_curv / right_curv) < 1.5 and (left_curv / right_curv) > 0.66 \
                and right_fit_cr[0] - left_fit_cr[0] > 3 and right_fit_cr[0] - left_fit_cr[0] < 4.5:
            valid = True

        output = draw_image(warped, left_fitx, right_fitx, ploty, img, inv(M), undistorted)
        if show_images4:
            compare_images(line_image, output, True, False, 'Line', 'Annotated Image')
        if write_images4:
            cv2.imwrite(test_images + "final/" + image, output)
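# A minimal sketch of the chessboard calibration step that the do_calibration branch above
# relies on, assuming the usual cv2.findChessboardCorners / cv2.calibrateCamera flow. The
# file selection, the dimension_list format ([rows, cols] of inner corners per image), and
# the image-size handling are assumptions, not taken from this file.
import os
import cv2
import numpy as np

def camera_calibration(folder, dimension_list, display_corners=False):
    """Estimate the camera matrix and distortion coefficients from chessboard images."""
    objpoints, imgpoints = [], []
    image_names = [f for f in sorted(os.listdir(folder)) if f.endswith('.jpg')]
    gray = None
    for name, (ny, nx) in zip(image_names, dimension_list):
        img = cv2.imread(os.path.join(folder, name))
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        found, corners = cv2.findChessboardCorners(gray, (nx, ny), None)
        if found:
            # Reference grid of (x, y, 0) corner positions in board coordinates.
            objp = np.zeros((nx * ny, 3), np.float32)
            objp[:, :2] = np.mgrid[0:nx, 0:ny].T.reshape(-1, 2)
            objpoints.append(objp)
            imgpoints.append(corners)
            if display_corners:
                cv2.drawChessboardCorners(img, (nx, ny), corners, found)
    # Calibrate against all successfully detected boards.
    _, mtx, dist, _, _ = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)
    return mtx, dist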