def processing(img, object_points, img_points, M, Minv, left_line, right_line):
    # camera calibration, image distortion correction
    undist = utils.cal_undistort(img, object_points, img_points)
    # get the thresholded binary image
    thresholded = thresholding(undist)
    # perform perspective transform
    thresholded_wraped = cv2.warpPerspective(thresholded, M, img.shape[1::-1], flags=cv2.INTER_LINEAR)
    # perform detection
    if left_line.detected and right_line.detected:
        left_fit, right_fit, left_lane_inds, right_lane_inds = utils.find_line_by_previous(
            thresholded_wraped, left_line.current_fit, right_line.current_fit)
    else:
        left_fit, right_fit, left_lane_inds, right_lane_inds = utils.find_line(
            thresholded_wraped)
    left_line.update(left_fit)
    right_line.update(right_fit)
    # draw the detected laneline and the information
    area_img = utils.draw_area(undist, thresholded_wraped, Minv, left_fit, right_fit)
    curvature, pos_from_center = utils.calculate_curv_and_pos(
        thresholded_wraped, left_fit, right_fit)
    result = utils.draw_values(area_img, curvature, pos_from_center)
    return result
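# A minimal sketch of the Line tracker that the processing() variants in this
# section assume: an object exposing .detected, .current_fit and .update(fit).
# The smoothing window and the "detected" heuristic below are illustrative
# assumptions, not taken from the original utils module.
import numpy as np

class Line:
    def __init__(self, n_smooth=5):
        self.detected = False      # was a fit found on the last frame?
        self.current_fit = None    # most recent (smoothed) polynomial coefficients
        self.recent_fits = []      # last n_smooth fits, kept for averaging
        self.n_smooth = n_smooth

    def update(self, fit):
        if fit is None:
            self.detected = False
            return
        self.detected = True
        self.recent_fits.append(fit)
        if len(self.recent_fits) > self.n_smooth:
            self.recent_fits.pop(0)
        # average the coefficients over the last few frames to reduce jitter
        self.current_fit = np.mean(self.recent_fits, axis=0)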
def pipline_img(img, M, Minv):
    '''img is an already binarized, filtered image'''
    thresholded = utils.thresholding(img)
    thresed_warped = Mtrans.warper(thresholded, M)
    # find line points
    left_fit, right_fit, left_lane_inds, right_lane_inds, leftx_base, rightx_base = find_line(thresed_warped)
    cur, dst = utils.calculate_curv_and_pos(thresed_warped, left_fit, right_fit)
    # df = np.concatenate([left_fit.reshape(1, -1), right_fit.reshape(1, -1)], axis=0)
    # print(df, left_fit.reshape(1, -1))
    res = utils.draw_area(img, thresed_warped, Minv, left_fit, right_fit)
    return utils.draw_values(res, cur, dst)
def processing(img, object_points, img_points, M, Minv, left_line, right_line):
    undist = utils.cal_undistort(img, object_points, img_points)
    thresholded = thresholding(undist)
    thresholded_wraped = cv2.warpPerspective(thresholded, M, img.shape[1::-1], flags=cv2.INTER_LINEAR)
    if left_line.detected and right_line.detected:
        left_fit, right_fit, left_lane_inds, right_lane_inds = utils.find_line_by_previous(
            thresholded_wraped, left_line.current_fit, right_line.current_fit)
    else:
        left_fit, right_fit, left_lane_inds, right_lane_inds = utils.find_line(
            thresholded_wraped)
    left_line.update(left_fit)
    right_line.update(right_fit)
    area_img = utils.draw_area(undist, thresholded_wraped, Minv, left_fit, right_fit)
    curvature, pos_from_center = utils.calculate_curv_and_pos(
        thresholded_wraped, left_fit, right_fit)
    result = utils.draw_values(area_img, curvature, pos_from_center)
    return result
def processing(img, M, Minv, left_line, right_line):
    prev_time = time.time()
    img = Image.fromarray(img)
    undist = img
    # get the thresholded binary image
    img = np.array(img)
    blur_img = cv2.GaussianBlur(img, (3, 3), 0)
    Sobel_x_thresh = utils.abs_sobel_thresh(blur_img, orient='x', thresh_min=90, thresh_max=255)
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
    dilated = cv2.dilate(Sobel_x_thresh, kernel, iterations=2)
    # perform perspective transform
    thresholded_wraped = cv2.warpPerspective(dilated, M, img.shape[1::-1], flags=cv2.INTER_LINEAR)
    # perform detection
    if left_line.detected and right_line.detected:
        left_fit, right_fit = utils.find_line_by_previous(
            thresholded_wraped, left_line.current_fit, right_line.current_fit)
    else:
        left_fit, right_fit = utils.find_line(thresholded_wraped)
    left_line.update(left_fit)
    right_line.update(right_fit)
    # draw the detected laneline and the information
    undist = Image.fromarray(img)
    area_img = utils.draw_area(undist, thresholded_wraped, Minv, left_fit, right_fit)
    area_img = np.array(area_img)
    curr_time = time.time()
    exec_time = curr_time - prev_time
    info = "time: %.2f ms" % (1000 * exec_time)
    print(info)
    return area_img, thresholded_wraped
def processing(img, M, Minv, left_line, right_line):
    # img = RotateClockWise90(img)
    # img = np.rot90(img)
    prev_time = time.time()
    img = Image.fromarray(img)
    undist = img
    # get the thresholded binary image
    img = np.array(img)
    thresholded = thresholding(img)
    # perform perspective transform
    thresholded_wraped = cv2.warpPerspective(thresholded, M, img.shape[1::-1], flags=cv2.INTER_LINEAR)
    # perform detection
    if left_line.detected and right_line.detected:
        left_fit, right_fit, left_lane_inds, right_lane_inds = utils.find_line_by_previous(
            thresholded_wraped, left_line.current_fit, right_line.current_fit)
    else:
        left_fit, right_fit, left_lane_inds, right_lane_inds = utils.find_line(
            thresholded_wraped)
    left_line.update(left_fit)
    right_line.update(right_fit)
    # draw the detected laneline and the information
    undist = Image.fromarray(img)
    area_img, gre1 = utils.draw_area(undist, thresholded_wraped, Minv, left_fit, right_fit)
    curvature, pos_from_center = utils.calculate_curv_and_pos(
        thresholded_wraped, left_fit, right_fit)
    area_img = np.array(area_img)
    result = utils.draw_values(area_img, curvature, pos_from_center)
    curr_time = time.time()
    exec_time = curr_time - prev_time
    info = "time: %.2f ms" % (1000 * exec_time)
    print(info)
    return result, thresholded_wraped
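# A hedged usage sketch for the timing variant above: drive processing() from an
# OpenCV capture loop. The video path, the Line instances and the source of
# M/Minv are assumptions for illustration; depending on the thresholding used,
# the BGR frames from VideoCapture may first need converting to RGB.
import cv2

def run_video(path, M, Minv, left_line, right_line):
    cap = cv2.VideoCapture(path)
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        # processing() prints its own per-frame execution time
        result, birdseye = processing(frame, M, Minv, left_line, right_line)
        cv2.imshow("result", result)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cap.release()
    cv2.destroyAllWindows()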
def processing(img, object_points, img_points, M, Minv, left_line, right_line):
    # camera calibration, image distortion correction
    # undist = utils.cal_undistort(img, object_points, img_points)
    cv2.imshow("img", img)
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    # set the lower and upper thresholds to remove the background color
    lower_red = np.array([80, 0, 0])
    upper_red = np.array([160, 255, 150])
    lower_white = np.array([0, 0, 221])
    upper_white = np.array([180, 30, 255])
    lower_yellow = np.array([100, 43, 46])
    upper_yellow = np.array([120, 255, 255])
    lower_lane = np.array([0, 0, 50])
    upper_lane = np.array([180, 43, 120])
    # create the mask
    # bitwise-AND the original image with the mask
    white_img = colorMask("white_mask", img, hsv, lower_white, upper_white)
    # yellow_img = colorMask("yellow_mask", img, hsv, lower_yellow, upper_yellow)
    # lane_img = colorMask("lane_mask", img, hsv, lower_lane, upper_lane)
    undist = img
    gray = cv2.cvtColor(white_img, cv2.COLOR_RGB2GRAY)
    ret, binary = cv2.threshold(gray, 200, 255, cv2.THRESH_BINARY | cv2.THRESH_TRIANGLE)
    ret, binary2 = cv2.threshold(gray, 200, 1, cv2.THRESH_BINARY | cv2.THRESH_TRIANGLE)
    # cv2.imshow("binary", binary)
    rgb = cv2.cvtColor(binary, cv2.COLOR_GRAY2RGB)
    cv2.imshow("rgb", rgb)
    # cv2.imshow("binary2", binary)
    # get the thresholded binary image
    thresholded = thresholding(rgb)
    cv2.imshow("thresholded*255", thresholded * 255)
    # perform perspective transform
    thresholded_wraped = cv2.warpPerspective(binary2, M, img.shape[1::-1], flags=cv2.INTER_LINEAR)
    cv2.imshow("thresholded_wraped*255", thresholded_wraped * 255)
    # perform detection
    if left_line.detected and right_line.detected:
        print("find line")
        left_fit, right_fit, left_lane_inds, right_lane_inds = utils.find_line_by_previous(
            thresholded_wraped, left_line.current_fit, right_line.current_fit)
    else:
        print("no find line")
        left_fit, right_fit, left_lane_inds, right_lane_inds = utils.find_line(
            thresholded_wraped)
    left_line.update(left_fit)
    right_line.update(right_fit)
    # draw the detected laneline and the information
    area_img = utils.draw_area(undist, thresholded_wraped, Minv, left_fit, right_fit)
    cv2.imshow("area_img", area_img)
    cv2.waitKey(50)
    # print(area_img)
    # curvature, pos_from_center = utils.calculate_curv_and_pos(thresholded_wraped, left_fit, right_fit)
    # result = utils.draw_values(area_img, curvature, pos_from_center)
    # result = area_img
    result = rgb
    return result
plt.savefig('lane fitting.png')


# In[23]:


for binary_warped in perspective_transformed_images:
    left_fit, right_fit, left_lane_inds, right_lane_inds = utils.find_line(
        binary_warped)
    curvature, distance_from_center = utils.calculate_curv_and_pos(
        binary_warped, left_fit, right_fit)


# In[24]:


plt.figure(figsize=(20, 40))
for i in range(0, (len(undistorted_test_images))):
    left_fit, right_fit, left_lane_inds, right_lane_inds = utils.find_line(
        perspective_transformed_images[i])
    curvature, distance_from_center = utils.calculate_curv_and_pos(
        perspective_transformed_images[i], left_fit, right_fit)
    result = utils.draw_area(undistorted_test_images[i],
                             perspective_transformed_images[i], Minv,
                             left_fit, right_fit)
    img = utils.draw_values(result, curvature, distance_from_center)
    plt.subplot(len(undistorted_test_images), 1, i + 1)
    plt.title('result')
    plt.imshow(img)
plt.savefig('result.png')


# In[ ]: