def _detect_lane(self, image, left_fit_prev=None, right_fit_prev=None):
    """Run the full lane-detection pipeline on a single frame.

    Undistorts the frame, thresholds it into a binary image, warps it to a
    bird's-eye view, fits left/right lane polynomials (optionally seeded
    with fits from the previous frame), and warps the detected lane back
    onto the undistorted frame.

    Returns a tuple of (annotated image, left fit, right fit, curvature,
    vehicle offset).
    """
    # Remove lens distortion using the stored calibration parameters.
    camera_mtx, dist_coeffs = self.undis_param
    undistorted = cv2.undistort(image, camera_mtx, dist_coeffs, None, camera_mtx)

    # Gradient/color thresholding produces a binary lane-candidate mask.
    thresholder = Thresholds(undistorted, self.debug)
    binary = thresholder.apply_thresholds(
        ksize=self.ksize,
        s_thresh=self.s_thresh,
        sx_thresh=self.sx_thresh,
        sy_thresh=self.sy_thresh,
        m_thresh=self.m_thresh,
        d_thresh=self.d_thresh,
    )

    # Bird's-eye perspective warp of the binary mask.
    perspective = PerspectiveTransform(binary, image, self.debug)
    warped = perspective.warp()

    # Fit second-order polynomials to the left and right lane markers,
    # reusing the previous frame's fits when available.
    fitter = FitPolynomial(warped, self.debug)
    left_fit, right_fit = fitter.fit_polynomial(left_fit_prev, right_fit_prev)

    # Project the detected lane area back onto the undistorted frame.
    annotated = perspective.warp_back(undistorted, fitter)

    return annotated, left_fit, right_fit, fitter.curvature(), fitter.offset()
def __init__(self, src, dst, n_images=1, calibration=None, line_segments=LINE_SEGMENTS, offset=0):
    """Initialize the lane detector.

    src/dst are the perspective-transform source and destination points;
    n_images is how many past frames to consider; calibration holds the
    camera calibration (None when uncalibrated); line_segments is the
    number of sliding-window segments; offset is a horizontal image offset
    in pixels.
    """
    # Configuration supplied by the caller.
    self.n_images = n_images
    self.camera_calibration = calibration
    self.line_segments = line_segments
    self.image_offset = offset

    # Per-frame detection state, populated as frames are processed.
    self.left_line = self.right_line = self.center_poly = None
    self.curvature = self.offset = 0.0
    self.distances = []

    # Bird's-eye view transform built from the given point pairs.
    self.perspective = PerspectiveTransform(src, dst)
def __init__(self, window_width=35, window_height=120, margin=40):
    """Initialize with sliding-window geometry for lane tracking.

    window_width/window_height define the convolution window used to
    locate lane pixels; margin is the search margin around a previous fit.
    """
    # The camera must already be calibrated; the pickled calibration
    # matrix is produced by calibration.py (see that file for a how-to).
    self.mtx, self.dist = load_calibration_matrix('camera_cal/dist_pickle.p')

    self.window_width = window_width
    self.window_height = window_height
    self.margin = margin

    self.perspective = PerspectiveTransform(debug=True)
    # 30/720 and 3.7/700 convert pixels to meters (y and x respectively).
    self.tracker = LaneTracker(window_width, window_height, margin, 30 / 720, 3.7 / 700)
'./images/' + f for f in os.listdir('./images') if os.path.isfile(os.path.join('./images', f)) ] # Selecting random file for testing file_img = example_files[np.random.randint(0, len(example_files))] # file_img = './images/806123698_321554.jpg' # Good file for testing img = imageio.imread(file_img) plt.figure(figsize=(10, 10)) plt.imshow(img) plt.show() # Finding corners from input image corner_points = CornerDetector(img).find_corners4().astype(np.float32) corner_points[:, [0, 1]] = corner_points[:, [1, 0]] # Computing the perspective transform img_p = PerspectiveTransform(img, corner_points).four_point_transform() # Finding text areas img_cv = cv2.cvtColor(img_p, cv2.COLOR_RGB2BGR) # Testing with different structuring element sizes sizes = [(17, 3), (30, 10), (5, 5), (9, 3)] for size in sizes: strs, bound_rects, img_bboxes = TextDetector(img_cv, size).recognize_text() plt.figure(figsize=(10, 10)) plt.imshow(cv2.cvtColor(img_bboxes, cv2.COLOR_BGR2RGB)) plt.show() print(size) print(*strs, sep='\n')
__author__ = 'z84105425'
# -*- coding:utf-8 -*-
import cv2

from threshold import Threshold
from perspective_transform import PerspectiveTransform

if __name__ == '__main__':
    # Load the input image and fail loudly if it is missing:
    # cv2.imread returns None instead of raising on a bad path.
    image_path = "bios.jpg"
    img = cv2.imread(image_path)
    if img is None:
        raise FileNotFoundError("Could not read image: " + image_path)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    # Compute the two threshold values from the grayscale image.
    th = Threshold(gray.shape[1], gray.shape[0], 0.83, 0.03)
    thresh_1 = th.get_thresh_1(gray)
    thresh_2 = th.get_thresh_2(gray)

    # Binarize with each threshold; the returned flag is unused.
    _, binary1 = cv2.threshold(gray, thresh_1, 255, cv2.THRESH_BINARY)
    _, binary2 = cv2.threshold(gray, thresh_2, 255, cv2.THRESH_BINARY)

    # Display both binarized results and save the first one.
    cv2.namedWindow("binary1", 0)
    cv2.namedWindow("binary2", 0)
    cv2.imshow("binary1", binary1)
    cv2.imshow("binary2", binary2)
    cv2.imwrite('binary1.jpg', binary1)
    cv2.waitKey(0)
    # Release the HighGUI windows once the user dismisses them.
    cv2.destroyAllWindows()

    # Apply the perspective transform to the binarized image.
    pt = PerspectiveTransform(10000)
    pt.perspective_transform(img, binary1)