import calibrate
import lanedetect
import cv2

ret, mtx, dist, rvecs, tvecs = calibrate.calibrate("data/camera_cal/", 6, 9)

cap = cv2.VideoCapture('data/video/project_video.mp4')
if not cap.isOpened():
    print("Error opening video stream or file")

while cap.isOpened():
    ret, frame = cap.read()
    if ret:
        ds = calibrate.undistort(frame, mtx, dist)
        ds = lanedetect.filterYellowWhiteLane(ds)
        resized = cv2.resize(ds, (960, 540))
        cv2.imshow('output', resized)
        if cv2.waitKey(25) & 0xFF == ord('q'):
            break
    else:
        break

cap.release()
cv2.destroyAllWindows()
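# The filterYellowWhiteLane helper is not shown above; here is a minimal sketch of
# what such a colour filter could look like, assuming an HSV-based threshold.
# The ranges below are illustrative assumptions, not the author's tuned values.
import cv2
import numpy as np

def filter_yellow_white(bgr_img):
    """Keep only pixels that fall in a rough yellow or white HSV range."""
    hsv = cv2.cvtColor(bgr_img, cv2.COLOR_BGR2HSV)
    # Yellow lane paint: hue roughly 15-35, reasonably saturated and bright.
    yellow = cv2.inRange(hsv, (15, 80, 120), (35, 255, 255))
    # White lane paint: any hue, low saturation, high brightness.
    white = cv2.inRange(hsv, (0, 0, 200), (180, 40, 255))
    mask = cv2.bitwise_or(yellow, white)
    return cv2.bitwise_and(bgr_img, bgr_img, mask=mask)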
def process_image(self, img):
    print('> Processing frame {}'.format(self.frame))

    undistort = calibrate.undistort(img, self.mtx, self.dist)
    binary = apply_filters(undistort)

    # Warp binary image
    warped = transform.transform(binary, src=self.src, dst=self.dst)

    # Search for lane
    sw = histogram.SlidingWindow(warped)
    sw.search()

    valid_left, valid_right = self.lines.validate(sw.left_x, sw.right_x)
    sw_left_curve, sw_right_curve = sw.curvature_ft()

    if len(self.lines.lines) > 0:
        new_left, new_right = self.lines.lines[-1]  # default to previous lines
        if valid_left:
            new_left = sw.left_x
            self.left_curve = sw_left_curve
        if valid_right:
            new_right = sw.right_x
            self.right_curve = sw_right_curve
        self.center_offset = histogram.center_offset(
            img=warped, left_x=new_left, right_x=new_right)
    else:
        new_left, new_right = sw.left_x, sw.right_x
        self.left_curve, self.right_curve = sw.curvature_ft()
        self.center_offset = sw.center_offset()

    self.lines.add_lanes(new_left, new_right)

    # Add frame
    self.frame += 1

    # Unwarp image
    left_x, right_x = self.lines.lines[-1]
    unwarped = transform.unwarp_lane(
        warped, orig_img=undistort, src=self.src, dst=self.dst,
        plot_y=sw.plot_y, left_x=left_x, right_x=right_x)

    # Add curvature, center offset
    out_img = Image.fromarray(unwarped)
    font = ImageFont.truetype('/Library/Fonts/Arial.ttf', 32)
    draw = ImageDraw.Draw(out_img)
    draw.text((10, 10),
              'Left curvature: {} ft'.format(round(self.left_curve, 2)),
              font=font)
    draw.text((10, 58),
              'Right curvature: {} ft'.format(round(self.right_curve, 2)),
              font=font)
    draw.text((10, 106),
              'Center offset: {} ft'.format(round(self.center_offset, 2)),
              font=font)

    return np.asarray(out_img)
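# curvature_ft() is not shown; it presumably evaluates the standard radius of
# curvature for a second-order fit x = A*y^2 + B*y + C at the bottom of the image,
# R = (1 + (2*A*y + B)^2)^(3/2) / |2*A|.  A minimal sketch, assuming hypothetical
# pixel-to-feet scale factors (xft_per_pix, yft_per_pix are assumptions):
import numpy as np

def curvature_ft(plot_y, lane_x, xft_per_pix=12 / 700, yft_per_pix=98 / 720):
    """Radius of curvature (in feet) of one lane line at the bottom of the frame."""
    y_eval = np.max(plot_y)
    # Refit the polynomial in real-world units before applying the formula.
    fit = np.polyfit(plot_y * yft_per_pix, lane_x * xft_per_pix, 2)
    a, b = fit[0], fit[1]
    return ((1 + (2 * a * y_eval * yft_per_pix + b) ** 2) ** 1.5) / abs(2 * a)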
idx = 0
K, D = calibrate.calibrate_camera('camera_cal', nx=9, ny=6)

while True:
    # Capture frame-by-frame
    ret, frame = cap.read()
    if not ret:
        break

    # if not ((idx > 990) and (idx < 1055)):
    #     idx += 1
    #     continue

    frame = calibrate.undistort(frame, K, D)
    resized_frame = cv2.resize(frame, RESIZE)

    # Display the resulting frame
    # cv2.imshow('frame', resized_frame)
    # if cv2.waitKey(1) & 0xFF == ord('q'):
    #     break

    road_img = transform.transform_road_img(frame)
    cv2.imwrite('prob/{}.jpg'.format(idx), road_img)

    resized_road_img = cv2.resize(road_img, RESIZE)
    # cv2.imshow('road_img', resized_road_img)
    # if cv2.waitKey(1) & 0xFF == ord('q'):
    #     break

    idx += 1  # advance the frame index so each dumped image gets a unique filename
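# transform.transform_road_img is not shown; a minimal sketch of a bird's-eye warp,
# assuming hand-picked source points on a 1280x720 frame (the coordinates below are
# illustrative assumptions, not the author's calibration):
import cv2
import numpy as np

def birdseye_warp(img):
    """Warp the road region of a camera frame into a top-down view."""
    h, w = img.shape[:2]
    # Trapezoid around the lane in the camera view...
    src = np.float32([[200, 720], [1100, 720], [700, 450], [580, 450]])
    # ...mapped to a rectangle in the top-down view.
    dst = np.float32([[300, 720], [980, 720], [980, 0], [300, 0]])
    M = cv2.getPerspectiveTransform(src, dst)
    return cv2.warpPerspective(img, M, (w, h), flags=cv2.INTER_LINEAR)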
def main():
    # calibrate the camera using the given chessboard images
    ret, mtx, dist, rvecs, tvecs = calibrate(
        path='../camera_cal/calibration*.jpg', xy=(9, 6), draw_corners=False)

    # instantiate Lane object
    lane = Lane()

    # read video
    predicted_frames = []
    input_video = 'project_video.mp4'
    cap = cv2.VideoCapture(os.path.join('../input_videos/', input_video))

    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            print('Cannot receive frame. Exiting..')
            break

        # undistort the image
        rgb_img = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)  # BGR => RGB
        undist = undistort(rgb_img, mtx, dist)

        # convert to gray
        gray = cv2.cvtColor(undist, cv2.COLOR_RGB2GRAY)  # RGB => GRAY

        # apply gradient and color thresholding
        gradx = th.abs_sobel_thresh(gray)
        direction = th.dir_thresh(gray)
        gradient_binary = np.zeros_like(direction)
        gradient_binary[(gradx == 1) & (direction == 1)] = 1
        color_binary = th.saturation_thresh(frame)

        # combine gradient and color thresholding
        thresholded_img = th.threshold(gradient_binary, color_binary)

        # perspective transform: curvature is easier to measure from a bird's-eye
        # view, and it is easier to match the car's location against a road map
        src, dst, M, M_inv = pt.get_transform_matrix()

        # transform image
        size = (thresholded_img.shape[1], thresholded_img.shape[0])
        transformed_img = cv2.warpPerspective(thresholded_img, M, size)

        # draw lines on transformed image
        gray_transformed_img = np.uint8(transformed_img * 255)
        bgr_transformed_img = cv2.cvtColor(gray_transformed_img, cv2.COLOR_GRAY2BGR)
        # pt.draw_plot_save(bgr_transformed_img, dst, 'Test Transformation',
        #                   '../output_images/test_transform.png')

        # fit lines
        left_fit, right_fit, y, offset_meters = lane.fit_polynomials(transformed_img)

        # create blank for drawing lane lines
        zeros = np.zeros_like(transformed_img).astype(np.uint8)
        draw_img = np.dstack((zeros, zeros, zeros))

        # format points for fillPoly
        pts_left = np.array([np.transpose(np.vstack([left_fit, y]))])
        pts_right = np.array([np.flipud(np.transpose(np.vstack([right_fit, y])))])
        pts = np.hstack((pts_left, pts_right))  # [pts_left, pts_right]
        cv2.fillPoly(draw_img, np.int_([pts]), (0, 255, 0))

        # unwarp transformed image
        unwarped = cv2.warpPerspective(draw_img, M_inv, (gray.shape[1], gray.shape[0]))

        # combine lane drawing w/ original image
        final_image = cv2.addWeighted(undist, 1, unwarped, 0.25, 0)

        # add measurement data to frame
        offset_side = 'left' if offset_meters < 0 else 'right'
        final_image = cv2.putText(
            final_image,
            f'Offset: {abs(offset_meters):0.2f}m {offset_side} of center',
            (50, 50), cv2.FONT_HERSHEY_COMPLEX, 1, (255, 255, 255), 2, cv2.LINE_AA)

        # show prediction
        cv2.imshow('frame', cv2.cvtColor(final_image, cv2.COLOR_RGB2BGR))

        # store predicted frames
        predicted_frames.append(final_image)

        if cv2.waitKey(1) == ord('q'):
            break

    # release cap object
    cap.release()
    cv2.destroyAllWindows()
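# th.abs_sobel_thresh and th.dir_thresh are not shown; a minimal sketch of the usual
# gradient-magnitude and gradient-direction thresholds, with illustrative threshold
# ranges (assumptions, not the author's tuned values):
import cv2
import numpy as np

def abs_sobel_thresh(gray, orient='x', ksize=3, thresh=(20, 100)):
    """Binary mask of pixels whose scaled |Sobel| falls inside thresh."""
    dx, dy = (1, 0) if orient == 'x' else (0, 1)
    sobel = np.absolute(cv2.Sobel(gray, cv2.CV_64F, dx, dy, ksize=ksize))
    scaled = np.uint8(255 * sobel / np.max(sobel))
    binary = np.zeros_like(scaled)
    binary[(scaled >= thresh[0]) & (scaled <= thresh[1])] = 1
    return binary

def dir_thresh(gray, ksize=15, thresh=(0.7, 1.3)):
    """Binary mask of pixels whose gradient direction is roughly lane-like."""
    sobel_x = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=ksize))
    sobel_y = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=ksize))
    direction = np.arctan2(sobel_y, sobel_x)
    binary = np.zeros_like(direction, dtype=np.uint8)
    binary[(direction >= thresh[0]) & (direction <= thresh[1])] = 1
    return binary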
    line_warp = line_lt.draw(line_warp, color=(255, 0, 0), average=keep_state)
    line_warp = line_rt.draw(line_warp, color=(0, 0, 255), average=keep_state)
    line_dewarped = cv2.warpPerspective(line_warp, Minv, (width, height))

    lines_mask = blend_onto_road.copy()
    idx = np.any(line_dewarped != 0, axis=2)
    lines_mask[idx] = line_dewarped[idx]

    blend_onto_road = cv2.addWeighted(src1=lines_mask, alpha=0.8,
                                      src2=blend_onto_road, beta=0.5, gamma=0.)

    return blend_onto_road


if __name__ == '__main__':

    line_lt, line_rt = Line(buffer_len=10), Line(buffer_len=10)

    ret, mtx, dist, rvecs, tvecs = calibrate_camera(calib_images_dir='camera_cal')

    # show result on test images
    for test_img in glob.glob('test_images/*.jpg'):
        img = cv2.imread(test_img)
        img_undistorted = undistort(img, mtx, dist, verbose=False)
        img_binary = binarize(img_undistorted, verbose=False)
        img_birdeye, M, Minv = birdeye(img_binary, verbose=False)
        line_lt, line_rt, img_out = get_fits_by_sliding_windows(
            img_birdeye, line_lt, line_rt, n_windows=7, verbose=True)
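# get_fits_by_sliding_windows typically seeds the window search from a histogram of
# the bottom half of the bird's-eye binary image; a minimal sketch of that seeding
# step (an assumption about the implementation, which is not shown here):
import numpy as np

def lane_base_positions(binary_birdeye):
    """Return the x positions where the left and right window searches start."""
    h, w = binary_birdeye.shape[:2]
    # Sum white pixels column-wise over the bottom half of the image.
    histogram = np.sum(binary_birdeye[h // 2:, :], axis=0)
    midpoint = w // 2
    left_base = int(np.argmax(histogram[:midpoint]))
    right_base = int(np.argmax(histogram[midpoint:])) + midpoint
    return left_base, right_base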