def main():
    args = parse_args()
    if not os.path.exists(IMGSDIR_CALIB):
        print "(ERROR) Calibration images not found. Please place \
the LDWS_calibrate_short/ images in the current directory."
        exit(1)
    if not os.path.exists(IMGSDIR_TEST):
        print "(ERROR) Test images not found. Please place the \
LDWS_test_short/ images in the current directory."
        exit(1)
    imgpaths_calib = util.get_imgpaths(IMGSDIR_CALIB)
    if not os.path.isdir(args.imgsdir):
        imgpaths_test = [args.imgsdir]
    else:
        imgpaths_test = util.get_imgpaths(args.imgsdir)
    if args.reuse_calib:
        print "(Reusing camera calibration matrix)"
        K = np.array([[674.07224154, 0., 262.77722917],
                      [0., 670.26875783, 330.21546389],
                      [0., 0., 1.]])
    else:
        print "(Estimating camera matrix...)"
        t = time.time()
        K = calibrate_camera.calibrate_camera(imgpaths_calib, 8, 8, 0.048)
        dur = time.time() - t
        print "(Finished. {0:.4f}s)".format(dur)

    for i, imgpath in enumerate(imgpaths_test):
        print "\n==== ({0}/{1}) Detecting lanes... [{2}] ====".format(
            i + 1, len(imgpaths_test), os.path.split(imgpath)[1])
        I = cv2.imread(imgpath, cv2.CV_LOAD_IMAGE_GRAYSCALE)
        h, w = I.shape[0:2]
        t = time.time()
        line1, line2 = detect_lanes.detect_lanes(I, win1=WIN_LEFT, win2=WIN_RIGHT,
                                                 threshold1=110, threshold2=220,
                                                 apertureSize=3, show_edges=False)
        dur = time.time() - t
        print "    Finished detecting lanes ({0:.4f}s)".format(dur)
        if line1 is None or line2 is None:
            print "({0}/{1}) Error: Couldn't find lanes.".format(
                i + 1, len(imgpaths_test))
            continue

        # Choose 4 points on the lanes to estimate the planar homography
        y1 = intrnd(0.45 * h)
        y2 = intrnd(0.65 * h)
        pts = np.array([[compute_x(line1, y1), y1],   # Left lane, far
                        [compute_x(line2, y1), y1],   # Right lane, far
                        [compute_x(line1, y2), y2],   # Left lane, close
                        [compute_x(line2, y2), y2]])  # Right lane, close
        # These world points have the origin at the middle of the lane,
        # directly below the camera. Depths 5.0, 3.0 are chosen arbitrarily,
        # as we don't have depth information; thus, the computed H will only
        # be accurate in the X axis.
        pts_worldm = np.array([[-LANE_W / 2.0, 5.0],   # Left lane, far
                               [LANE_W / 2.0, 5.0],    # Right lane, far
                               [-LANE_W / 2.0, 3.0],   # Left lane, close
                               [LANE_W / 2.0, 3.0]])   # Right lane, close
        # These world points are scaled+translated to generate a
        # reasonably-sized image w/ perspective effects removed.
        # Note: the origin here is not the same origin as in pts_worldm!
        pts_world = np.array([[0.0, 0.0],
                              [LANE_W * 1e2, 0.0],
                              [0.0, 500.0],
                              [LANE_W * 1e2, 500.0]])
        pts_world[:, 0] += LANE_W * 1e2  # Let's see more of the horizontal image
        pts_world[:, 1] += 200           # Let's see more down the road
        # H_metric := Homography mapping the image (pixel coords) to the
        # world plane defined by pts_worldm
        H_metric = cv2.getPerspectiveTransform(pts.astype('float32'),
                                               pts_worldm.astype('float32'))
        H = cv2.getPerspectiveTransform(pts.astype('float32'),
                                        pts_world.astype('float32'))

        ## Estimate where the camera is w.r.t. the world ref. frame
        R, T = estimate_extrinsic_parameters(H_metric)
        xdist = T[0] - (LANE_W / 2.0)
        print "    Distance from center of lane: X={0:.2f} meters".format(xdist)
        LEFT_THRESH = -1.0  # Stay within 1.0 meters of the center of the lane
        RIGHT_THRESH = 1.0
        if xdist >= RIGHT_THRESH:
            print "    WARNING: Camera center is awfully close to the \
RIGHT side of the lane!"
        elif xdist <= LEFT_THRESH:
            print "    WARNING: Camera center is awfully close to the \
LEFT side of the lane!"

        Irgb = cv2.imread(imgpath, cv2.CV_LOAD_IMAGE_COLOR)
        Iipm = cv2.warpPerspective(Irgb.astype('float64'), H, (1000, 700))
        cv2.namedWindow("win2: Perspective-rectified image")
        cv2.imshow("win2: Perspective-rectified image", Iipm.astype('uint8'))
        Irgb = detect_lanes.draw_subwindow(Irgb, WIN_LEFT)
        Irgb = detect_lanes.draw_subwindow(Irgb, WIN_RIGHT)
        for _pt in pts:
            _pt = tuple([intrnd(x) for x in _pt])
            cv2.circle(Irgb, _pt, 3, (0, 0, 255))
        print "    ({0}/{1}) Displaying detected lanes.".format(
            i + 1, len(imgpaths_test))
        show_lanes(Irgb, line1, line2)
    print "Done."
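# NOTE: compute_x() and intrnd() are helpers defined elsewhere in this repo and
# are not shown in this file. Below is a minimal sketch of what they are assumed
# to do, based on how they are used above and on the line representation
# documented in estimate_planar_homography() below (lines are homogeneous
# [a, b, c] with a*x + b*y + c = 0). The actual implementations may differ.
def compute_x(line, y):
    """ Returns the x-coordinate at which the homogeneous line [a, b, c]
    (a*x + b*y + c = 0) crosses image row y. """
    a, b, c = line
    return -(b * y + c) / a

def intrnd(x):
    """ Rounds x to the nearest integer (e.g. for OpenCV pixel coordinates). """
    return int(round(x))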
def estimate_planar_homography(I, line1, line2, K, win1, win2, lane_width):
    """ Estimates the planar homography H between the camera image plane
    and the World (ground) plane. The World reference frame is directly
    below the camera, with:
        - Z-axis parallel to the road
        - X-axis on the road plane
        - Y-axis pointing upward from the road plane
        - origin on the road plane (Y=0), halfway between the two lane
          boundaries (center of the lane)
    Input:
        nparray I
        nparray line1, line2: [a, b, c]
        nparray K
            The 3x3 camera intrinsic matrix.
        tuple win1, win2: (float x, float y, float w, float h)
        float lane_width
    Output:
        nparray H
    where H is a 3x3 homography.
    """
    h, w = I.shape[0:2]
    x_win1 = intrnd(w * win1[0])
    y_win1 = intrnd(h * win1[1])
    x_win2 = intrnd(w * win2[0])
    y_win2 = intrnd(h * win2[1])
    pts = []
    NUM = 10
    h_win = intrnd(h * win1[3])  # Assume window heights are the same for left/right
    for i in xrange(NUM):
        frac = i / float(NUM)
        y_cur = intrnd(y_win1 - (h_win / 2) + frac * h_win)
        pt_i = (compute_x(line1, y_cur), y_cur)
        pt_j = (compute_x(line2, y_cur), y_cur)
        pts.append((pt_i, pt_j))
    r1 = solve_for_r1(pts, K, lane_width)
    vanishing_pt = np.cross(line1, line2)
    vanishing_pt = vanishing_pt / vanishing_pt[2]

    ## DEBUG Plot points on image, save to file for visual verification
    Irgb = util.to_rgb(I)
    COLOURS = [(255, 0, 0), (0, 255, 0)]
    for i, (pt_i, pt_j) in enumerate(pts):
        clr = COLOURS[i % 2]
        cv2.circle(Irgb, tuple(map(intrnd, pt_i)), 5, clr)
        cv2.circle(Irgb, tuple(map(intrnd, pt_j)), 5, clr)
    cv2.circle(Irgb, (intrnd(vanishing_pt[0]), intrnd(vanishing_pt[1])), 5, (0, 0, 255))
    cv2.imwrite("_Irgb.png", Irgb)

    r3 = solve_for_r3(vanishing_pt, line1, line2, K)
    T = solve_for_t(pts, K, r1, r3, lane_width)
    print "T_pre:", T
    T = T * (2.1798 / T[1])  # Height of camera is 2.1798 meters
    T[2] = 1  # We want the ref. frame to be directly below the camera (why 1?!)
    #T = T / np.linalg.norm(T)
    print "T_post:", T
    H = np.zeros([3, 3])
    H[:, 0] = r1
    H[:, 1] = r3
    H[:, 2] = T
    return np.dot(K, H)
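# NOTE: solve_for_r1(), solve_for_r3(), and solve_for_t() are defined elsewhere
# in this repo. For reference, the standard construction that solve_for_r3() is
# assumed to follow is sketched below: the lane boundaries are parallel to the
# world Z-axis, so their common vanishing point v satisfies v ~ K * r3, hence
# r3 ~ inv(K) * v (up to scale and sign). The repo's version also takes
# line1/line2 and may differ in detail; this is only an illustrative sketch.
def solve_for_r3_sketch(vanishing_pt, K):
    """ Recovers the third rotation column r3 (the world Z-axis expressed in
    camera coordinates) from the lanes' vanishing point. """
    r3 = np.dot(np.linalg.inv(K), vanishing_pt)
    r3 = r3 / np.linalg.norm(r3)
    if r3[2] < 0:
        r3 = -r3  # make r3 point forward, down the road (positive camera Z)
    return r3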