def abs_sobel_thresh(img, orient='x', thresh_min=0, thresh_max=255, ksize=3,
                     out_depth=cv2.CV_64F, vwr=None):
    """Binary-threshold the absolute directional Sobel gradient of an image.

    img: project Image wrapper; its img_data is differentiated.
    orient: 'x' takes the x derivative, anything else the y derivative.
    thresh_min, thresh_max: inclusive bounds on the 0-255 scaled gradient.
    ksize: Sobel kernel size.
    out_depth: cv2 output depth for the derivative (default cv2.CV_64F).
    vwr: unused here; kept for call-site symmetry with sibling helpers.

    Returns a grayscale Image whose pixels are 1 where the scaled gradient
    magnitude lies in [thresh_min, thresh_max] and 0 elsewhere.
    """
    # Validate with an explicit raise instead of assert: asserts vanish
    # under `python -O`, and a wrong type would fail confusingly later.
    if type(img) is not Image:
        raise TypeError("abs_sobel_thresh: img must be an Image")
    # 2) Take the derivative in x or y given orient = 'x' or 'y'
    if orient == 'x':
        sobel = Sobel(img, out_depth, 1, 0, ksize)
    else:
        sobel = Sobel(img, out_depth, 0, 1, ksize)
    # 3) Take the absolute value of the derivative or gradient
    abs_sobel = np.absolute(sobel.img_data)
    # 4) Scale to 8-bit (0 - 255) then convert to np.uint8. Guard the
    # all-zero-gradient case (flat image), which previously divided by
    # zero and produced NaN/invalid-value warnings.
    peak = np.max(abs_sobel)
    if peak == 0:
        peak = 1
    scaled_sobel = np.uint8(255 * abs_sobel / peak)
    # 5) Create a mask of 1's where the scaled gradient magnitude
    #    is >= thresh_min and <= thresh_max
    sxbinary = np.zeros_like(scaled_sobel)
    sxbinary[(scaled_sobel >= thresh_min) & (scaled_sobel <= thresh_max)] = 1
    # 6) Return this mask as your binary_output image
    ut.oneShotMsg("FIXME: this squeeze thing may be a problem")
    binary_image = Image(img_data=np.squeeze(sxbinary), title="scaled_sobel",
                         img_type='gray')
    return binary_image
def my_way(ploty, left_fit_cr, right_fit_cr, leftx, rightx):
    """Ad-hoc harness: compute left/right curve radii for given fit data.

    Builds a Lane plus two LaneBoundary objects from the first test image,
    injects the supplied x samples and polynomial coefficients, and prints
    the resulting radii of curvature.
    """
    cd = cache_dict
    pd = parm_dict
    first_image = ut.get_fnames("test_images/", "*.jpg")[0]
    init_img, binary_warped = iu.get_binary_warped_image(first_image, cd, pd,
                                                         vwr=None)
    # img is just to painlessly fake out Lane ctor
    lane = lu.Lane(cd, pd, img=init_img, units='pixels', vwr=None)
    ut.oneShotMsg("FIXME: units above must be meters")
    lane.ploty = ploty
    # first ctor arg is the max ix; hope it doesnt matter for this test
    lane.left_bndry = lu.LaneBoundary(0, binary_warped, 'L', lane=lane,
                                      vwr=None)
    lane.right_bndry = lu.LaneBoundary(0, binary_warped, 'R', lane=lane,
                                       vwr=None)
    left = lane.left_bndry
    right = lane.right_bndry
    left.x = leftx
    right.x = rightx
    left.fit_coeff = left_fit_cr
    right.fit_coeff = right_fit_cr
    left.radius_of_curvature()
    right.radius_of_curvature()
    print("FIXME(meters): " + str((left.curve_radius, right.curve_radius)))
def lane_finding_take_1(path, cd=None, pd=None):
    """First-cut thresholding experiment on one image.

    Undistorts, warps to a top-down view, grayscales, then runs the
    absolute/magnitude/direction Sobel and HLS thresholds and displays
    every interesting combination of them via the (module-global) viewer.
    """
    tmp = iu.imRead(path, reader='cv2', vwr=vwr)
    undistorted = iu.cv2Undistort(tmp, cd['mtx'], cd['dist'], vwr)
    top_down = iu.look_down(undistorted, cd, vwr)
    gray = iu.cv2CvtColor(top_down, cv2.COLOR_BGR2GRAY, vwr)
    abs_sobel = iu.abs_sobel_thresh(gray, 'x',
                                    pd['sobel_min_thresh'],
                                    pd['sobel_max_thresh'],
                                    pd['sobel_kernel_size'],
                                    pd['sobel_out_depth'], vwr)
    mag_sobel = iu.mag_thresh(gray,
                              pd['sobel_min_thresh'],
                              pd['sobel_max_thresh'],
                              pd['sobel_kernel_size'],
                              pd['sobel_out_depth'], vwr)
    ut.oneShotMsg("FIXME: need parms for sobel_dir_thresh_(max,min), ksizse")
    dir_sobel = iu.dir_thresh(gray,
                              0.7,  # FIXME: need new gpd['sobel_dir_thresh_min'] ?
                              1.3,  # FIXME: need new gpd['sobel_dir_thresh_min'] ?
                              15,   # FIXME: gpd['sobel_kernel_size'],
                              pd['sobel_out_depth'], vwr)
    ut.oneShotMsg("FIXME: need parm dict entries for hls thresh")
    hls_thresh = iu.hls_thresh(undistorted,
                               80,   # FIXME: shdb in gpd
                               255,  # FIXME: shdb in gpd
                               vwr)
    # every combo we care about, largest first: the 4-way, the 3-ways,
    # then the 2-ways (same order as the original hand-written calls)
    combos = [
        ([abs_sobel, dir_sobel, hls_thresh, mag_sobel], "abs+dir+hls+mag"),
        ([abs_sobel, dir_sobel, hls_thresh], "abs+dir+hls"),
        ([abs_sobel, dir_sobel, mag_sobel], "abs+dir+mag"),
        ([abs_sobel, hls_thresh, mag_sobel], "abs+hls+mag"),
        ([abs_sobel, dir_sobel], "abs+dir"),
        ([abs_sobel, hls_thresh], "abs+hls"),
        ([abs_sobel, mag_sobel], "abs+mag"),
        ([mag_sobel, dir_sobel], "mag+dir"),
    ]
    for imgs, title in combos:
        combined = iu.combined_thresh(imgs, title)
    vwr.show()
    print("FIXME: combined thresholds not working too well right now")
def hls_lab_lane_detect(img, cache_dict=None, parm_dict=None):
    """OR together the HLS:L and Lab:b binary masks into one lane mask.

    Temporarily deprecated in favor of lab+luv but left here.
    Returns a grayscale Image with 1 wherever either channel mask fired.
    """
    assert (type(img) is Image)
    ut.oneShotMsg("hls_lab_lane_detect")
    vwr = cache_dict['viewer']
    l_mask = oneChannelInAlternateColorspace2BinaryinaryImage(
        img, cv2.COLOR_BGR2HLS, 1, cd=cache_dict, pd=parm_dict)
    b_mask = oneChannelInAlternateColorspace2BinaryinaryImage(
        img, cv2.COLOR_BGR2Lab, 2, cd=cache_dict, pd=parm_dict)
    either = (l_mask.img_data == 1) | (b_mask.img_data == 1)
    merged = np.zeros_like(l_mask.img_data)
    merged[either] = 1
    return Image(img_data=merged, title="hls+lab", img_type='gray')
def lab_luv_lane_detect(img, cache_dict=None, parm_dict=None):
    """OR together the Lab:b and Luv:L binary masks into one lane mask.

    Tried on a reviewer's advice (lab:B + luv:L); it failed to detect
    lane lines in 24/1200 frames.
    Returns a grayscale Image with 1 wherever either channel mask fired.
    """
    assert (type(img) is Image)
    ut.oneShotMsg("lab_luv_lane_detect")
    vwr = cache_dict['viewer']
    b_mask = oneChannelInAlternateColorspace2BinaryinaryImage(
        img, cv2.COLOR_BGR2Lab, 2, cd=cache_dict, pd=parm_dict)
    l_mask = oneChannelInAlternateColorspace2BinaryinaryImage(
        img, cv2.COLOR_BGR2Luv, 0, cd=cache_dict, pd=parm_dict)
    either = (b_mask.img_data == 1) | (l_mask.img_data == 1)
    merged = np.zeros_like(l_mask.img_data)
    merged[either] = 1
    return Image(img_data=merged, title="lab:b+luv:l", img_type='gray')
def hls_lab_luv_lane_detect(img, cache_dict=None, parm_dict=None):
    """OR together HLS:L, Lab:b and Luv:L binary masks into one lane mask.

    History: lab_luv worked better than hls+lab but still failed on two
    frames, so hls was revived here; result was no improvement (still
    failing on two frames), so lab_luv remains the preferred detector.

    Returns a grayscale Image with 1 wherever any of the three channel
    masks fired.
    """
    assert (type(img) is Image)
    ut.oneShotMsg("hls_lab_luv_lane_detect")
    vwr = cache_dict['viewer']
    hls_binary_l = oneChannelInAlternateColorspace2BinaryinaryImage(
        img, cv2.COLOR_BGR2HLS, 1, cd=cache_dict, pd=parm_dict)
    lab_binary_b = oneChannelInAlternateColorspace2BinaryinaryImage(
        img, cv2.COLOR_BGR2Lab, 2, cd=cache_dict, pd=parm_dict)
    luv_binary_l = oneChannelInAlternateColorspace2BinaryinaryImage(
        img, cv2.COLOR_BGR2Luv, 0, cd=cache_dict, pd=parm_dict)
    combined = np.zeros_like(luv_binary_l.img_data)
    # BUG FIX: the hls term was written "| hls_binary_l.img_data == 1",
    # which parses as "((lab==1 | luv==1 | hls_data) == 1)" because |
    # binds tighter than ==. That only yields the intended OR by
    # accident when the mask data is strictly 0/1; parenthesize so each
    # mask is compared before the OR.
    combined[(lab_binary_b.img_data == 1) |
             (luv_binary_l.img_data == 1) |
             (hls_binary_l.img_data == 1)] = 1
    # Title also corrected: it previously read "lab:b+luv:l" (stale
    # copy-paste) even though three channels are combined here.
    ret = Image(img_data=combined, title="hls:l+lab:b+luv:l",
                img_type='gray')
    return ret
def pipeline_6_12_hls(path, s_thresh=(170, 255), sx_thresh=(20, 100),
                      cd=None, pd=None, vwr=None):
    """Lesson 6.12-style pipeline: HLS S-channel + Sobel-x thresholds.

    Reads an image, undistorts and warps it to a top-down view, then
    thresholds the L-channel x-gradient and the S channel, pushing each
    intermediate to the viewer. Returns the green/blue stacked binary
    (sxbinary in green, s_binary in blue) scaled to 0/255.
    """
    ut.oneShotMsg("FIXME: need parms for s_thresh and sx_thres")
    img = iu.imRead(path, reader='cv2', vwr=vwr)
    undistorted = iu.undistort(img, cd, vwr)
    top_down = iu.look_down(undistorted, cd, vwr)
    # Convert the top-down view to HLS and split out the channels.
    hls = iu.cv2CvtColor(top_down, cv2.COLOR_BGR2HLS)
    h_channel = hls.img_data[:, :, 0]
    l_channel = hls.img_data[:, :, 1]
    s_channel = hls.img_data[:, :, 2]
    for chan, title in ((h_channel, "h_chan"),
                        (l_channel, "l_chan"),
                        (s_channel, "s_chan")):
        iv._push(vwr, iu.Image(img_data=np.squeeze(chan), title=title,
                               img_type='gray'))
    # Sobel in x on lightness to accentuate lines away from horizontal.
    sobelx = cv2.Sobel(l_channel, cv2.CV_64F, 1, 0)
    abs_sobelx = np.absolute(sobelx)
    scaled_sobel = np.uint8(255 * abs_sobelx / np.max(abs_sobelx))
    iv._push(vwr, iu.Image(img_data=np.squeeze(scaled_sobel),
                           title="scaled_sobel", img_type='gray'))
    # Threshold the x gradient.
    in_sx = (scaled_sobel >= sx_thresh[0]) & (scaled_sobel <= sx_thresh[1])
    sxbinary = np.zeros_like(scaled_sobel)
    sxbinary[in_sx] = 1
    # Threshold the saturation channel.
    in_s = (s_channel >= s_thresh[0]) & (s_channel <= s_thresh[1])
    s_binary = np.zeros_like(s_channel)
    s_binary[in_s] = 1
    iv._push(vwr, iu.Image(img_data=np.squeeze(s_binary),
                           title="sobel binary", img_type='gray'))
    # Stack each channel (zeros, gradient mask, color mask) for display.
    color_binary = np.dstack((np.zeros_like(sxbinary), sxbinary,
                              s_binary)) * 255
    # it's clear that the combo of sobel_x and s channel is best so far
    iv._push(vwr, iu.Image(img_data=np.squeeze(color_binary),
                           title="color_binary"))
    return color_binary
def FIXME_lane_detect(img, cache_dict=None, parm_dict=None):
    # Placeholder lane detector: logs its name once, then calls ut.brk()
    # with a complaint — presumably a deliberate trap so that selecting
    # this unimplemented detector stops execution immediately rather
    # than failing silently (NOTE(review): confirm ut.brk semantics).
    ut.oneShotMsg("FIXME_lane_detect")
    ut.brk("you didn't really mean that")