Example #1
    # Perspective transform
    img, binary_unwarped, m, m_inv = perspective_transform(img)
    plt.imshow(img, cmap='gray', vmin=0, vmax=1)
    plt.savefig('example_images/warped_' + out_image_file)

    # Polynomial fit
    ret = line_fit(img)
    left_fit = ret['left_fit']
    right_fit = ret['right_fit']
    nonzerox = ret['nonzerox']
    nonzeroy = ret['nonzeroy']
    left_lane_inds = ret['left_lane_inds']
    right_lane_inds = ret['right_lane_inds']
    save_file = 'example_images/polyfit_' + out_image_file
    viz2(img, ret, save_file=save_file)

    # Do full annotation on original image
    # Code is the same as in 'line_fit_video.py'
    orig = mpimg.imread('test_images/' + image_file)
    undist = cv2.undistort(orig, mtx, dist, None, mtx)
    left_curve, right_curve = calc_curve(left_lane_inds, right_lane_inds,
                                         nonzerox, nonzeroy)

    bottom_y = undist.shape[0] - 1
    bottom_x_left = left_fit[0] * bottom_y**2 + left_fit[1] * bottom_y + left_fit[2]
    bottom_x_right = right_fit[0] * bottom_y**2 + right_fit[1] * bottom_y + right_fit[2]
    vehicle_offset = undist.shape[1] / 2 - (bottom_x_left + bottom_x_right) / 2
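
Example #1 ends by measuring the vehicle offset in pixels only. Below is a minimal sketch of the same calculation wrapped in a helper; the calc_vehicle_offset name matches the later examples, but this body and the 3.7/700 meters-per-pixel scale are assumptions rather than the project's actual code.

import numpy as np

def calc_vehicle_offset(undist, left_fit, right_fit, xm_per_pix=3.7 / 700):
    """Lateral offset of the camera from the lane center, in meters (sketch)."""
    bottom_y = undist.shape[0] - 1
    # Evaluate each 2nd-order fit x = A*y**2 + B*y + C at the bottom image row
    bottom_x_left = np.polyval(left_fit, bottom_y)
    bottom_x_right = np.polyval(right_fit, bottom_y)
    # Positive value: image center (camera) sits to the right of the lane center
    offset_px = undist.shape[1] / 2 - (bottom_x_left + bottom_x_right) / 2
    return offset_px * xm_per_pix
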
Example #2
def annotate_image(img_in):
	"""
	Annotate the input image with lane line markings
	Returns annotated image
	"""
	global mtx, dist, left_line, right_line, detected, frameCount, retLast
	global left_curve, right_curve, left_lane_inds, right_lane_inds

	frameCount += 1
	src = np.float32(
		[[200, 720],
		 [1100, 720],
		 [520, 500],
		 [760, 500]])

	x = [src[0, 0], src[1, 0], src[3, 0], src[2, 0], src[0, 0]]
	y = [src[0, 1], src[1, 1], src[3, 1], src[2, 1], src[0, 1]]

	# Undistort, threshold, perspective transform
	undist = cv2.undistort(img_in, mtx, dist, None, mtx)
	img, abs_bin, mag_bin, dir_bin, hls_bin = combined_thresh(undist)
	binary_warped, binary_unwarped, m, m_inv = perspective_transform(img)

	# Perform polynomial fit
	if not detected:
		# Slow line fit
		ret = line_fit(binary_warped)
		# If no lanes were detected, fall back to the last result
		if ret is None or len(ret) == 0:
			ret = retLast
		left_fit = ret['left_fit']
		right_fit = ret['right_fit']
		nonzerox = ret['nonzerox']
		nonzeroy = ret['nonzeroy']
		out_img = ret['out_img']
		left_lane_inds = ret['left_lane_inds']
		right_lane_inds = ret['right_lane_inds']
		histogram = ret['histo']

		# Get moving average of line fit coefficients
		left_fit = left_line.add_fit(left_fit)
		right_fit = right_line.add_fit(right_fit)

		# Calculate curvature
		left_curve, right_curve = calc_curve(left_lane_inds, right_lane_inds, nonzerox, nonzeroy)

		detected = True  # slow line fit always detects the line

	else:  # implies detected == True
		# Fast line fit
		left_fit = left_line.get_fit()
		right_fit = right_line.get_fit()
		ret = tune_fit(binary_warped, left_fit, right_fit)

		# Only make updates if we detected lines in the current frame
		if ret is not None:
			left_fit = ret['left_fit']
			right_fit = ret['right_fit']
			nonzerox = ret['nonzerox']
			nonzeroy = ret['nonzeroy']
			left_lane_inds = ret['left_lane_inds']
			right_lane_inds = ret['right_lane_inds']

			left_fit = left_line.add_fit(left_fit)
			right_fit = right_line.add_fit(right_fit)
			left_curve, right_curve = calc_curve(left_lane_inds, right_lane_inds, nonzerox, nonzeroy)
		else:
			detected = False

	vehicle_offset = calc_vehicle_offset(undist, left_fit, right_fit)

	# Perform final visualization on top of original undistorted image
	result = final_viz(undist, left_fit, right_fit, m_inv, left_curve, right_curve, vehicle_offset)

	retLast = ret

	save_viz2 = './output_images/polyfit_test%d.jpg' % (frameCount)

	viz2(binary_warped, ret, save_viz2)

	save_warped = './output_images/warped_test%d.jpg' % (frameCount)
	plt.imshow(binary_warped, cmap='gray', vmin=0, vmax=1)
	if save_warped is None:
		plt.show()
	else:
		plt.savefig(save_warped)
	plt.gcf().clear()

	save_binary = './output_images/binary_test%d.jpg' % (frameCount)
	plt.imshow(img, cmap='gray', vmin=0, vmax=1)
	if save_binary is None:
		plt.show()
	else:
		plt.savefig(save_binary)
	plt.gcf().clear()

	if frameCount > 0:
		fig = plt.gcf()
		fig.set_size_inches(16.5, 8.5)
		plt.subplot(2, 3, 1)
		plt.imshow(undist)
		# plt.plot(undist)
		plt.plot(x, y)
		plt.title('undist')
		plt.subplot(2, 3, 2)
		plt.imshow(hls_bin, cmap='gray', vmin=0, vmax=1)
		plt.title('hls_bin')
		plt.subplot(2, 3, 3)
		plt.imshow(abs_bin, cmap='gray', vmin=0, vmax=1)
		plt.title('abs_bin')
		plt.subplot(2, 3, 4)
		plt.imshow(img, cmap='gray', vmin=0, vmax=1)
		plt.title('img')
		plt.subplot(2, 3, 5)
		plt.imshow(out_img)
		plt.title('out_img')
		plt.subplot(2, 3, 6)
		plt.imshow(result, cmap='gray', vmin=0, vmax=1)
		plt.title('result')

		save_result = 'D:/code/github_code/CarND-Advanced-Lane-Lines-P4/output_images/result-test%d.jpg' % (frameCount)
		if save_result is None:
			plt.show()
		else:
			plt.savefig(save_result)
		plt.gcf().clear()

	return result
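
Example #2 smooths the polynomial coefficients with left_line.add_fit / get_fit, but the Line class itself is not shown. Below is a minimal sketch of such a moving-average buffer; the window size and the len_fit helper (used in Example #3) are assumptions, not the project's implementation.

from collections import deque

import numpy as np

class Line:
	"""Hypothetical moving-average buffer for 2nd-order lane-fit coefficients."""

	def __init__(self, n=5):
		self.fits = deque(maxlen=n)  # keep only the last n coefficient triples

	def add_fit(self, fit):
		# Store the new fit and return the smoothed coefficients
		self.fits.append(np.asarray(fit, dtype=float))
		return self.get_fit()

	def get_fit(self):
		# Average of the stored fits (assumes add_fit was called at least once)
		return np.mean(self.fits, axis=0)

	def len_fit(self):
		return len(self.fits)
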
Example #3
def annotate_image(img_in):
    """
    Annotate the input image with lane line markings
    Returns annotated image
    """

    # Get Frames and Image properties
    g.frames = g.frames + 1
    g.vi_width = img_in.shape[1]
    g.vi_height = img_in.shape[0]

    # Undistort, threshold, perspective transform
    # Transforms an image to compensate for lens distortion.
    # Python: cv2.undistort(src, cameraMatrix, distCoeffs[, dst[, newCameraMatrix]]) -> dst
    # src - Input (distorted) image.
    # dst - Output (corrected) image that has the same size and type as src .
    # cameraMatrix - Input camera matrix, a 3x3 matrix A = [[fx, 0, cx], [0, fy, cy], [0, 0, 1]]
    # distCoeffs - Input vector of distortion coefficients  (k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6]]) of 4, 5, or 8 elements.
    #              -->  If the vector is NULL/empty, the zero distortion coefficients are assumed.
    # newCameraMatrix - Camera matrix of the distorted image. By default,
    #              -->  it is the same as cameraMatrix but you may additionally scale and shift the result by using a different matrix.
    if not RLD_ZERO_DISTORSION:
        undist = cv2.undistort(img_in, g.mtx, g.dist, None, g.mtx)
        # print ('DISTORTION CORRECTED undist: ', undist)
    else:
        undist = cv2.undistort(img_in, g.mtx, None, None, g.mtx)
        # print ('DISTORTION NOT CORRECTED undist: ', undist)
    # Display undistort image
    if DEBUG_LINE_FIT_VIDEO >= DEBUG_LEVEL2:
        if g.cold_boot:
            cv2.namedWindow('undist_image', cv2.WINDOW_NORMAL)
            cv2.resizeWindow('undist_image', g.s_win_width, g.s_win_height)
            cv2.moveWindow('undist_image', 0, g.s_height // 2 + g.s_win_height)
        undist_bgr = cv2.cvtColor(undist, cv2.COLOR_RGB2BGR)
        cv2.imshow('undist_image', undist_bgr)

    # Combine all threshold mask on the undistort image
    #histogram_calc(undist)
    img, abs_bin, mag_bin, dir_bin, hls_bin, hsv_bin = combined_thresh(undist)
    #img = combined_canny(undist)
    if DEBUG_LINE_FIT_VIDEO >= DEBUG_LEVEL2:
        if g.cold_boot:
            cv2.namedWindow('undist_comb_thresh', cv2.WINDOW_NORMAL)
            cv2.resizeWindow('undist_comb_thresh', g.s_win_width,
                             g.s_win_height)
            cv2.moveWindow(
                'undist_comb_thresh', 3 * g.s_win_width, 2 * g.s_win_height -
                g.s_win_height // 2 + 2 * g.s_win_height_offset)
        cv2.imshow('undist_comb_thresh', img)

    # Apply perspective transformation / warp the binary image
    binary_warped, binary_unwarped, m, m_inv, src, dst = perspective_transform(
        img)
    unwarped_trapez = (np.dstack((binary_unwarped, binary_unwarped,
                                  binary_unwarped)) * 255).astype('uint8')
    if DEBUG_LINE_FIT_VIDEO >= DEBUG_LEVEL2:
        if g.cold_boot:
            cv2.namedWindow('warped_image', cv2.WINDOW_NORMAL)
            cv2.resizeWindow('warped_image', g.s_win_width, g.s_win_height)
            cv2.moveWindow('warped_image', 4 * g.s_win_width,
                           3 * g.s_win_height)
        cv2.imshow('warped_image', binary_warped)

        if g.cold_boot:
            cv2.namedWindow('unwarped_image', cv2.WINDOW_NORMAL)
            cv2.resizeWindow('unwarped_image', g.s_win_width, g.s_win_height)
            cv2.moveWindow('unwarped_image', 3 * g.s_win_width,
                           3 * g.s_win_height)
            if g.trackbar_enabled:
                cv2.createTrackbar('top', 'unwarped_image',
                                   int(g.trap_top_width * 100), 100, nothing)
                cv2.createTrackbar('bottom', 'unwarped_image',
                                   int(g.trap_bottom_width * 100), 100,
                                   nothing)
                cv2.createTrackbar('height', 'unwarped_image',
                                   int(g.trap_height * 100), 100, nothing)
                cv2.setTrackbarPos('top', 'unwarped_image',
                                   int(g.trap_top_width * 100))
                cv2.setTrackbarPos('bottom', 'unwarped_image',
                                   int(g.trap_bottom_width * 100))
                cv2.setTrackbarPos('height', 'unwarped_image',
                                   int(g.trap_height * 100))
        if g.trackbar_enabled:
            l_top = cv2.getTrackbarPos('top', 'unwarped_image')
            g.trap_top_width = float(l_top) / 100
            l_bottom = cv2.getTrackbarPos('bottom', 'unwarped_image')
            g.trap_bottom_width = float(l_bottom) / 100
            l_height = cv2.getTrackbarPos('height', 'unwarped_image')
            g.trap_height = float(l_height) / 100
        cv2.polylines(unwarped_trapez, np.int32([src]), True, (255, 255, 0), 1,
                      0)
        cv2.imshow('unwarped_image', unwarped_trapez)

    # Perform polynomial fit
    left_fit = None
    right_fit = None

    if not g.detected:
        # Slow line fit
        ret = line_fit(binary_warped)
        #print ('ret:', ret)
        if ret is not None:
            left_fit = ret['left_fit']
            right_fit = ret['right_fit']
            nonzerox = ret['nonzerox']
            nonzeroy = ret['nonzeroy']
            g.left_lane_inds = ret['left_lane_inds']
            g.right_lane_inds = ret['right_lane_inds']

            # Get moving average of line fit coefficients
            left_fit = g.left_line.add_fit(left_fit)
            right_fit = g.right_line.add_fit(right_fit)

            # Calculate curvature
            g.left_curve, g.right_curve = calc_curve(g.left_lane_inds,
                                                     g.right_lane_inds,
                                                     nonzerox, nonzeroy)
            if g.detect_fast_mode_allowed:
                g.detected = True  # slow line fit always detects the line
            else:
                g.detected = False  # Force the slow mode for ever

            if DEBUG_LINE_FIT_VIDEO >= DEBUG_LEVEL2:
                viz1(binary_warped, ret, save_file=None)
                viz2(binary_warped, ret, save_file=None)
        else:
            if not g.degraded_viz_mode:
                g.detected = False

    else:  # implies g.detected == True
        # Fast line fit
        left_fit = g.left_line.get_fit()
        right_fit = g.right_line.get_fit()
        ret = tune_fit(binary_warped, left_fit, right_fit)

        # Only make updates if we detected lines in current frame
        if ret is not None:
            left_fit = ret['left_fit']
            right_fit = ret['right_fit']
            nonzerox = ret['nonzerox']
            nonzeroy = ret['nonzeroy']
            g.left_lane_inds = ret['left_lane_inds']
            g.right_lane_inds = ret['right_lane_inds']

            left_fit = g.left_line.add_fit(left_fit)
            right_fit = g.right_line.add_fit(right_fit)
            g.left_curve, g.right_curve = calc_curve(g.left_lane_inds,
                                                     g.right_lane_inds,
                                                     nonzerox, nonzeroy)
            if DEBUG_LINE_FIT_VIDEO >= DEBUG_LEVEL2:
                #viz1(binary_warped, ret, save_file=None)
                viz2(binary_warped, ret, save_file=None)
        else:
            if not g.degraded_viz_mode:
                g.detected = False

    if ret is not None:
        vehicle_offset = calc_vehicle_offset(undist, left_fit, right_fit)

        # Perform final visualization on top of original undistorted image
        # print ('g.detected:',g.detected ,'g.left_curve:',g.left_curve ,'g.right_curve:',g.right_curve)
        # print ('left_fit:',left_fit ,'right_fit:',right_fit)
        result = final_viz(undist, left_fit, right_fit, m_inv, g.left_curve,
                           g.right_curve, vehicle_offset)
    else:
        if (g.degraded_viz_mode and g.left_line.len_fit() != 0
                and g.right_line.len_fit() != 0):
            if left_fit is None or right_fit is None:
                left_fit = g.left_line.get_fit()
                right_fit = g.right_line.get_fit()
            vehicle_offset = calc_vehicle_offset(undist, left_fit, right_fit)
            result = final_viz(undist, left_fit, right_fit, m_inv,
                               g.left_curve, g.right_curve, vehicle_offset)
        else:
            result = undist

    if DEBUG_LINE_FIT_VIDEO >= DEBUG_LEVEL2:
        if g.cold_boot:
            cv2.namedWindow('final_visu', cv2.WINDOW_NORMAL)
            cv2.resizeWindow('final_visu', 640, 480)
        cv2.putText(result, 'Filter: ' + g.combined_filter_type,
                    (1, int(3 * g.vi_height * VI_TEXT_OFFSET / VI_HEIGHT)),
                    cv2.FONT_HERSHEY_SIMPLEX,
                    g.vi_height * VI_FONT_SIZE / VI_HEIGHT, (0, 255, 0), 1,
                    cv2.LINE_AA)
        cv2.putText(
            result, 'Res: ' + str(g.vi_width) + 'x' + str(g.vi_height) +
            ' - Frame:' + str(g.frames),
            (1, int(4 * g.vi_height * VI_TEXT_OFFSET / VI_HEIGHT)),
            cv2.FONT_HERSHEY_SIMPLEX, g.vi_height * VI_FONT_SIZE / VI_HEIGHT,
            (255, 0, 0), 1, cv2.LINE_AA)
        cv2.putText(
            result,
            'pX: ' + str(round(g.scale_px_width, 2)) + ' mm/px' + ' l_size:' +
            str(round(g.lane_size_px * g.scale_px_width / 1000, 2)) + ' m',
            (1, int(5 * g.vi_height * VI_TEXT_OFFSET / VI_HEIGHT)),
            cv2.FONT_HERSHEY_SIMPLEX, g.vi_height * VI_FONT_SIZE / VI_HEIGHT,
            (255, 0, 0), 1, cv2.LINE_AA)
        cv2.putText(result,
                    g.detect_mode + '  Recovery: ' + str(g.degraded_viz_count),
                    (1, int(6 * g.vi_height * VI_TEXT_OFFSET / VI_HEIGHT)),
                    cv2.FONT_HERSHEY_SIMPLEX,
                    g.vi_height * VI_FONT_SIZE / VI_HEIGHT, (0, 255, 255), 1,
                    cv2.LINE_AA)
        result_bgr = cv2.cvtColor(result, cv2.COLOR_RGB2BGR)
        cv2.imshow('final_visu', result_bgr)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            sys.exit(0)

    g.cold_boot = False

    return result
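
The long comment in Example #3 restates the cv2.undistort documentation. Below is a short, self-contained usage sketch; the camera matrix, distortion coefficients, and image path are made-up placeholders (real values come from cv2.calibrateCamera on calibration images).

import cv2
import numpy as np

# Placeholder intrinsics and distortion coefficients (assumed values)
mtx = np.array([[1000.0, 0.0, 640.0],
                [0.0, 1000.0, 360.0],
                [0.0, 0.0, 1.0]])
dist = np.array([-0.2, 0.1, 0.0, 0.0, 0.0])  # k1, k2, p1, p2, k3

img = cv2.imread('test_images/test1.jpg')          # hypothetical input frame
undist = cv2.undistort(img, mtx, dist, None, mtx)  # same size and type as img
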
Example #4
		img = np.uint8(img)
	img = cv2.blur(img, (5,5))
	#img = cv2.undistort(img, mtx, dist, None, mtx)
	img2, abs_bin, mag_bin, dir_bin, hls_bin = combined_thresh(img)
	#img, _, img2 = combined_thresh_canny(img)
	img3, binary_unwarped, m, m_inv = perspective_transform(img2)
	
	ret = line_fit(img3, viz=1)
	left_fit = ret['left_fit']
	right_fit = ret['right_fit']
	nonzerox = ret['nonzerox']
	nonzeroy = ret['nonzeroy']
	left_lane_inds = ret['left_lane_inds']
	right_lane_inds = ret['right_lane_inds']
	save_file = os.path.dirname(os.path.abspath(__file__))+'/saves/polyfit1_' + out_image_file
	img4 = viz2(img3, ret, save_file=save_file)

	# Do full annotation on original image
	# Code is the same as in 'line_fit_video.py'
	undist = img
	left_curve, right_curve = calc_curve(left_lane_inds, right_lane_inds, nonzerox, nonzeroy)

	bottom_y = undist.shape[0] - 1
	bottom_x_left = left_fit[0]*(bottom_y**2) + left_fit[1]*bottom_y + left_fit[2]
	bottom_x_right = right_fit[0]*(bottom_y**2) + right_fit[1]*bottom_y + right_fit[2]
	vehicle_offset = undist.shape[1]/2 - (bottom_x_left + bottom_x_right)/2

	xm_per_pix = 2.05/490 # meters per pixel in x dimension
	vehicle_offset *= xm_per_pix

	img5 = final_viz(undist, left_fit, right_fit, m_inv, left_curve, right_curve, vehicle_offset)
	# Perspective transform
	img, binary_unwarped, m, m_inv = perspective_transform(img)
	plt.imshow(img, cmap='gray', vmin=0, vmax=1)
	plt.savefig('example_images/warped_' + out_image_file)

	# Polynomial fit
	ret = line_fit(img)
	left_fit = ret['left_fit']
	right_fit = ret['right_fit']
	nonzerox = ret['nonzerox']
	nonzeroy = ret['nonzeroy']
	left_lane_inds = ret['left_lane_inds']
	right_lane_inds = ret['right_lane_inds']
	save_file = 'example_images/polyfit_' + out_image_file
	viz2(img, ret, save_file=save_file)

	# Do full annotation on original image
	# Code is the same as in 'line_fit_video.py'
	orig = mpimg.imread('test_images/' + image_file)
	undist = cv2.undistort(orig, mtx, dist, None, mtx)
	left_curve, right_curve = calc_curve(left_lane_inds, right_lane_inds, nonzerox, nonzeroy)

	bottom_y = undist.shape[0] - 1
	bottom_x_left = left_fit[0]*(bottom_y**2) + left_fit[1]*bottom_y + left_fit[2]
	bottom_x_right = right_fit[0]*(bottom_y**2) + right_fit[1]*bottom_y + right_fit[2]
	vehicle_offset = undist.shape[1]/2 - (bottom_x_left + bottom_x_right)/2

	xm_per_pix = 3.7/700 # meters per pixel in x dimension
	vehicle_offset *= xm_per_pix
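
All four examples call calc_curve(left_lane_inds, right_lane_inds, nonzerox, nonzeroy) without showing it. Below is a minimal sketch of the usual approach: refit the lane pixels in world coordinates, then apply the radius-of-curvature formula R = (1 + (2*A*y + B)**2)**1.5 / |2*A|. The 30/720 and 3.7/700 meters-per-pixel scales and the function body are assumptions, not this project's code.

import numpy as np

def calc_curve(left_lane_inds, right_lane_inds, nonzerox, nonzeroy, ym_per_pix=30 / 720, xm_per_pix=3.7 / 700):
	"""Radius of curvature (meters) of each lane line near the bottom of the image (sketch)."""
	y_eval = np.max(nonzeroy) * ym_per_pix  # evaluate closest to the vehicle, in meters

	# Pixels belonging to each line, rescaled from pixels to meters before refitting
	left_fit_cr = np.polyfit(nonzeroy[left_lane_inds] * ym_per_pix, nonzerox[left_lane_inds] * xm_per_pix, 2)
	right_fit_cr = np.polyfit(nonzeroy[right_lane_inds] * ym_per_pix, nonzerox[right_lane_inds] * xm_per_pix, 2)

	# R = (1 + (2*A*y + B)**2)**1.5 / |2*A|
	left_curve = (1 + (2 * left_fit_cr[0] * y_eval + left_fit_cr[1])**2)**1.5 / abs(2 * left_fit_cr[0])
	right_curve = (1 + (2 * right_fit_cr[0] * y_eval + right_fit_cr[1])**2)**1.5 / abs(2 * right_fit_cr[0])
	return left_curve, right_curve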