def draw_lanes(self, left_fit, right_fit):

        undistorted_image = pers.undist(
            self.img,
            calib_filename="C:/Users/Chris/SDC_projects/CarND-Advanced-Lane-Lines-P4/saved_calibration.p"
        )
        y_vals = np.arange(0, self.image_height)

        left_fitx = self.get_x_for_line(left_fit, y_vals)
        right_fitx = self.get_x_for_line(right_fit, y_vals)

        # Create an image to draw the lines on
        color_warp = np.zeros_like(self.img).astype(np.uint8)

        # Recast the x and y points into usable format for cv2.fillPoly()
        pts_left = np.array([np.transpose(np.vstack([left_fitx, y_vals]))])
        pts_right = np.array(
            [np.flipud(np.transpose(np.vstack([right_fitx, y_vals])))])
        pts = np.hstack((pts_left, pts_right))

        # Draw the lane onto the warped blank image
        cv2.fillPoly(color_warp, np.int_([pts]), (0, 255, 0))

        # Warp the blank back to original image space using inverse perspective matrix (Minv)
        original_image, newwarp, perspective_Ms = pers.warp_image(
            color_warp, True)
        # Combine the result with the original image
        result = cv2.addWeighted(undistorted_image, 1, newwarp, .3, 0)

        # Print curvature radius and center offset on the image
        stats_text = 'Curvature: {0:.0f}m, Center offset: {1:.1f}m'.format(
            self.curverad, self.distance_from_center)
        stats_text_2 = ('Current lanes score: ' +
                        str(round(self.last_lane_score, 2)) +
                        ' recent lanes score: ' +
                        str(round(np.nanmedian(self.recent_lane_scores), 2)))

        print(stats_text)
        print(stats_text_2)
        text_offset = 670
        text_shift = 80
        font = cv2.FONT_HERSHEY_SIMPLEX
        cv2.putText(result, stats_text, (text_shift, self.image_height - text_offset),
                    font, 0.8, (255, 0, 255), 2, cv2.LINE_AA)

        text_offset = 640
        text_shift = 80

        cv2.putText(result, stats_text_2, (text_shift, self.image_height - text_offset),
                    font, 0.8, (255, 255, 255), 2, cv2.LINE_AA)

        return result
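The helper `get_x_for_line` is not included in this snippet; a minimal sketch, assuming it simply evaluates the second-order polynomial at the requested y positions (the same formula used for `left_fitx` / `right_fitx` in `slide_windows` further below):

    def get_x_for_line(self, fit, y_vals):
        # Evaluate x = A*y^2 + B*y + C for a fit returned by np.polyfit(y, x, 2)
        return fit[0] * y_vals ** 2 + fit[1] * y_vals + fit[2]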
Example #2
    def calibration(self,
                    img,
                    sideLengthSquareMM,
                    calibrationType: str = 'area'):
        """calibrate setup with a given calibrationtype.
        Check the function for all calibration types."""

        if calibrationType == 'area':
            validateImg = self._scaling_trough_area(img,
                                                    sideLengthSquareMM,
                                                    imgIsBW=False)
            validateImg = cv2.cvtColor(validateImg, cv2.COLOR_GRAY2BGR)
            return validateImg

        elif calibrationType == 'warp':
            self.cornerPoints = fl.find_corners(img)
            validateImg = fl.draw_corners(img, self.cornerPoints)

            warpedImg = perspective.warp_image(img, self.cornerPoints,
                                               cfg.warpscale)

            self._scaling_trough_area(warpedImg,
                                      sideLengthSquareMM,
                                      imgIsBW=False)

            return validateImg

        else:
            assert False, "WARNING! No calibration done, unknown calibration type!"
Example #3
def test_main():
    # test_image_path = get_input_path("test_images").joinpath("test1.jpg")
    # test_images = get_input_path("test_images").glob("*.jpg")
    from pathlib import Path

    test_images = Path("/home/bajic/Pictures/lila/").glob("*.jpg")
    lane_detector = LaneDetector()

    for i, test_image_path in enumerate(test_images):
        fig, ax = plt.subplots(3, 2)
        image = mpimg.imread(str(test_image_path))
        ax[0, 0].imshow(image)

        camera_matrix, distortion_coeff = load_camera_precalculated_coefficients()
        undistort_image = correct_camera_distortion(image, camera_matrix, distortion_coeff)
        ax[0, 1].imshow(undistort_image)

        binary_image = get_binary_image(undistort_image)
        ax[1, 0].imshow(binary_image, cmap="gray")

        warped_image, warp_matrix = warp_image(binary_image)
        ax[1, 1].imshow(warped_image, cmap="gray")

        leftx, lefty, rightx, righty, lane_img = lane_detector.sliding_window(warped_image)
        left_fitx, right_fitx, ploty = lane_detector.get_polynominals(lane_img, leftx, lefty, rightx, righty)
        ax[2, 0].imshow(lane_img)

        dewarped_lane_image = draw_lane(warped_image, left_fitx, right_fitx, ploty, warp_matrix)
        merged_img = weighted_img(dewarped_lane_image, image, β=0.8)
        ax[2, 1].imshow(merged_img)

        plt.show()
        break
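`weighted_img` is not part of the snippet; a minimal sketch, assuming it is the usual thin wrapper around `cv2.addWeighted` (only the β keyword appears in the call above, the other parameter names and defaults are assumptions):

import cv2

def weighted_img(img, initial_img, α=1.0, β=0.8, γ=0.0):
    # Blend the lane overlay onto the original frame:
    # result = initial_img * α + img * β + γ
    return cv2.addWeighted(initial_img, α, img, β, γ)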
Example #4
def process_image(image, lane_detector, camera_matrix, distortion_coeff):
    undistort_image = correct_camera_distortion(image, camera_matrix, distortion_coeff)

    binary_image = get_binary_image(undistort_image)

    warped_image, warp_matrix = warp_image(binary_image)

    left_fitx, right_fitx, ploty, out_img = lane_detector.get_lanes(warped_image)
    dewarped_lane_image = draw_lane(warped_image, left_fitx, right_fitx, ploty, warp_matrix)

    # dewarped_lane_image = reverse_warp(out_img, warp_matrix)
    merged_img = weighted_img(dewarped_lane_image, image, β=0.8)

    road_and_car_parameters = RoadAndCar(left_fitx, right_fitx, ploty)
    left_curverad, right_curverad = road_and_car_parameters.measure_curvature_real()

    # Smooth the curvature radius over recent frames; radii more than three
    # times the running mean are treated as outliers and not stored.
    global curvature_radiuses
    curvature_radius = round(np.mean([left_curverad, right_curverad]), 0)
    if len(curvature_radiuses) > 10:
        curvature_radius_mean = np.mean(curvature_radiuses)
        if curvature_radius < curvature_radius_mean * 3:
            curvature_radiuses.append(curvature_radius)
            curvature_radiuses.pop(0)
        curvature_radius = round(np.mean(curvature_radiuses), 0)
    else:
        curvature_radiuses.append(curvature_radius)

    curvature_text = f"Radius of Curvature = {curvature_radius} m"
    car_distance_text = road_and_car_parameters.get_car_distance_text(merged_img)

    cv2.putText(merged_img, curvature_text, (10, 100), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
    cv2.putText(merged_img, car_distance_text, (10, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
    return merged_img
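A sketch of how `process_image` could be wired into a video pipeline with moviepy 1.x; the clip file names are placeholders, and the sketch assumes it lives in the same module as `process_image`, which reads a module-level `curvature_radiuses` list:

from functools import partial

from moviepy.editor import VideoFileClip

curvature_radiuses = []   # history consumed by process_image via `global`

camera_matrix, distortion_coeff = load_camera_precalculated_coefficients()
lane_detector = LaneDetector()
frame_processor = partial(process_image,
                          lane_detector=lane_detector,
                          camera_matrix=camera_matrix,
                          distortion_coeff=distortion_coeff)

clip = VideoFileClip("project_video.mp4")                         # placeholder input
annotated = clip.fl_image(frame_processor)                        # run the pipeline on every frame
annotated.write_videofile("project_video_out.mp4", audio=False)   # placeholder output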
Example #5
    def preprocess(self, img):
        """Transforms the image according to the given options"""

        #cv2.imshow("before preprocess", img)

        if self.cam_matrix is not None and self.dist_param is not None:
            # remove fisheye effect from image
            img = undistort(self.cam_matrix, self.dist_param, img)

        if self.cornerPoints is not None:  # remove perspective warp
            img = perspective.warp_image(img, self.cornerPoints, cfg.warpscale)

        #cv2.imshow("after preprocess", img)

        return img
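`undistort` is not shown in the snippet; a minimal sketch, assuming it is a plain wrapper around `cv2.undistort` with the argument order used above (a genuine fisheye model would go through `cv2.fisheye` instead):

import cv2

def undistort(cam_matrix, dist_param, img):
    # Undo lens distortion using the calibrated camera matrix and
    # distortion coefficients.
    return cv2.undistort(img, cam_matrix, dist_param, None, cam_matrix)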
Example #6
def transform_image(image, show_process=False):
    camera_matrix, distortion_coeff = load_camera_precalculated_coefficients()
    undistort_image = correct_camera_distortion(image, camera_matrix,
                                                distortion_coeff)

    binary_image = get_binary_image(undistort_image)
    binary_warped, M = warp_image(binary_image)

    if show_process:
        fig, ax = plt.subplots(3)
        ax[0].imshow(undistort_image)
        ax[1].imshow(binary_image, cmap="gray")
        ax[2].imshow(binary_warped, cmap="gray")
        plt.show()

    return M, binary_warped
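`load_camera_precalculated_coefficients` is also not part of the snippet; a minimal sketch, assuming the coefficients were pickled after `cv2.calibrateCamera` (the file name and dictionary keys are assumptions):

import pickle

def load_camera_precalculated_coefficients(path="camera_calibration.p"):
    # Load the camera matrix and distortion coefficients saved by the
    # calibration step (keys assumed to be "mtx" and "dist").
    with open(path, "rb") as f:
        calibration = pickle.load(f)
    return calibration["mtx"], calibration["dist"]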
    def slide_windows(self, img):

        print("#### searching for lane points within ", self.nwindows,
              " sliding windows ####")

        original_image, binary_warped, perspective_M, color_binary = th.warp_and_threshholding(
            img)

        # Assuming you have created a warped binary image called "binary_warped"
        # Take a histogram of the bottom half of the image
        histogram = np.sum(binary_warped[binary_warped.shape[0] // 2:, :],
                           axis=0)
        # Create an output image to draw on and visualize the result
        out_img = np.dstack(
            (binary_warped, binary_warped, binary_warped)) * 255

        # Find the peak of the left and right halves of the histogram
        # These will be the starting point for the left and right lines
        midpoint = histogram.shape[0] // 2
        leftx_base = np.argmax(histogram[:midpoint])
        rightx_base = np.argmax(histogram[midpoint:]) + midpoint

        # Set height of windows
        window_height = binary_warped.shape[0] // self.nwindows
        # Identify the x and y positions of all nonzero pixels in the image
        nonzero = binary_warped.nonzero()
        nonzeroy = np.array(nonzero[0])
        nonzerox = np.array(nonzero[1])
        # Current positions to be updated for each window
        leftx_current = leftx_base
        rightx_current = rightx_base

        # Create empty lists to receive left and right lane pixel indices
        left_lane_inds = []
        right_lane_inds = []

        # Step through the windows one by one
        for window in range(self.nwindows):

            # Identify window boundaries in x and y (and right and left)
            win_y_low = binary_warped.shape[0] - (window + 1) * window_height
            win_y_high = binary_warped.shape[0] - window * window_height
            win_xleft_low = leftx_current - self.margin
            win_xleft_high = leftx_current + self.margin
            win_xright_low = rightx_current - self.margin
            win_xright_high = rightx_current + self.margin
            # Draw the windows on the visualization image
            cv2.rectangle(out_img, (win_xleft_low, win_y_low),
                          (win_xleft_high, win_y_high), (0, 255, 0), 3)
            cv2.rectangle(out_img, (win_xright_low, win_y_low),
                          (win_xright_high, win_y_high), (0, 255, 0), 3)

            good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high)
                              & (nonzerox >= win_xleft_low) &
                              (nonzerox < win_xleft_high)).nonzero()[0]

            good_right_inds = ((nonzeroy >= win_y_low) &
                               (nonzeroy < win_y_high) &
                               (nonzerox >= win_xright_low) &
                               (nonzerox < win_xright_high)).nonzero()[0]
            # Append these indices to the lists
            left_lane_inds.append(good_left_inds)
            right_lane_inds.append(good_right_inds)
            # If you found > minpix pixels, recenter next window on their mean position
            if len(good_left_inds) > self.minpix:
                leftx_current = int(np.mean(nonzerox[good_left_inds]))
            if len(good_right_inds) > self.minpix:
                rightx_current = int(np.mean(nonzerox[good_right_inds]))

        # Concatenate the arrays of indices
        left_lane_inds = np.concatenate(left_lane_inds)
        right_lane_inds = np.concatenate(right_lane_inds)

        # Extract left and right line pixel positions
        self.leftx = nonzerox[left_lane_inds]
        self.lefty = nonzeroy[left_lane_inds]
        self.rightx = nonzerox[right_lane_inds]
        self.righty = nonzeroy[right_lane_inds]

        # Fit a second order polynomial to each lane line; fall back to the
        # average of recent fits when too few pixels were found.
        if len(self.leftx) > self.minpix:
            left_fit = np.polyfit(self.lefty, self.leftx, 2)
            self.last_fit_left = left_fit
        else:
            print("left fit requires more points!", len(self.leftx))
            left_fit = np.nanmean(self.recent_left_fits, axis=0)

        if len(self.rightx) > self.minpix:
            right_fit = np.polyfit(self.righty, self.rightx, 2)
            self.last_fit_right = right_fit
        else:
            print("right fit requires more points!", len(self.rightx))
            right_fit = np.nanmean(self.recent_right_fits, axis=0)

        # Only mark the frame as detected when both fits succeeded
        self.detected = (len(self.leftx) > self.minpix
                         and len(self.rightx) > self.minpix)

        # Generate x and y values for plotting
        self.ploty = np.linspace(0, binary_warped.shape[0] - 1,
                                 binary_warped.shape[0])
        left_fitx = left_fit[0] * self.ploty**2 + left_fit[1] * self.ploty + left_fit[2]
        right_fitx = right_fit[0] * self.ploty**2 + right_fit[1] * self.ploty + right_fit[2]

        out_img[self.lefty, self.leftx] = [255, 0, 0]
        out_img[nonzeroy[right_lane_inds],
                nonzerox[right_lane_inds]] = [0, 0, 255]

        original_image, top_down, perspective_Ms = pers.warp_image(
            out_img, True)

        if self.plotting:
            plt.figure(0)
            plt.imshow(out_img)
            plt.plot(left_fitx, self.ploty, color='white', linewidth=2)
            plt.plot(right_fitx, self.ploty, color='white', linewidth=2)
            plt.xlim(0, 1280)
            plt.ylim(720, 0)
            plt.figure(1)
            plt.imshow(top_down)
            plt.figure(2)
            plt.imshow(img)

        return out_img
    def scan_rows_near_fit(self, img):
        print(self.ploty.shape, self.leftx.shape)
        print("#### scanning rows close to fit with margin of ", self.margin,
              "####")
        #print ("left_fit", self.left_fit)

        original_image, binary_warped, perspective_M, color_binary = th.warp_and_threshholding(
            img)

        # Assume you now have a new warped binary image
        # from the next frame of video (also called "binary_warped")
        # It's now much easier to find line pixels!
        nonzero = binary_warped.nonzero()
        nonzeroy = np.array(nonzero[0])
        nonzerox = np.array(nonzero[1])

        last_fit_left = self.last_fit_left
        last_fit_right = self.last_fit_right

        left_lane_inds = ((nonzerox >
                           (last_fit_left[0] *
                            (nonzeroy**2) + last_fit_left[1] * nonzeroy +
                            last_fit_left[2] - self.margin)) &
                          (nonzerox <
                           (last_fit_left[0] *
                            (nonzeroy**2) + last_fit_left[1] * nonzeroy +
                            last_fit_left[2] + self.margin)))

        right_lane_inds = ((nonzerox >
                            (last_fit_right[0] *
                             (nonzeroy**2) + last_fit_right[1] * nonzeroy +
                             last_fit_right[2] - self.margin)) &
                           (nonzerox <
                            (last_fit_right[0] *
                             (nonzeroy**2) + last_fit_right[1] * nonzeroy +
                             last_fit_right[2] + self.margin)))

        # Again, extract left and right line pixel positions
        self.leftx = nonzerox[left_lane_inds]
        self.lefty = nonzeroy[left_lane_inds]
        self.rightx = nonzerox[right_lane_inds]
        self.righty = nonzeroy[right_lane_inds]

        # Fit a second order polynomial to each lane line; fall back to the
        # average of recent fits when too few pixels were found.
        if len(self.leftx) > self.minpix:
            left_fit = np.polyfit(self.lefty, self.leftx, 2)
            self.last_fit_left = left_fit
        else:
            print("left fit requires more points!", len(self.leftx))
            left_fit = np.nanmean(self.recent_left_fits, axis=0)

        if len(self.rightx) > self.minpix:
            right_fit = np.polyfit(self.righty, self.rightx, 2)
            self.last_fit_right = right_fit
        else:
            print("right fit requires more points!", len(self.rightx))
            right_fit = np.nanmean(self.recent_right_fits, axis=0)

        # Only mark the frame as detected when both fits succeeded
        self.detected = (len(self.leftx) > self.minpix
                         and len(self.rightx) > self.minpix)

        # Generate x and y values for plotting
        self.ploty = np.linspace(0, binary_warped.shape[0] - 1,
                                 binary_warped.shape[0])
        left_fitx = left_fit[0] * self.ploty**2 + left_fit[1] * self.ploty + left_fit[2]
        right_fitx = right_fit[0] * self.ploty**2 + right_fit[1] * self.ploty + right_fit[2]

        out_img = np.dstack(
            (binary_warped, binary_warped, binary_warped)) * 255

        out_img[self.lefty, self.leftx] = [255, 0, 0]
        out_img[nonzeroy[right_lane_inds],
                nonzerox[right_lane_inds]] = [0, 0, 255]

        original_image, top_down, perspective_Ms = pers.warp_image(
            out_img, True)

        if self.plotting:

            plt.figure(0)
            plt.imshow(out_img)
            plt.plot(left_fitx, self.ploty, color='white', linewidth=2)
            plt.plot(right_fitx, self.ploty, color='white', linewidth=2)
            plt.xlim(0, 1280)
            plt.ylim(720, 0)
            plt.figure(1)
            plt.imshow(top_down)

            # Create an image to draw on and an image to show the selection window
            out_img = np.dstack(
                (binary_warped, binary_warped, binary_warped)) * 255
            window_img = np.zeros_like(out_img)
            fit_img = np.zeros_like(out_img)
            # Color in left and right line pixels
            out_img[nonzeroy[left_lane_inds],
                    nonzerox[left_lane_inds]] = [255, 0, 0]
            out_img[nonzeroy[right_lane_inds],
                    nonzerox[right_lane_inds]] = [0, 0, 255]

            # Generate a polygon to illustrate the search window area
            # And recast the x and y points into usable format for cv2.fillPoly()
            left_line_window1 = np.array([
                np.transpose(np.vstack([left_fitx - self.margin, self.ploty]))
            ])
            left_line_window2 = np.array([
                np.flipud(
                    np.transpose(
                        np.vstack([left_fitx + self.margin, self.ploty])))
            ])
            left_line_pts = np.hstack((left_line_window1, left_line_window2))
            right_line_window1 = np.array([
                np.transpose(np.vstack([right_fitx - self.margin, self.ploty]))
            ])
            right_line_window2 = np.array([
                np.flipud(
                    np.transpose(
                        np.vstack([right_fitx + self.margin, self.ploty])))
            ])
            right_line_pts = np.hstack(
                (right_line_window1, right_line_window2))

            # Draw the lane onto the warped blank image
            cv2.fillPoly(window_img, np.int_([left_line_pts]), (0, 255, 0))
            cv2.fillPoly(window_img, np.int_([right_line_pts]), (0, 255, 0))
            ## draw fit lines
            left_fit_pts_l = np.array([
                np.transpose(
                    np.vstack([left_fitx - self.fit_line_width, self.ploty]))
            ])
            left_fit_pts_r = np.array([
                np.flipud(
                    np.transpose(
                        np.vstack(
                            [left_fitx + self.fit_line_width, self.ploty])))
            ])

            left_fit_pts = np.hstack((left_fit_pts_l, left_fit_pts_r))

            right_fit_pts_l = np.array([
                np.transpose(
                    np.vstack([right_fitx - self.fit_line_width, self.ploty]))
            ])
            right_fit_pts_r = np.array([
                np.flipud(
                    np.transpose(
                        np.vstack(
                            [right_fitx + self.fit_line_width, self.ploty])))
            ])

            right_fit_pts = np.hstack((right_fit_pts_l, right_fit_pts_r))

            cv2.fillPoly(fit_img, np.int_([left_fit_pts]), (255, 255, 255))
            cv2.fillPoly(fit_img, np.int_([right_fit_pts]), (255, 255, 255))

            result = cv2.addWeighted(out_img, 1, window_img, 0.2, 0)
            result = cv2.addWeighted(result, 1, fit_img, 0.3, 0)
            original_image, top_down, perspective_Ms = pers.warp_image(
                result, True)
            plt.figure(2)
            plt.imshow(top_down)

        return out_img
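The fallback branches above average over `self.recent_left_fits` / `self.recent_right_fits`, and `draw_lanes` reads `self.recent_lane_scores`; a sketch of how that per-frame history might be maintained (the class name, `history_length`, and the update method are assumptions, only the attribute names come from the code above):

from collections import deque

import numpy as np

class LaneHistory:
    # Hypothetical bookkeeping helper; attribute names match those used in
    # slide_windows, scan_rows_near_fit and draw_lanes.
    def __init__(self, history_length=10):
        self.recent_left_fits = deque(maxlen=history_length)
        self.recent_right_fits = deque(maxlen=history_length)
        self.recent_lane_scores = deque(maxlen=history_length)
        self.last_lane_score = np.nan

    def update_fit_history(self, left_fit, right_fit, lane_score):
        # Keep a bounded history of accepted fits so that np.nanmean /
        # np.nanmedian can supply a smoothed fallback when detection fails.
        self.recent_left_fits.append(left_fit)
        self.recent_right_fits.append(right_fit)
        self.recent_lane_scores.append(lane_score)
        self.last_lane_score = lane_score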