Example #1
    def _detect_lane(self, image, left_fit_prev=None, right_fit_prev=None):
        # Undistort image
        mtx, dist = self.undis_param
        undist = cv2.undistort(image, mtx, dist, None, mtx)

        # Apply threshold
        thresh = Thresholds(undist, self.debug)
        undist_binary = thresh.apply_thresholds(ksize=self.ksize,
                                                s_thresh=self.s_thresh,
                                                sx_thresh=self.sx_thresh,
                                                sy_thresh=self.sy_thresh,
                                                m_thresh=self.m_thresh,
                                                d_thresh=self.d_thresh)
        # Warp binary image
        transform = PerspectiveTransform(undist_binary, image, self.debug)
        binary_warped = transform.warp()

        # Fit polynomials to the left and right lane markers
        polyfit = FitPolynomial(binary_warped, self.debug)
        left_fit, right_fit = polyfit.fit_polynomial(left_fit_prev,
                                                     right_fit_prev)

        # Warp the annotated lanes back onto the undistorted image
        out_image = transform.warp_back(undist, polyfit)

        # Curvature and offset
        curvature = polyfit.curvature()
        offset = polyfit.offset()

        return out_image, left_fit, right_fit, curvature, offset
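
A minimal sketch of how this method might be driven across video frames, feeding each frame's fits back in as the priors for the next one. Everything except `_detect_lane` itself (the `detector` object and the frame source) is an assumption for illustration:

left_fit = right_fit = None
for frame in frames:  # frames: any iterable of BGR images (assumed)
    # Reuse the previous fits so the lane search can start near them.
    out_image, left_fit, right_fit, curvature, offset = detector._detect_lane(
        frame, left_fit_prev=left_fit, right_fit_prev=right_fit)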
Example #2
    def __init__(self, src, dst, n_images=1, calibration=None, line_segments=LINE_SEGMENTS, offset=0):
        self.n_images = n_images
        self.camera_calibration = calibration
        self.line_segments = line_segments
        self.image_offset = offset
        self.left_line = None
        self.right_line = None
        self.center_poly = None
        self.curvature = 0.0
        self.offset = 0.0
        self.perspective = PerspectiveTransform(src, dst)
        self.distances = []
Example #3
    def __init__(self, window_width=35, window_height=120, margin=40):
        # Load previously saved camera calibration parameters.
        # If the camera is not calibrated, see calibration.py for how to do it.
        self.mtx, self.dist = load_calibration_matrix(
            'camera_cal/dist_pickle.p')

        self.window_width = window_width
        self.window_height = window_height
        self.margin = margin

        self.perspective = PerspectiveTransform(debug=True)
        self.tracker = LaneTracker(self.window_width, self.window_height,
                                   self.margin, 30 / 720, 3.7 / 700)
Example #4
    def pipeline(img, lanes_fit, camera_matrix, dist_coef):
        # debug flag
        is_debug_enabled = True

        # chessboard dimensions for calibration
        nx, ny, channels = 9, 6, 3

        # calibrate camera and undistort the image
        undistorted_image = PreProcessing.get_undistorted_image(
            nx, ny, img, camera_matrix, dist_coef)

        # get the color and gradient threshold image
        binary_image = PreProcessing.get_binary_image(undistorted_image)

        # get source and destination points
        src, dst = PerspectiveTransform.get_perspective_points(img)

        # get image with source and destination points drawn
        img_src, img_dst = PerspectiveTransform.get_sample_wrapped_images(
            img, src, dst)

        # perspective transform to bird eye view
        warped_image = PerspectiveTransform.get_wrapped_image(
            binary_image, src, dst)

        # find the lanes lines and polynomial fit
        if len(lanes_fit) == 0:
            lane_lines, lanes_fit, left_xy, right_xy = LanesFitting.get_lanes_fit(
                warped_image)
        else:
            lane_lines, lanes_fit, left_xy, right_xy = LanesFitting.update_lanes_fit(
                warped_image, lanes_fit)

        # find the radius of curvature
        radius = Metrics.get_curvature_radius(lane_lines, left_xy, right_xy)

        # find the car distance from center lane
        center_distance, lane_width = Metrics.get_distance_from_center(
            lane_lines, lanes_fit)

        # unwarp the image back to the original perspective
        resultant_img = PerspectiveTransform.get_unwrapped_image(
            undistorted_image, warped_image, src, dst, lanes_fit)

        # visualize the pipeline
        if is_debug_enabled:
            resultant_img = Visualization.visualize_pipeline(
                resultant_img, img_dst, binary_image, lane_lines, radius,
                center_distance, lane_width)

        return lanes_fit, resultant_img
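
Because `pipeline` returns the updated `lanes_fit`, the caller is expected to thread it back in on the next frame so `update_lanes_fit` can refine the previous fit instead of searching from scratch. A minimal driver sketch, assuming `pipeline` is reachable at module scope, an OpenCV video source, and precomputed `camera_matrix`/`dist_coef` (the file name and loop are illustrative):

import cv2

cap = cv2.VideoCapture('project_video.mp4')  # assumed input clip
lanes_fit = []  # an empty list triggers the full search on the first frame
while True:
    ok, frame = cap.read()
    if not ok:
        break
    lanes_fit, annotated = pipeline(frame, lanes_fit, camera_matrix, dist_coef)
    cv2.imshow('lanes', annotated)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()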
Example #5
    def start(self):
        ap = argparse.ArgumentParser()
        ap.add_argument("-v", "--video", help="path to the video file")
        ap.add_argument("-a",
                        "--min-area",
                        type=int,
                        default=400,
                        help="minimum area size")
        args = vars(ap.parse_args())

        # print(args["video"])
        # print(self.path)
        # self.vs = cv2.VideoCapture(self.path)

        firstFrame = None
        # Flag for cropping image. True if ROI selected
        backboard_found = False
        five_frame_processed = 0
        frame_buffer = []
        object_counter = 0
        play_count = 0
        # destination points for homography estimation based on homo_dst.jpg
        # dst_img = cv2.imread("homo_dst.jpg")
        dst_points = np.array([[206, 4], [205, 227], [394, 227], [394, 4]])

        fgbg = cv2.createBackgroundSubtractorMOG2()
        kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (7, 7))
        poly_constructor = PolyFit()
        operations = IOOperations()

        while True:
            # grab the current frame
            self.frame = self.vs.read()
            self.frame = self.frame[1]

            if self.frame is None:
                break

            # resize the frame and rotate it upright
            self.frame = imutils.resize(self.frame, width=400)
            self.frame = cv2.rotate(self.frame, cv2.ROTATE_90_COUNTERCLOCKWISE)
            # Append the current frame to the buffer for later use
            frame_object = FrameObject(self.frame, object_counter)
            object_counter = object_counter + 1
            frame_buffer.append(frame_object)
            coef_x = []
            coef_y = []
            src_shot_location = np.array([])

            # Crop image 280 130 90 90
            frame_copy = self.frame.copy()
            # r = [280, 130, 90, 90]
            imCrop = frame_copy[int(self.r[1]):int(self.r[1] + self.r[3]),
                                int(self.r[0]):int(self.r[0] + self.r[2])]
            # imCrop = frame_copy[int(130):int(130 + 90), int(280):int(280 + 90)]

            # if the frame could not be grabbed, then we have reached the end
            # of the video
            if self.frame is None or imCrop is None:
                break

            # After detecting movement inside the ROI, the program skips five
            # frames to avoid tracking the same shot again
            if five_frame_processed == 6:
                five_frame_processed = 0

            if play_count == int(self.play_border):
                break

            # convert the cropped ROI to grayscale
            gray = cv2.cvtColor(imCrop, cv2.COLOR_BGR2GRAY)

            # if the first frame is None, initialize it
            if firstFrame is None:
                firstFrame = gray
                continue

            fgmask = fgbg.apply(gray)
            closing = cv2.morphologyEx(fgmask, cv2.MORPH_CLOSE, kernel)
            opening = cv2.morphologyEx(closing, cv2.MORPH_OPEN, kernel)

            # dilate the opening image to fill in holes, then find contours
            # on thresholded image
            thresh = cv2.dilate(opening, kernel, iterations=2)
            cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                                    cv2.CHAIN_APPROX_SIMPLE)
            cnts = imutils.grab_contours(cnts)

            # loop over the contours
            for c in cnts:
                # if the contour is too small, ignore it
                if cv2.contourArea(c) < args["min_area"]:
                    continue

                # compute the bounding box for the contour, find ball with blob detection, draw it on the frame
                (x, y, w, h) = cv2.boundingRect(c)
                points_of_ball = [
                    int(x) + int(self.r[0]),
                    int(y) + int(self.r[1]), w, h
                ]
                roi = imCrop[y:(y + h), x:(x + w)]
                key_points = DetectBall.detectBall(roi)
                if len(key_points) > 0:
                    cv2.rectangle(imCrop, (x, y), (x + w, y + h), (0, 255, 0),
                                  2)

                    inner_count = 0
                    if five_frame_processed == 0:
                        search_x = points_of_ball[0]
                        search_y = points_of_ball[1]
                        search_w = points_of_ball[2]
                        search_h = points_of_ball[3]
                        path_base = 'play_' + str(play_count)
                        path_search = 'play_' + str(
                            play_count) + '/search_' + str(play_count)
                        path_thresh = 'play_' + str(
                            play_count) + '/search_thresh_' + str(play_count)
                        path_coefs = 'play_' + str(
                            play_count) + '/coefs' + str(play_count)
                        # os.makedirs(path_base)
                        # os.makedirs(path_search)
                        # os.makedirs(path_thresh)
                        # Coefficients for the search areas in ball tracking
                        point_coefficient = 0.8
                        length_coefficient = 1.3
                        # Points passed to the detection model to bound the search area
                        yolo_x = 0
                        yolo_y = 0
                        yolo_w = 0
                        yolo_h = 0

                        for reverse_play in reversed(frame_buffer):
                            if inner_count == 35:
                                break

                            if inner_count > 31:
                                length_coefficient = 2

                            search_frame = reverse_play.getFrame()[
                                int(search_y * point_coefficient):(
                                    int(search_y +
                                        search_h * length_coefficient)),
                                int(search_x * point_coefficient):(
                                    int(search_x +
                                        search_w * length_coefficient))]

                            # cv2.imwrite(os.path.join(path_search, 'search_' + str(inner_count) + '.jpg'), search_frame)

                            reverse_gray = cv2.cvtColor(
                                reverse_play.getFrame(), cv2.COLOR_BGR2GRAY)
                            reverse_fgmask = fgbg.apply(reverse_gray)

                            reverse_closing = cv2.morphologyEx(
                                reverse_fgmask, cv2.MORPH_CLOSE, kernel)
                            reverse_opening = cv2.morphologyEx(
                                reverse_closing, cv2.MORPH_OPEN, kernel)

                            reverse_thresh = cv2.dilate(reverse_opening,
                                                        kernel,
                                                        iterations=2)

                            search_thresh = reverse_thresh[
                                int(search_y * point_coefficient):(
                                    int(search_y +
                                        search_h * length_coefficient)),
                                int(search_x * point_coefficient):(
                                    int(search_x +
                                        search_w * length_coefficient))]
                            # cv2.imwrite(os.path.join(path_thresh, 'search_thresh_' + str(inner_count) + '.jpg'), search_thresh)

                            reverse_cnts = cv2.findContours(
                                reverse_thresh.copy(), cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)
                            reverse_cnts = imutils.grab_contours(reverse_cnts)

                            for r_c in reverse_cnts:
                                # if the contour is too small, ignore it
                                if cv2.contourArea(r_c) < args["min_area"]:
                                    continue

                                # compute the bounding box for the contour, find ball with blob detection, draw it on the frame
                                (x, y, w, h) = cv2.boundingRect(r_c)

                                r_roi = reverse_play.getFrame()[y:(y + h),
                                                                x:(x + w)]
                                r_roi_temp = reverse_play.getFrame()[y + 10:(
                                    y + h), x + 10:(x + w)]
                                r_key_points = DetectBall.detectBall(r_roi)
                                r_key_points_tmp = DetectBall.detectBall(
                                    r_roi_temp)
                                if len(r_key_points) > 0:
                                    if int(search_x *
                                           point_coefficient) < x < int(
                                               search_x + search_w *
                                               length_coefficient) and int(
                                                   search_y * point_coefficient
                                               ) < y < int(search_y +
                                                           search_h *
                                                           length_coefficient):
                                        if inner_count != 0:
                                            if y + h > int(search_y +
                                                           search_h *
                                                           length_coefficient):
                                                # print("first", inner_count)
                                                cv2.rectangle(
                                                    self.frame, (x, y),
                                                    (x + w, y + h - search_h),
                                                    (0, 255, 0), 2)
                                                yolo_x = x
                                                yolo_y = y
                                                yolo_w = w
                                                yolo_h = h
                                                if (x + w) / 2 != 0 and (
                                                        y + h -
                                                        search_h) / 2 != 0:
                                                    if 24 < inner_count < 31:
                                                        coef_x.append(
                                                            (x + w / 2))
                                                        coef_y.append(
                                                            (y + h + 60))
                                                    else:
                                                        coef_x.append(
                                                            (x + w / 2))
                                                        coef_y.append(
                                                            (y + h / 2 -
                                                             search_h))
                                            else:
                                                # print("second", inner_count)
                                                cv2.rectangle(
                                                    self.frame, (x, y),
                                                    (x + w, y + h),
                                                    (0, 255, 0), 2)
                                                if (x + w) / 2 != 0 and (
                                                        y + h) / 2 != 0:
                                                    coef_x.append((x + w / 2))
                                                    coef_y.append((y + h / 2))

                                        # cv2.imwrite(os.path.join(path_base, "detected_" + str(play_count)) + ".jpg", self.frame)
                                        if x == 0 or y == 0:
                                            continue
                                        search_x = x
                                        search_y = y
                                        search_w = w
                                        search_h = h
                            inner_count = inner_count + 1

                        # with open(path_coefs + '_x.txt', 'w') as f:
                        #     for index, item in enumerate(coef_x):
                        #         if index + 1 == len(coef_x):
                        #             f.write("%s" % item)
                        #         else:
                        #             f.write("%s," % item)

                        # with open(path_coefs + '_y.txt', 'w') as f:
                        #     for index, item in enumerate(coef_y):
                        #         if index + 1 == len(coef_y):
                        #             f.write("%s" % item)
                        #         else:
                        #             f.write("%s," % item)

                        np_coefs_x = np.array(coef_x)
                        np_coefs_y = np.array(coef_y)
                        try:
                            # self.frame = cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY)
                            src_shot_location = pd.detect(
                                self.frame, yolo_x, yolo_y, yolo_w, yolo_h)
                            # self.frame = cv2.cvtColor(self.frame, cv2.COLOR_GRAY2BGR)
                            # cv2.imwrite(os.path.join(path_base, "yolo_image" + str(play_count)) + ".jpg", self.frame)
                            cv2.namedWindow("Shot #" + str(play_count + 1))
                            cv2.moveWindow("Shot #" + str(play_count + 1), 700,
                                           0)
                            cv2.imshow("Shot #" + str(play_count + 1),
                                       self.frame)
                            cv2.waitKey(20)
                            print("Shot location in real world: ",
                                  src_shot_location)

                            # warped = warp.four_point_transform(frame, self.src_points)
                            warped, h = pt.apply_homography(
                                self.src_points, dst_points, self.frame,
                                self.dst_img)
                            # cv2.imwrite(os.path.join(path_base, "warped_" + str(play_count)) + ".jpg", warped)

                            t_shot_cart_coor, t_shot_homo_coor = pt.point_to_point_homography(
                                src_shot_location, h)
                            # font = cv2.FONT_HERSHEY_SIMPLEX
                            cv2.circle(
                                self.dst_image_clone,
                                (t_shot_cart_coor[0], t_shot_cart_coor[1]),
                                3, (0, 0, 255),
                                thickness=-1)
                            # cv2.putText(self.dst_image_clone, str(play_count + 1),(t_shot_cart_coor[0], t_shot_cart_coor[1]), font, 0.5,(255,255,255),2,cv2.LINE_AA)
                            # cv2.circle(self.dst_image_clone, (t_shot_cart_coor[0], t_shot_cart_coor[1]), 3, (0, 0, 255), thickness=-1)
                            # cv2.imwrite(os.path.join(path_base, "heatmap") + ".jpg", self.dst_image_clone)
                        except Exception:
                            print("[ERROR] Can't find shot location!")

                        frame_buffer = []
                        coef_x = []
                        coef_y = []
                        cv2.destroyWindow("Shot #" + str(play_count + 1))
                        play_count = play_count + 1
                    five_frame_processed = five_frame_processed + 1
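
`start` leans on state prepared elsewhere: `self.vs` (a `cv2.VideoCapture`), the ROI `self.r`, `self.play_border`, and the homography inputs `self.src_points`/`self.dst_img`/`self.dst_image_clone`. The constructor is not shown in the source, so the setup below is purely illustrative, including the class name:

tracker = ShotTracker()                    # hypothetical class name
tracker.vs = cv2.VideoCapture('game.mp4')  # assumed video source
tracker.r = [280, 130, 90, 90]             # ROI, per the commented hint above
tracker.play_border = 10                   # stop after 10 detected shots
tracker.start()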
Example #6
class LaneDetector:
    ARROW_TIP_LENGTH = 0.5
    VERTICAL_OFFSET = 400
    HISTOGRAM_WINDOW = 7
    POLYNOMIAL_COEFFICIENT = 719
    LINE_SEGMENTS = 10

    def __init__(self, src, dst, n_images=1, calibration=None, line_segments=LINE_SEGMENTS, offset=0):
        self.n_images = n_images
        self.camera_calibration = calibration
        self.line_segments = line_segments
        self.image_offset = offset
        self.left_line = None
        self.right_line = None
        self.center_poly = None
        self.curvature = 0.0
        self.offset = 0.0
        self.perspective = PerspectiveTransform(src, dst)
        self.distances = []

    @staticmethod
    def _acceptable_lanes(left, right):
        if len(left[0]) < 3 or len(right[0]) < 3:
            return False
        else:
            new_left = Line(y=left[0], x=left[1])
            new_right = Line(y=right[0], x=right[1])
            return acceptable_lanes(new_left, new_right)

    def _check_lines(self, left_x, left_y, right_x, right_y):
        left_found, right_found = False, False

        if self._acceptable_lanes((left_x, left_y), (right_x, right_y)):
            left_found, right_found = True, True
        elif self.left_line and self.right_line:
            if self._acceptable_lanes((left_x, left_y), (self.left_line.ys, self.left_line.xs)):
                left_found = True
            if self._acceptable_lanes((right_x, right_y), (self.right_line.ys, self.right_line.xs)):
                right_found = True

        return left_found, right_found

    def _draw_info(self, image):
        font = cv2.FONT_HERSHEY_SIMPLEX
        text_curvature = 'Curvature: {}'.format(self.curvature)
        cv2.putText(image, text_curvature, (50, 50), font, 1, (255, 255, 255), 2)
        text_position = '{}m {} of center'.format(abs(self.offset), 'left' if self.offset < 0 else 'right')
        cv2.putText(image, text_position, (50, 100), font, 1, (255, 255, 255), 2)

    def _draw_overlay(self, image):
        overlay = np.zeros([*image.shape])
        mask = np.zeros([image.shape[0], image.shape[1]])
        lane_area = calculate_lane_area((self.left_line, self.right_line), image.shape[0], 20)
        mask = cv2.fillPoly(mask, np.int32([lane_area]), 1)
        mask = self.perspective.inverse_transform(mask)
        overlay[mask == 1] = (255, 128, 0)
        selection = (overlay != 0)
        image[selection] = image[selection] * 0.3 + overlay[selection] * 0.7
        mask[:] = 0
        mask = draw_polynomial(mask, self.center_poly, 20, 255, 5, True, self.ARROW_TIP_LENGTH)
        mask = self.perspective.inverse_transform(mask)
        image[mask == 255] = (255, 75, 2)
        mask[:] = 0
        mask = draw_polynomial(mask, self.left_line.best_fit_poly, 5, 255)
        mask = draw_polynomial(mask, self.right_line.best_fit_poly, 5, 255)
        mask = self.perspective.inverse_transform(mask)
        image[mask == 255] = (255, 200, 2)

    def _process_history(self, image, left_found, right_found, left_x, left_y, right_x, right_y):
        if self.left_line and self.right_line:
            left_x, left_y = lane_detection_history(image, self.left_line.best_fit_poly, self.line_segments)
            right_x, right_y = lane_detection_history(image, self.right_line.best_fit_poly, self.line_segments)

            left_found, right_found = self._check_lines(left_x, left_y, right_x, right_y)
        return left_found, right_found, left_x, left_y, right_x, right_y

    def _process_histogram(self, image, left_found, right_found, left_x, left_y, right_x, right_y):
        if not left_found:
            left_x, left_y = lane_detection_histogram(image, self.line_segments,
                                                      (self.image_offset, image.shape[1] // 2),
                                                      h_window=self.HISTOGRAM_WINDOW)
            left_x, left_y = remove_outliers(left_x, left_y)
        if not right_found:
            right_x, right_y = lane_detection_histogram(image, self.line_segments,
                                                        (image.shape[1] // 2, image.shape[1] - self.image_offset),
                                                        h_window=self.HISTOGRAM_WINDOW)
            right_x, right_y = remove_outliers(right_x, right_y)

        if not left_found or not right_found:
            left_found, right_found = self._check_lines(left_x, left_y, right_x, right_y)

        return left_found, right_found, left_x, left_y, right_x, right_y

    def _draw(self, image, original_image):
        if self.left_line and self.right_line:
            self.distances.append(self.left_line.get_best_fit_distance(self.right_line))
            self.center_poly = (self.left_line.best_fit_poly + self.right_line.best_fit_poly) / 2
            self.curvature = curvature(self.center_poly)
            self.offset = (image.shape[1] / 2 - self.center_poly(self.POLYNOMIAL_COEFFICIENT)) * 3.7 / 700
            self._draw_overlay(original_image)
            self._draw_info(original_image)

    def _update_lane_left(self, found, x, y):
        if found:
            if self.left_line:
                self.left_line.update(y=x, x=y)
            else:
                self.left_line = Line(self.n_images, y, x)

    def _update_lane_right(self, found, x, y):
        if found:
            if self.right_line:
                self.right_line.update(y=x, x=y)
            else:
                self.right_line = Line(self.n_images, y, x)

    def process_image(self, image):
        original_image = np.copy(image)

        image = self.camera_calibration.undistort(image)
        image = lane_mask(image, self.VERTICAL_OFFSET)
        image = self.perspective.transform(image)

        left_found = right_found = False
        left_x = left_y = right_x = right_y = []

        left_found, right_found, left_x, left_y, right_x, right_y = \
            self._process_history(image, left_found, right_found, left_x, left_y, right_x, right_y)
        left_found, right_found, left_x, left_y, right_x, right_y = \
            self._process_histogram(image, left_found, right_found, left_x, left_y, right_x, right_y)

        self._update_lane_left(left_found, left_x, left_y)
        self._update_lane_right(right_found, right_x, right_y)
        self._draw(image, original_image)

        return original_image
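
A sketch of wiring `process_image` into a per-frame loop, assuming a calibration object exposing `undistort` and hand-picked `src`/`dst` perspective points (all values below are illustrative, not from the source):

import numpy as np

src = np.float32([[595, 450], [690, 450], [1115, 720], [216, 720]])  # assumed
dst = np.float32([[300, 0], [980, 0], [980, 720], [300, 720]])       # assumed
detector = LaneDetector(src, dst, n_images=5, calibration=calibration)
annotated = detector.process_image(frame)  # frame: one BGR video frame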
Example #7
    example_files = [
        './images/' + f for f in os.listdir('./images')
        if os.path.isfile(os.path.join('./images', f))
    ]
    # Selecting random file for testing
    file_img = example_files[np.random.randint(0, len(example_files))]
    # file_img = './images/806123698_321554.jpg'  # Good file for testing
    img = imageio.imread(file_img)
    plt.figure(figsize=(10, 10))
    plt.imshow(img)
    plt.show()

    # Finding corners from input image
    corner_points = CornerDetector(img).find_corners4().astype(np.float32)
    corner_points[:, [0, 1]] = corner_points[:, [1, 0]]

    # Computing the perspective transform
    img_p = PerspectiveTransform(img, corner_points).four_point_transform()

    # Finding text areas
    img_cv = cv2.cvtColor(img_p, cv2.COLOR_RGB2BGR)
    # Testing with different structuring element sizes
    sizes = [(17, 3), (30, 10), (5, 5), (9, 3)]
    for size in sizes:
        strs, bound_rects, img_bboxes = TextDetector(img_cv,
                                                     size).recognize_text()
        plt.figure(figsize=(10, 10))
        plt.imshow(cv2.cvtColor(img_bboxes, cv2.COLOR_BGR2RGB))
        plt.show()
        print(size)
        print(*strs, sep='\n')
Example #8
class LanePipeline():
    def __init__(self, window_width=35, window_height=120, margin=40):
        # Load previously saved camera calibration parameters.
        # If the camera is not calibrated, see calibration.py for how to do it.
        self.mtx, self.dist = load_calibration_matrix(
            'camera_cal/dist_pickle.p')

        self.window_width = window_width
        self.window_height = window_height
        self.margin = margin

        self.perspective = PerspectiveTransform(debug=True)
        self.tracker = LaneTracker(self.window_width, self.window_height,
                                   self.margin, 30 / 720, 3.7 / 700)

    def window_mask(self, img, centroid, level):
        output = np.zeros_like(img)
        output[int(img.shape[0] -
                   (level + 1) * self.window_height):int(img.shape[0] - level *
                                                         self.window_height),
               max(0, int(centroid - self.window_width)
                   ):min(int(centroid + self.window_width), img.shape[1])] = 1

        return output

    def process(self, img):
        mtx = self.mtx
        dist = self.dist
        window_width = self.window_width
        window_height = self.window_height
        margin = self.margin

        # undistort the image using the camera calibration parameters
        img = cv2.undistort(img, mtx, dist, None, mtx)

        M, Minv, binary_warped = self.perspective.get_warped_image(img)

        binary_warped = binary_warped / 255

        window_centroids = self.tracker.sliding_window_centroids(binary_warped)

        l_points = np.zeros_like(binary_warped)
        r_points = np.zeros_like(binary_warped)

        rightx = []
        leftx = []

        for level in range(0, len(window_centroids)):

            leftx.append(window_centroids[level][0])
            rightx.append(window_centroids[level][1])

            l_mask = self.window_mask(binary_warped,
                                      window_centroids[level][0], level)
            r_mask = self.window_mask(binary_warped,
                                      window_centroids[level][1], level)

            l_points[(l_points == 255) | ((l_mask == 1))] = 255
            r_points[(r_points == 255) | ((r_mask == 1))] = 255

        template = np.array(
            cv2.merge((l_points, r_points, np.zeros_like(l_points))), np.uint8)
        warpage = np.array(
            cv2.merge((binary_warped, binary_warped, binary_warped)), np.uint8)
        warpage = cv2.addWeighted(warpage, 1, template, 0.5, 0.0)

        yvals = np.arange(100, binary_warped.shape[0])

        # y value of the window centroid
        y_centers = np.arange(binary_warped.shape[0] - (window_height / 2), 0,
                              -window_height)

        # Compute polynomial fit
        left_fit = np.polyfit(y_centers, leftx, 2)
        left_fitx = left_fit[0] * yvals * yvals + left_fit[
            1] * yvals + left_fit[2]
        left_fitx = np.array(left_fitx, np.int32)

        right_fit = np.polyfit(y_centers, rightx, 2)
        right_fitx = right_fit[0] * yvals * yvals + right_fit[
            1] * yvals + right_fit[2]
        right_fitx = np.array(right_fitx, np.int32)

        inner_lane = np.array(
            list(
                zip(
                    np.concatenate((left_fitx + window_width / 2,
                                    right_fitx[::-1] + window_width / 2),
                                   axis=0),
                    np.concatenate((yvals, yvals[::-1]), axis=0))), np.int32)

        road = np.zeros_like(img)

        cv2.fillPoly(road, [inner_lane], color=[0, 255, 0])

        road_warped = cv2.warpPerspective(road,
                                          Minv, (img.shape[1], img.shape[0]),
                                          flags=cv2.INTER_LINEAR)
        # road_warped_bkg = cv2.warpPerspective(road_bkg, Minv, (img.shape[1], img.shape[0]), flags=cv2.INTER_LINEAR)

        # base = cv2.addWeighted(img, 1.0, road_warped_bkg, -1.0, 0.0)
        result = cv2.addWeighted(img, 1.0, road_warped, 0.4, 0.0)

        ym_per_pix = self.tracker.ym_per_pix
        xm_per_pix = self.tracker.xm_per_pix

        curve_fit_cr = np.polyfit(
            np.array(y_centers, np.float32) * ym_per_pix,
            np.array(leftx, np.float32) * xm_per_pix, 2)
        curverad = (
            (1 + (2 * curve_fit_cr[0] * yvals[-1] * ym_per_pix +
                  curve_fit_cr[1])**2)**1.5) / np.absolute(2 * curve_fit_cr[0])

        camera_center = (left_fitx[-1] + right_fitx[-1]) / 2
        center_diff = (camera_center - binary_warped.shape[1] / 2) * xm_per_pix
        side_pos = 'left'
        if center_diff <= 0:
            side_pos = 'right'

        cv2.putText(result,
                    'Radius of Curvature = ' + str(round(curverad)) + '(m)',
                    (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
        cv2.putText(
            result, 'Vehicle is ' + str(abs(round(center_diff, 3))) + 'm ' +
            side_pos + ' of center', (50, 100), cv2.FONT_HERSHEY_SIMPLEX, 1,
            (255, 255, 255), 2)

        # Build a debug mosaic from the intermediate images.
        from viz import image_mosaic
        lane_markers, canny, lanes = self.perspective.get_debug_imgs()
        result = image_mosaic(result, canny, lanes, lane_markers,
                              binary_warped, warpage)

        return result
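
A minimal sketch of running the pipeline over a clip with moviepy, assuming the pickled calibration file referenced in `__init__` exists (the moviepy usage and file names are illustrative):

from moviepy.editor import VideoFileClip

lane_pipeline = LanePipeline()
clip = VideoFileClip('project_video.mp4')         # assumed input
annotated = clip.fl_image(lane_pipeline.process)  # run on every frame
annotated.write_videofile('output_video.mp4', audio=False)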
Example #9
File: main.py Project: Leonz18/demo
# -*- coding:utf-8 -*-
__author__ = 'z84105425'

import cv2
from threshold import Threshold
from perspective_transform import PerspectiveTransform

if __name__ == '__main__':
    image_path = "bios.jpg"
    img = cv2.imread(image_path)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    # Get the thresholds
    th = Threshold(gray.shape[1], gray.shape[0], 0.83, 0.03)
    thresh_1 = th.get_thresh_1(gray)
    thresh_2 = th.get_thresh_2(gray)

    # Binarize
    ret1, binary1 = cv2.threshold(gray, thresh_1, 255, cv2.THRESH_BINARY)
    ret2, binary2 = cv2.threshold(gray, thresh_2, 255, cv2.THRESH_BINARY)
    cv2.namedWindow("binary1", 0)
    cv2.namedWindow("binary2", 0)
    cv2.imshow("binary1", binary1)
    cv2.imshow("binary2", binary2)
    cv2.imwrite('binary1.jpg', binary1)
    cv2.waitKey(0)

    # Perspective transform
    pt = PerspectiveTransform(10000)
    pt.perspective_transform(img, binary1)