Example #1
        print('Left lane coefficients : ')
        print(lc)
        lfit = np.poly1d(lc)

    if len(rightx) != 0:
        right_fit = np.polyfit(righty, rightx, 2)
        rc, res, _, _, _ = np.polyfit(righty, rightx, 2, full=True)
        print('Right lane coefficients : ')
        print(rc)
        rfit = np.poly1d(rc)

    out_img = cv2.resize(out_img, (640, 360))
    cv2.imshow('out_img', out_img)

    if len(leftx) != 0:
        left_fit = left_line.add_fit(left_fit)
    if len(rightx) != 0:
        right_fit = right_line.add_fit(right_fit)

    if len(leftx) != 0:
        y_eval = 719
        ym_per_pix = 5 / 720
        xm_per_pix = 0.3 / 1250
        left_curverad = (
            (1 + (2 * left_fit[0] * y_eval * ym_per_pix + left_fit[1])**2)**
            1.5) / np.absolute(2 * left_fit[0])
        right_curverad = (
            (1 + (2 * right_fit[0] * y_eval * ym_per_pix + right_fit[1])**2)**
            1.5) / np.absolute(2 * right_fit[0])

    if len(leftx) != 0:
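The snippet above is truncated at the final `if`. For reference, the curvature lines evaluate the standard radius-of-curvature formula for a quadratic fit x = A*y^2 + B*y + C, namely R = (1 + (2*A*y + B)^2)^(3/2) / |2*A|. Below is a minimal standalone sketch of the same calculation; the helper name and defaults are assumptions, and note that, like the snippet, it mixes pixel-space coefficients with a y value scaled to meters, so the result is only an approximate metric radius.

import numpy as np

def radius_of_curvature(fit, y_eval=719, ym_per_pix=5 / 720):
    # fit = [A, B, C] from np.polyfit(y, x, 2), evaluated at the bottom of the image
    A, B = fit[0], fit[1]
    return (1 + (2 * A * y_eval * ym_per_pix + B) ** 2) ** 1.5 / np.absolute(2 * A)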
Example #2
class FindLaneLine():
    def __init__(self, window_size, input_video, output_video):
        # init Camera
        with open('calibrate_camera.p', 'rb') as f:
            save_dict = pickle.load(f)
        print("Load calibrate_camera pickle -- done")
        self.input_video = input_video
        self.mtx = save_dict['mtx']
        self.dist = save_dict['dist']
        self.window_size = window_size
        self.left_line = Line(n=window_size)
        self.right_line = Line(n=window_size)
        # init detect lane False
        self.detected = False
        self.left_curve, self.right_curve = 0., 0.  # radius of curvature for left and right lanes
        self.left_lane_curve, self.right_lane_curve = None, None  # for calculating curvature

        # Annotate the input video
        annotated_v = self.load_video(input_video)
        # Generate the output video
        annotated_v.write_videofile(output_video, audio=False)

    def load_video(self, input_video):
        """ Given input_file video, save annotated video to output_file """
        video = VideoFileClip(input_video)
        return video.fl_image(self.annotate_frame)

    def annotate_frame(self, frame):
        #global mtx, dist, left_line, right_line, detected
        #global left_curve, right_curve, left_lane_inds, right_lane_inds

        # Undistort, threshold, perspective transform
        undist = cv2.undistort(frame, self.mtx, self.dist, None, self.mtx)
        img, abs_bin, mag_bin, dir_bin, hls_bin = combined_thresh(undist)
        binary_warped, binary_unwarped, m, m_inv = perspective_transform(img)

        # Perform polynomial fit
        if not self.detected:
            # Slow line fit
            ret = line_fit(binary_warped)
            left_fit = ret['left_fit']
            right_fit = ret['right_fit']
            nonzerox = ret['nonzerox']
            nonzeroy = ret['nonzeroy']
            left_lane_inds = ret['left_lane_inds']
            right_lane_inds = ret['right_lane_inds']

            # Get moving average of line fit coefficients
            left_fit = self.left_line.add_fit(left_fit)
            right_fit = self.right_line.add_fit(right_fit)

            # Calculate curvature
            self.left_curve, self.right_curve = calc_curve(left_lane_inds, right_lane_inds, nonzerox, nonzeroy)

            self.detected = True  # slow line fit always detects the line

        else:  # implies detected == True
            # Fast line fit
            left_fit = self.left_line.get_fit()
            right_fit = self.right_line.get_fit()
            ret = tune_fit(binary_warped, left_fit, right_fit)

            # Only make updates if we detected lines in the current frame
            if ret is not None:
                left_fit = ret['left_fit']
                right_fit = ret['right_fit']
                nonzerox = ret['nonzerox']
                nonzeroy = ret['nonzeroy']
                left_lane_inds = ret['left_lane_inds']
                right_lane_inds = ret['right_lane_inds']

                left_fit = self.left_line.add_fit(left_fit)
                right_fit = self.right_line.add_fit(right_fit)
                self.left_curve, self.right_curve = calc_curve(left_lane_inds, right_lane_inds, nonzerox, nonzeroy)
            else:
                self.detected = False

        vehicle_offset = calc_vehicle_offset(undist, left_fit, right_fit)

        # Perform final visualization on top of original undistorted image
        result = final_viz(undist, left_fit, right_fit, m_inv, self.left_curve, self.right_curve, vehicle_offset)

        return result
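The `Line(n=window_size)` helper used above (with `add_fit` and `get_fit`) is not shown in this example. A minimal sketch of what such a moving-average buffer for fit coefficients might look like, offered as an assumption rather than the example's actual implementation:

from collections import deque

import numpy as np

class Line:
    # Hypothetical moving-average buffer for polynomial fit coefficients
    def __init__(self, n=5):
        self.fits = deque(maxlen=n)  # keep only the n most recent fits

    def add_fit(self, fit):
        # store the new fit and return the smoothed coefficients
        self.fits.append(np.asarray(fit, dtype=float))
        return self.get_fit()

    def get_fit(self):
        return np.mean(self.fits, axis=0)

With such a helper in place, the class is typically driven by a single call such as `FindLaneLine(window_size=5, input_video='project_video.mp4', output_video='out.mp4')` (file names hypothetical).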
Example #3
    # warp images
    wrapped = pipeline.wrap(gradient)
    # find left and right pixel positions
    # find left and right polynomial fits
    left_line.count_check()
    right_line.count_check()
    lx, ly, rx, ry = pipeline.find_lines(wrapped)
    if len(rx) > 0 and len(ry) > 0:
        # mark as detected
        right_line.was_detected = True
        # add found points to the instance
        right_line.allx = rx
        right_line.ally = ry
        # add polynomial fit
        r_fit, right_line.polx, right_line.ploty = pipeline.find_poly(wrapped.shape, right_line.allx, right_line.ally)
        right_line.add_fit(r_fit)
    # if points of the left line were found
    if len(lx) > 0 and len(ly) > 0:
        # mark as detected
        left_line.was_detected = True
        # add found points to the instance
        left_line.allx = lx
        left_line.ally = ly
        # add polynomial fit
        l_fit, left_line.polx, left_line.ploty = pipeline.find_poly(wrapped.shape, left_line.allx, left_line.ally)
        left_line.add_fit(l_fit)

    left_curvd = pipeline.measure_curv(wrapped.shape, lx, ly)
    right_curvd = pipeline.measure_curv(wrapped.shape, rx, ry)
    avg_curvrd = round(np.mean([left_curvd,right_curvd]), 0)
    curv_text = f"current radius of curvature is {avg_curvrd} meters"
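The fragment ends before `curv_text` is used; presumably it is drawn onto the output frame. A hypothetical continuation (the frame variable, here called `img`, and the text placement are assumptions):

    # hypothetical continuation: overlay the curvature text on the frame
    cv2.putText(img, curv_text, (50, 50), cv2.FONT_HERSHEY_SIMPLEX,
                1.0, (255, 255, 255), 2)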
Example #4
class lanenet_detector():
    def __init__(self):

        self.bridge = CvBridge()
        self.sub_image = rospy.Subscriber('camera/image_raw',
                                          Image,
                                          self.img_callback,
                                          queue_size=1)
        self.pub_image = rospy.Publisher("lane_detection/annotate",
                                         Image,
                                         queue_size=1)
        self.pub_bird = rospy.Publisher("lane_detection/birdseye",
                                        Image,
                                        queue_size=1)
        self.left_line = Line(n=5)
        self.right_line = Line(n=5)
        self.detected = False
        self.hist = True

    def img_callback(self, data):

        try:
            # Convert a ROS image message into an OpenCV image
            cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
        except CvBridgeError as e:
            print(e)
            return

        raw_img = cv_image.copy()
        mask_image, bird_image = self.detection(raw_img)

        # Convert an OpenCV image into a ROS image message
        out_img_msg = self.bridge.cv2_to_imgmsg(mask_image, 'bgr8')
        out_bird_msg = self.bridge.cv2_to_imgmsg(bird_image, 'bgr8')

        # Publish image message in ROS
        self.pub_image.publish(out_img_msg)
        self.pub_bird.publish(out_bird_msg)

    def gradient_thresh(self, img, thresh_min=25, thresh_max=100):
        """
        Apply sobel edge detection on input image in x, y direction
        """
        #1. Convert the image to gray scale
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        #2. Gaussian blur the image
        gray = cv2.GaussianBlur(gray, (5, 5), 0)
        #3. Use cv2.Sobel() to find derivatives for both X and Y axes
        sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0)  #, ksize=1)
        sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1)  #, ksize=1)
        #4. Use cv2.addWeighted() to combine the results
        gray = cv2.addWeighted(sobelx, 0.5, sobely, 0.5, 0)
        #5. Convert each pixel to uint8, then apply threshold to get binary image
        gray = cv2.convertScaleAbs(gray)
        num, binary_output = cv2.threshold(gray, thresh_min, thresh_max,
                                           cv2.THRESH_BINARY)
        # binary_output = gray

        return binary_output

    def color_thresh(self, img, thresh=(100, 255)):
        """
        Convert BGR to HLS and threshold to binary image using S channel
        """
        #1. Convert the image from BGR to HLS
        hsl = cv2.cvtColor(img, cv2.COLOR_BGR2HLS)
        #2. Apply threshold on S channel to get binary image
        binary_output = cv2.inRange(hsl, (0, 0, thresh[0]),
                                    (255, 255, thresh[1]))

        return binary_output

    def combinedBinaryImage(self, img):
        """
        Get combined binary image from color filter and sobel filter
        """
        #1. Apply sobel filter and color filter on input image
        SobelOutput = self.gradient_thresh(img)
        ColorOutput = self.color_thresh(img)
        #2. Combine the outputs
        # SobelOutput = cv2.addWeighted(SobelOutput, 0.5, ColorOutput, 0.5, 0)
        ## Here you can use as many methods as you want.

        ## TODO

        ####

        binaryImage = np.zeros_like(SobelOutput)
        binaryImage[(ColorOutput != 0) | (SobelOutput != 0)] = 1
        # Remove noise from binary image
        binaryImage = morphology.remove_small_objects(
            binaryImage.astype('bool'), min_size=50, connectivity=2)

        binaryImage = binaryImage.astype(np.uint8) * 255

        return binaryImage

    def perspective_transform(self, img, verbose=False):
        """
        Get bird's eye view from input image
        """
        #1. Visually determine 4 source points and 4 destination points
        src_pts = np.array([[480, 250], [680, 250], [809, 370], [292, 370]],
                           np.float32)
        dst_pts = np.array([[292, 0], [809, 0], [809, 370], [292, 370]],
                           np.float32)

        #2. Get M, the transform matrix, and Minv, the inverse using cv2.getPerspectiveTransform()
        M = cv2.getPerspectiveTransform(src_pts, dst_pts)
        Minv = np.linalg.inv(M)

        #3. Generate warped image in bird view using cv2.warpPerspective()
        warped_img = cv2.warpPerspective(img, M, (1242, 375))

        ## TODO

        ####

        return warped_img, M, Minv

    def detection(self, img):

        binary_img = self.combinedBinaryImage(img)
        img_birdeye, M, Minv = self.perspective_transform(binary_img)

        if not self.hist:
            # Fit lane without previous result
            ret = line_fit(img_birdeye)
            left_fit = ret['left_fit']
            right_fit = ret['right_fit']
            nonzerox = ret['nonzerox']
            nonzeroy = ret['nonzeroy']
            left_lane_inds = ret['left_lane_inds']
            right_lane_inds = ret['right_lane_inds']

        else:
            # Fit lane with previous result
            if not self.detected:
                ret = line_fit(img_birdeye)
                left_fit = ret['left_fit']
                right_fit = ret['right_fit']
                nonzerox = ret['nonzerox']
                nonzeroy = ret['nonzeroy']
                left_lane_inds = ret['left_lane_inds']
                right_lane_inds = ret['right_lane_inds']

                left_fit = self.left_line.add_fit(left_fit)
                right_fit = self.right_line.add_fit(right_fit)

                self.detected = True

            else:
                left_fit = self.left_line.get_fit()
                right_fit = self.right_line.get_fit()
                ret = tune_fit(img_birdeye, left_fit, right_fit)

                # Only make updates if we detected lines in the current frame
                if ret is not None:
                    left_fit = ret['left_fit']
                    right_fit = ret['right_fit']
                    nonzerox = ret['nonzerox']
                    nonzeroy = ret['nonzeroy']
                    left_lane_inds = ret['left_lane_inds']
                    right_lane_inds = ret['right_lane_inds']

                    left_fit = self.left_line.add_fit(left_fit)
                    right_fit = self.right_line.add_fit(right_fit)

                else:
                    self.detected = False

        # Annotate original image (in both the histogram and non-histogram paths)
        bird_fit_img = bird_fit(img_birdeye, ret, save_file=None)
        combine_fit_img = final_viz(img, left_fit, right_fit, Minv)

        return combine_fit_img, bird_fit_img
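The class only wires up the subscriber and publishers; running it as a ROS node also needs an entry point. A minimal sketch (node name assumed):

if __name__ == '__main__':
    # initialize the ROS node; the image callback then drives the pipeline
    rospy.init_node('lanenet_detector', anonymous=True)
    lanenet_detector()
    rospy.spin()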
Example #5
class lanenet_detector():
    def __init__(self):

        self.bridge = CvBridge()
        self.sub_image = rospy.Subscriber('camera/image_raw', Image, self.img_callback, queue_size=1)
        self.pub_image = rospy.Publisher("lane_detection/annotate", Image, queue_size=1)
        self.pub_bird = rospy.Publisher("lane_detection/birdseye", Image, queue_size=1)
        self.left_line = Line(n=5)
        self.right_line = Line(n=5)
        self.detected = False
        self.hist = True


    def img_callback(self, data):

        try:
            # Convert a ROS image message into an OpenCV image
            cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
        except CvBridgeError as e:
            print(e)
            return
        
        raw_img = cv_image.copy() 
        mask_image, bird_image = self.detection(raw_img)

        # Convert an OpenCV image into a ROS image message
        out_img_msg = self.bridge.cv2_to_imgmsg(mask_image, 'bgr8')
        out_bird_msg = self.bridge.cv2_to_imgmsg(bird_image, 'bgr8')

        # Publish image message in ROS
        self.pub_image.publish(out_img_msg)
        self.pub_bird.publish(out_bird_msg)


    def gradient_thresh(self, img, thresh_min=25, thresh_max=100):
        """
        Apply sobel edge detection on input image in x, y direction
        """
        # 1. Convert the image to gray scale
        gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        # 2. Gaussian blur the image
        blur_img = cv2.GaussianBlur(gray_img, (5, 5), 0)
        # 3. Use cv2.Sobel() to find derivatives for both X and Y axes
        sobelX = cv2.Sobel(blur_img, cv2.CV_64F, 1, 0, ksize=1)
        sobelY = cv2.Sobel(blur_img, cv2.CV_64F, 0, 1, ksize=1)
        #4. Use cv2.addWeighted() to combine the results
        blended_img = cv2.addWeighted(np.absolute(sobelX), 0.5, np.absolute(sobelY), 0.5, 0)
        #5. Convert each pixel to uint8, then apply threshold to get binary image
        uint8_img = np.uint8(blended_img)
        ret, binary_output = cv2.threshold(uint8_img, thresh_min, thresh_max, cv2.THRESH_BINARY)

        return binary_output 


    def color_thresh(self, img, thresh=(100, 255)):
        """
        Convert BGR to HLS and threshold to binary image using S channel
        """
        #1. Convert the image from BGR to HLS
        hsl_img = cv2.cvtColor(img, cv2.COLOR_BGR2HLS)
        #2. Apply threshold on S channel to get binary image
        binary_output = cv2.inRange(hsl_img, (0, 0, thresh[0]), (255, 255, thresh[1]))

        return binary_output

    def combinedBinaryImage(self, img):
        """
        Get combined binary image from color filter and sobel filter 
        """
        #1. Apply sobel filter and color filter on input image
        SobelOutput = self.gradient_thresh(img)
        ColorOutput = self.color_thresh(img)
        #2. Combine the outputs

        # Here you can use as many methods as you want.
        binaryImage = np.zeros_like(SobelOutput)
        binaryImage[(ColorOutput != 0) | (SobelOutput != 0)] = 1
        # Remove noise from binary image
        binaryImage = morphology.remove_small_objects(binaryImage.astype('bool'), min_size=50, connectivity=2)
        binaryImage = binaryImage.astype(np.uint8)

        # plt.imshow(img, interpolation='none') # Plot the image, turn off interpolation
        # plt.show()
        # cv2.imshow("Sobel", SobelOutput)
        # cv2.waitKey(0)
        # cv2.imshow("Color", ColorOutput)
        # cv2.waitKey(0)
        # print(SobelOutput, ColorOutput, binaryImage)
        # cv2.imshow("Combined", binaryImage)
        # cv2.waitKey(0)

        return binaryImage


    def perspective_transform(self, img, verbose=False):
        """
        Get bird's eye view from input image
        """
        #1. Visually determine 4 source points and 4 destination points
        
        #2. Get M, the transform matrix, and Minv, the inverse using cv2.getPerspectiveTransform()
        #3. Generate warped image in bird view using cv2.warpPerspective()

        h, w = img.shape[:2]  # height and width
        # print(h,w)
        OFFSET = 50
        # 0011
        # x1, y1 = 550, 200 # upper left
        # x2, y2 = 200, 375 # lower left
        # x3, y3 = 650, 200 # upper right
        # x4, y4 = 1000, 375 # lower right

        # 0056yellow
        # x1, y1 = 500, 225 # upper left
        # x2, y2 = 200, 375 # lower left
        # x3, y3 = 650, 225 # upper right
        # x4, y4 = 800, 375 # lower right

        x1, y1 = 555, 195 # upper left
        x2, y2 = 630, 195 # upper right
        x3, y3 = 950, 375 # lower right
        x4, y4 = 200, 375 # lower left

        # dst = np.float32([(OFFSET,0),#upper left
        #           (OFFSET,375),#lower left
        #           (1242-OFFSET,0),#upper right
        #           (1242-OFFSET,375)]) #lower right
        dst = np.float32([(0,0),      # upper left
                  (1000,0),   # upper right
                  (1000,300), # lower right
                  (50,300)])  # lower left
        src = np.float32([(x1,y1),(x2,y2),(x3,y3),(x4,y4)])
        #2. Get M, the transform matrix, and Minv, the inverse using cv2.getPerspectiveTransform()
        M = cv2.getPerspectiveTransform(src, dst)
        Minv = cv2.getPerspectiveTransform(dst, src)       
        #3. Generate warped image in bird view using cv2.warpPerspective()
        warped_img = cv2.warpPerspective(img, M, (w,h), flags=cv2.INTER_LINEAR)
        ## TODO
        # cv2.imshow("bird", warped_img)
        # cv2.waitKey(0)
        ####
        
        return warped_img, M, Minv

    def detection(self, img):

        binary_img = self.combinedBinaryImage(img)
        img_birdeye, M, Minv = self.perspective_transform(binary_img)

        if not self.hist:
            # Fit lane without previous result
            ret = line_fit(img_birdeye)
            left_fit = ret['left_fit']
            right_fit = ret['right_fit']
            nonzerox = ret['nonzerox']
            nonzeroy = ret['nonzeroy']
            left_lane_inds = ret['left_lane_inds']
            right_lane_inds = ret['right_lane_inds']
        
        else:
            # Fit lane with previous result
            if not self.detected:
                ret = line_fit(img_birdeye)
                left_fit = ret['left_fit']
                right_fit = ret['right_fit']
                nonzerox = ret['nonzerox']
                nonzeroy = ret['nonzeroy']
                left_lane_inds = ret['left_lane_inds']
                right_lane_inds = ret['right_lane_inds']

                left_fit = self.left_line.add_fit(left_fit)
                right_fit = self.right_line.add_fit(right_fit)

                self.detected = True

            else:
                left_fit = self.left_line.get_fit()
                right_fit = self.right_line.get_fit()
                ret = tune_fit(img_birdeye, left_fit, right_fit)

                # Only make updates if we detected lines in the current frame
                if ret is not None:
                    left_fit = ret['left_fit']
                    right_fit = ret['right_fit']
                    nonzerox = ret['nonzerox']
                    nonzeroy = ret['nonzeroy']
                    left_lane_inds = ret['left_lane_inds']
                    right_lane_inds = ret['right_lane_inds']

                    left_fit = self.left_line.add_fit(left_fit)
                    right_fit = self.right_line.add_fit(right_fit)

                else:
                    self.detected = False

        # Annotate original image (in both the histogram and non-histogram paths)
        bird_fit_img = bird_fit(img_birdeye, ret, save_file=None)
        combine_fit_img = final_viz(img, left_fit, right_fit, Minv)

        return combine_fit_img, bird_fit_img
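Since the four source points in `perspective_transform` are chosen visually, it can help to sanity-check them by drawing the source quadrilateral on a raw frame before warping. A small sketch using the points from this example (the image path is hypothetical):

import cv2
import numpy as np

# hypothetical check: draw the chosen source region on a sample frame
frame = cv2.imread('sample_frame.png')
src = np.array([[555, 195], [630, 195], [950, 375], [200, 375]], np.int32)
cv2.polylines(frame, [src.reshape(-1, 1, 2)], isClosed=True, color=(0, 0, 255), thickness=2)
cv2.imwrite('src_region_check.png', frame)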