def __init__(self):
        """Load detector parameters from the ROS parameter server and wire I/O."""
        self.node_name = "Line Detector"

        # Geometry parameters.
        self.image_size = rospy.get_param('~img_size')
        self.top_cutoff = rospy.get_param('~top_cutoff')

        self.bridge = CvBridge()
        self.detector = LineDetector()

        # HSV colour thresholds, loaded in a single pass (same order as before).
        for attr in ('hsv_white1', 'hsv_white2', 'hsv_yellow1', 'hsv_yellow2',
                     'hsv_red1', 'hsv_red2', 'hsv_red3', 'hsv_red4'):
            setattr(self.detector, attr, np.array(rospy.get_param('~' + attr)))

        # Edge-detection / Hough-transform tuning.
        for attr in ('dilation_kernel_size', 'canny_thresholds',
                     'hough_min_line_length', 'hough_max_line_gap',
                     'hough_threshold'):
            setattr(self.detector, attr, rospy.get_param('~' + attr))

        # ROS wiring.
        self.sub_image = rospy.Subscriber("~image", CompressedImage, self.processImage)
        self.pub_lines = rospy.Publisher("~segment_list", SegmentList, queue_size=1)
        self.pub_image = rospy.Publisher("~image_with_lines", Image, queue_size=1)
    def __init__(self):
        """Configure the line-detector node: parameters, publishers, subscriber."""
        self.node_name = "Line Detector"

        # Lock serialising access to the image-processing worker.
        self.thread_lock = threading.Lock()

        # Helper objects.
        self.bridge = CvBridge()
        self.detector = LineDetector()
        self.wb = WhiteBalance()
        self.flag_wb_ref = False  # becomes True once a reference frame is set

        # Parameters.
        self.flag_wb = rospy.get_param('~white_balance')
        self.image_size = rospy.get_param('~img_size')
        self.top_cutoff = rospy.get_param('~top_cutoff')

        # HSV colour thresholds (eight corner points), loaded in one pass.
        for name in ('hsv_white1', 'hsv_white2', 'hsv_yellow1', 'hsv_yellow2',
                     'hsv_red1', 'hsv_red2', 'hsv_red3', 'hsv_red4'):
            setattr(self.detector, name, np.array(rospy.get_param('~' + name)))

        # Edge-detection / Hough-transform tuning.
        for name in ('dilation_kernel_size', 'canny_thresholds',
                     'hough_min_line_length', 'hough_max_line_gap',
                     'hough_threshold'):
            setattr(self.detector, name, rospy.get_param('~' + name))

        # Publishers.
        self.pub_lines = rospy.Publisher("~segment_list", SegmentList, queue_size=1)
        self.pub_image = rospy.Publisher("~image_with_lines", Image, queue_size=1)

        # Verbose option.
        self.verbose = rospy.get_param('~verbose')
        if self.verbose:
            self.toc_pre = rospy.get_time()

        # Subscribers.
        self.sub_image = rospy.Subscriber("~image", CompressedImage, self.cbImage, queue_size=1)
        rospy.loginfo("[%s] Initialized." % (self.node_name))
Exemple #3
0
def main():
    """Run the lane finder on a single image (.jpg) or a video clip (.mp4)."""
    line_finder = LineDetector(**settings)
    # Compute the extension once instead of re-splitting per branch.
    ext = args.file_.split('.')[-1]
    if ext == 'jpg':
        # Single frame: annotate and display it.
        img = mpimg.imread(args.path + args.file_)
        output_img = line_finder.forward(img)
        plt.imshow(output_img)
        plt.show()
    elif ext == 'mp4':
        # Video: run the detector on every frame and write the annotated clip.
        white_output = 'test_videos_output/output_video.mp4'
        clip1 = VideoFileClip(args.path + args.file_).subclip(0, args.subclip)
        white_clip = clip1.fl_image(line_finder.forward)
        white_clip.write_videofile(white_output, audio=False)
    else:
        print('Error {}: Not supported type.'.format(ext))
        quit()
    def __init__(self):
        """Load detector parameters, wire ROS I/O, and start the param-refresh timer."""
        self.node_name = "Line Detector"

        # Geometry parameters.
        self.image_size = rospy.get_param('~img_size')
        self.top_cutoff = rospy.get_param('~top_cutoff')

        self.bridge = CvBridge()
        self.detector = LineDetector()

        # HSV colour thresholds.
        for param in ('hsv_white1', 'hsv_white2', 'hsv_yellow1', 'hsv_yellow2',
                      'hsv_red1', 'hsv_red2', 'hsv_red3', 'hsv_red4'):
            setattr(self.detector, param, np.array(rospy.get_param('~' + param)))

        # Edge / Hough tuning.
        for param in ('dilation_kernel_size', 'canny_thresholds',
                      'hough_min_line_length', 'hough_max_line_gap',
                      'hough_threshold'):
            setattr(self.detector, param, rospy.get_param('~' + param))

        # ROS wiring.
        self.sub_image = rospy.Subscriber("~image", CompressedImage, self.processImage)
        self.pub_lines = rospy.Publisher("~segment_list", SegmentList, queue_size=1)
        self.pub_image = rospy.Publisher("~image_with_lines", Image, queue_size=1)

        # Re-read tunable parameters every 3 seconds.
        self.timer_param = rospy.Timer(rospy.Duration.from_sec(3.0), self.cbParamUpdate)
Exemple #5
0
    def __init__(self):
        """Set up the line-detection service node and all its ROS interfaces."""
        self.node_name = rospy.get_name()
        rospy.loginfo("[%s] Initializing......" % (self.node_name))

        # Core state.
        self.bridge = CvBridge()
        self.cv_image = None
        self.detector = LineDetector()
        self.visualization = True
        self.size = (320, 240)
        self.pid_enabled = False
        self.base_speed = 50
        self.image_msg = None  # latest camera frame, cached by cbImg
        self.lost_count = 0
        self.line_msg = GetLineDetectionResponse()

        # Publishers.
        self.pub_detections = rospy.Publisher("~image_color", CompressedImage, queue_size=1)
        self.pub_line_detection = rospy.Publisher("~line_detection", LineDetection, queue_size=1)

        # Services.
        self.detect_line_srv = rospy.Service('~detect_line', GetLineDetection, self.cbGetLineDetection)
        self.set_color_srv = rospy.Service('~set_color_threshold', SetColorThreshold, self.cbSetColorThreshold)
        self.get_color_srv = rospy.Service('~get_color_threshold', GetColorThreshold, self.cbGetColorThreshold)
        self.set_color_list_srv = rospy.Service('~get_color_list', GetStrings, self.cbGetColorList)

        # Subscriber (compressed camera stream).
        self.sub_image = rospy.Subscriber("~image_raw/compressed", CompressedImage, self.cbImg, queue_size=1)

        rospy.loginfo("[%s] Initialized." % (self.node_name))
Exemple #6
0
 def __init__(self):
     """Register default parameters and connect the ROS topics."""
     self.node_name = "Line Detector"

     # Image geometry (defaults match a 320x240 camera, 90 px cut off the top).
     for name, default in (("hei_image", 240), ("wid_image", 320), ("top_cutoff", 90)):
         setattr(self, name, self.setupParam("~" + name, default))

     self.bridge = CvBridge()
     self.detector = LineDetector()

     # ROS wiring.
     self.sub_image = rospy.Subscriber("~image", CompressedImage, self.processImage)
     self.pub_lines = rospy.Publisher("~segment_list", SegmentList, queue_size=1)
     self.pub_image = rospy.Publisher("~image_with_lines", Image, queue_size=1)
    def __init__(self):
        """Wire up ROS I/O and pre-allocate the message objects reused per frame."""
        self.bridge = CvBridge()

        # ROS interfaces (same creation order as before).
        self.sub_image = rospy.Subscriber("~image", Image, self.processImage)
        self.pub_lines = rospy.Publisher("~segment_list", SegmentList, queue_size=1)
        self.pub_image = rospy.Publisher("~image_with_lines", Image, queue_size=1)

        self.detector = LineDetector()

        # Reusable message containers.
        self.segmentList = SegmentList()
        self.segment = Segment()
        self.pixel1, self.pixel2 = Pixel(), Pixel()
        self.point1, self.point2 = Point(), Point()
    def __init__(self):
        """Read all node parameters and connect the ROS plumbing."""
        self.node_name = "Line Detector"

        # Only one worker thread at a time may process frames.
        self.thread_lock = threading.Lock()

        # Detection helpers.
        self.bridge = CvBridge()
        self.detector = LineDetector()
        self.wb = WhiteBalance()
        self.flag_wb_ref = False  # True once a white-balance reference frame exists

        # Parameters.
        self.flag_wb = rospy.get_param('~white_balance')
        self.image_size = rospy.get_param('~img_size')
        self.top_cutoff = rospy.get_param('~top_cutoff')

        # Colour thresholds (eight HSV corner points), same read order as before.
        hsv_names = ('hsv_white1', 'hsv_white2', 'hsv_yellow1', 'hsv_yellow2',
                     'hsv_red1', 'hsv_red2', 'hsv_red3', 'hsv_red4')
        for hsv_name in hsv_names:
            setattr(self.detector, hsv_name, np.array(rospy.get_param('~' + hsv_name)))

        # Edge / Hough tuning knobs.
        for knob in ('dilation_kernel_size', 'canny_thresholds',
                     'hough_min_line_length', 'hough_max_line_gap',
                     'hough_threshold'):
            setattr(self.detector, knob, rospy.get_param('~' + knob))

        # Publishers.
        self.pub_lines = rospy.Publisher("~segment_list", SegmentList, queue_size=1)
        self.pub_image = rospy.Publisher("~image_with_lines", Image, queue_size=1)

        # Verbose option.
        self.verbose = rospy.get_param('~verbose')
        if self.verbose:
            self.toc_pre = rospy.get_time()

        # Subscribers.
        self.sub_image = rospy.Subscriber("~image", CompressedImage, self.cbImage, queue_size=1)
        rospy.loginfo("[%s] Initialized." % (self.node_name))
class LineDetectorNode(object):
    """ROS node that detects white/yellow/red line segments in camera images.

    Subscribes to ~image (sensor_msgs/Image), publishes the detected segments
    on ~segment_list and an annotated debug frame on ~image_with_lines.
    """

    def __init__(self):
        self.bridge = CvBridge()

        self.sub_image = rospy.Subscriber("~image", Image, self.processImage)
        self.pub_lines = rospy.Publisher("~segment_list", SegmentList, queue_size=1)
        self.pub_image = rospy.Publisher("~image_with_lines", Image, queue_size=1)

        self.detector = LineDetector()

        # Kept for backward compatibility; publishSegmentList now builds a
        # fresh Segment/Pixel per row (see the bug note there).
        self.segmentList = SegmentList()
        self.segment = Segment()
        self.pixel1 = Pixel()
        self.pixel2 = Pixel()
        self.point1 = Point()
        self.point2 = Point()

    def processImage(self, image_msg):
        """Detect coloured line segments in one frame and publish the results."""
        image_cv = self.bridge.imgmsg_to_cv2(image_msg, "bgr8")

        # Resize, then keep only the lower half of the frame.
        # Integer division: a float index raises TypeError on Python 3
        # (// is identical to / for ints on Python 2).
        image_cv = cv2.resize(image_cv, (200, 150))
        image_cv = image_cv[image_cv.shape[0] // 2 :, :, :]

        # Detect lines and normals for each colour class.
        lines_white, normals_white = self.detector.detectLines(image_cv, "white")
        lines_yellow, normals_yellow = self.detector.detectLines(image_cv, "yellow")
        lines_red, normals_red = self.detector.detectLines(image_cv, "red")

        # Draw lines and normals (in place on the working image).
        image_with_lines_cv = image_cv
        self.detector.drawLines(image_with_lines_cv, lines_white, (0, 0, 0))
        self.detector.drawLines(image_with_lines_cv, lines_yellow, (255, 0, 0))
        self.detector.drawLines(image_with_lines_cv, lines_red, (0, 255, 0))
        self.detector.drawNormals(image_with_lines_cv, lines_white, normals_white)
        self.detector.drawNormals(image_with_lines_cv, lines_yellow, normals_yellow)
        self.detector.drawNormals(image_with_lines_cv, lines_red, normals_red)

        # TODO: Pixel frame to body frame conversion

        # Publish the segments, one colour class at a time.
        if len(lines_white) > 0:
            self.publishSegmentList(lines_white, normals_white, self.segmentList.WHITE)
            rospy.loginfo("[LineDetectorNode] number of white lines = %s" % (len(lines_white)))
        if len(lines_yellow) > 0:
            self.publishSegmentList(lines_yellow, normals_yellow, self.segmentList.YELLOW)
            rospy.loginfo("[LineDetectorNode] number of yellow lines = %s" % (len(lines_yellow)))
        if len(lines_red) > 0:
            self.publishSegmentList(lines_red, normals_red, self.segmentList.RED)
            rospy.loginfo("[LineDetectorNode] number of red lines = %s" % (len(lines_red)))

        # Publish the frame with lines drawn on it.
        image_msg = self.bridge.cv2_to_imgmsg(image_with_lines_cv, "bgr8")
        self.pub_image.publish(image_msg)

    def onShutdown(self):
        rospy.loginfo("[LineDetectorNode] Shutdown.")

    def publishSegmentList(self, lines, normals, color):
        """Publish one SegmentList built from (N,4) line endpoints and (N,2) normals.

        Bug fixes vs. the previous version:
        * np.hstack takes a single tuple of arrays, not two positional arrays
          (the old call raised TypeError).
        * A fresh Segment/Pixel is created per row; previously the same shared
          instance was appended repeatedly, so every entry in the published
          list aliased the values of the last segment.
        """
        self.segmentList.segments = []
        self.segmentList.color = color

        for u1, v1, u2, v2, norm_u, norm_v in np.hstack((lines, normals)):
            pixel1 = Pixel()
            pixel1.u = int(u1)
            pixel1.v = int(v1)
            pixel2 = Pixel()
            pixel2.u = int(u2)
            pixel2.v = int(v2)
            segment = Segment()
            # Replace the list wholesale; avoids IndexError if the message's
            # default pixels field is empty.
            segment.pixels = [pixel1, pixel2]
            segment.normal_u = norm_u
            segment.normal_v = norm_v

            # TODO: assign segment.points

            self.segmentList.segments.append(segment)

        self.pub_lines.publish(self.segmentList)
Exemple #10
0
class LineDetectorNode(object):
    """ROS node exposing HSV colour/line detection as services.

    Caches the latest compressed camera frame and, on request, runs an HSV
    threshold detection inside a caller-supplied rectangle. Colour thresholds
    can be queried and updated at runtime through dedicated services.
    """

    def __init__(self):
        """Create publishers, services and the camera subscriber."""
        self.node_name = rospy.get_name()
        rospy.loginfo("[%s] Initializing......" % (self.node_name))

        self.bridge = CvBridge()
        self.cv_image = None
        self.detector = LineDetector()
        self.visualization = True  # publish annotated frames when True
        self.size = (320, 240)
        self.pid_enabled = False
        self.base_speed = 50
        # self.car = CarDriver()
        # self.config_file = os.path.dirname(os.path.abspath(__file__)) + "/default.yaml"
        self.image_msg = None  # latest camera frame (set by cbImg)
        self.lost_count = 0
        self.line_msg = GetLineDetectionResponse()
        self.pub_detections = rospy.Publisher("~image_color",
                                              CompressedImage,
                                              queue_size=1)
        self.pub_line_detection = rospy.Publisher("~line_detection",
                                                  LineDetection,
                                                  queue_size=1)

        self.detect_line_srv = rospy.Service('~detect_line', GetLineDetection,
                                             self.cbGetLineDetection)
        self.set_color_srv = rospy.Service('~set_color_threshold',
                                           SetColorThreshold,
                                           self.cbSetColorThreshold)
        self.get_color_srv = rospy.Service('~get_color_threshold',
                                           GetColorThreshold,
                                           self.cbGetColorThreshold)
        self.set_color_list_srv = rospy.Service('~get_color_list', GetStrings,
                                                self.cbGetColorList)
        # self.pid_set_enable_srv = rospy.Service('~pid_set_enable', SetInt32, self.cbPidSetEnable)
        # self.sub_image = rospy.Subscriber("~image_raw", Image, self.cbImg ,queue_size=1)
        self.sub_image = rospy.Subscriber("~image_raw/compressed",
                                          CompressedImage,
                                          self.cbImg,
                                          queue_size=1)

        rospy.loginfo("[%s] Initialized." % (self.node_name))

    @staticmethod
    def _as_text(value):
        """Return *value* as text; ROS string fields may arrive as bytes or str.

        The old unconditional .decode('utf-8') raised AttributeError on Python 3
        when the field was already a str.
        """
        return value.decode('utf-8') if isinstance(value, bytes) else value

    def cbImg(self, image_msg):
        """Cache the newest camera frame; detection happens on service request."""
        self.image_msg = image_msg
        # self.line_msg = self.detectLine()
        if self.pid_enabled:
            self.cbPid(self.line_msg)
        # self.pub_line_detection.publish(self.line_msg)

    '''
    def cbPid(self,line_msg):
        center = line_msg.center
        if center[0] == 0:
            self.lost_count = self.lost_count + 1
            print('lost count %d' % (self.lost_count))
            if self.lost_count > 25:
                self.pid_enabled = False
                print('yellow line lost,stop')
            self.car.setWheelsSpeed(0,0)
            return
        self.lost_count = 0
        offset = 70 - center[0]
        bias = offset/160.0
        speed = self.base_speed/100.0*(1-bias)
        if speed < 0.4:
            speed = 0.4
        left = speed*(1-bias)
        right = speed*(1+bias)
        self.car.setWheelsSpeed(left,right)
    '''

    def detectLine(self, params):
        """Run HSV detection for params.color inside the params ROI.

        Returns a GetLineDetectionResponse; an empty response on any failure
        (unknown colour, no cached frame, undecodable image).
        """
        color = self._as_text(params.color)
        # dict.has_key() is Python-2 only; `in` works on both.
        if color not in self.detector.colors:
            return GetLineDetectionResponse()
        image_msg = self.image_msg
        if image_msg is None:
            print('[%s]: camera msg not received' % (self.node_name))
            return GetLineDetectionResponse()
        if hasattr(image_msg, 'format'):  # CompressedImage
            try:
                cv_image = bgr_from_jpg(image_msg.data)
            except ValueError as e:
                rospy.loginfo('[%s] cannot decode image: %s' %
                              (self.node_name, e))
                return GetLineDetectionResponse()
        else:  # Image
            try:
                cv_image = self.bridge.imgmsg_to_cv2(image_msg,
                                                     desired_encoding="bgr8")
            except Exception as e:
                rospy.loginfo('[%s] cannot convert image: %s' %
                              (self.node_name, e))
                return GetLineDetectionResponse()
        # Crop to the requested rectangle when it is valid; otherwise fall back
        # to the whole frame (previously rect_image could be unbound here and
        # raise UnboundLocalError).
        rect_valid = params.y1 < params.y2 and params.x1 < params.x2
        if rect_valid:
            rect_image = cv_image[params.y1:params.y2, params.x1:params.x2]
        else:
            rect_image = cv_image
        detection, image = self.detector.detect_hsv(
            rect_image, self.detector.colors[color])
        if self.visualization:
            if rect_valid:
                cv_image[params.y1:params.y2, params.x1:params.x2] = image
                cv2.rectangle(cv_image, (params.x1, params.y1),
                              (params.x2, params.y2), (0, 0, 0), 1)
            self.pubImage(cv_image)
        return GetLineDetectionResponse(detection[0:2], detection[2:4],
                                        detection[4])

    def toLineDetections(self, cnt):
        """Wrap a contour's min-area rectangle in a response message."""
        if cnt is not None:
            center, wh, angle = cv2.minAreaRect(cnt)
            return GetLineDetectionResponse(center, wh, angle)
        return GetLineDetectionResponse()

    def cbGetLineDetection(self, params):
        """Service callback: detect and return; empty response on any error."""
        try:
            line_msg = self.detectLine(params)
            return line_msg
        except Exception as e:
            print(self.node_name)
            print(e)
            return GetLineDetectionResponse()

    def cbSetColorThreshold(self, params):
        """Service callback: replace the HSV threshold for one colour."""
        print(params)
        try:
            self.detector.colors[self._as_text(params.color)] = [{
                "min": [params.low_h, params.low_s, params.low_v],
                "max": [params.high_h, params.high_s, params.high_v]
            }]
            return SetColorThresholdResponse("设置成功")
        except Exception as e:
            print(self.node_name)
            print(e)
            return SetColorThresholdResponse("设置失败,请检查参数")

    def cbGetColorThreshold(self, params):
        """Service callback: return the stored threshold for one colour as text."""
        try:
            threshold = self.detector.colors[self._as_text(params.color)]
            return GetColorThresholdResponse(str(threshold))
        except Exception as e:
            print(self.node_name)
            print(e)
            return GetColorThresholdResponse()

    def cbGetColorList(self, params):
        """Service callback: list the configured colour names."""
        # list() so a Python-3 dict_keys view is not handed to the message.
        return GetStringsResponse(list(self.detector.colors.keys()))

    '''
    def cbPidSetEnable(self,params):
        if params.port == 1:
            self.pid_enabled = True
            self.base_speed = params.value
        else:
            self.pid_enabled = True
            self.base_speed = 0
            self.car.setWheelsSpeed(0,0)
        return SetInt32Response(params.port,params.value)
    '''

    def pubImage(self, image):
        """Publish *image* as a JPEG CompressedImage on ~image_color."""
        msg = CompressedImage()
        msg.header.stamp = rospy.Time.now()
        msg.format = "jpeg"
        msg.data = jpg_from_bgr(image)
        self.pub_detections.publish(msg)

    def onShutdown(self):
        rospy.loginfo("[%s] Shutdown." % (self.node_name))
Exemple #11
0
# Note for vz:
# vz < 0 => ascend
# vz > 0 => descend

# UP = -0.5
# DOWN = 0.5

# Vertical-speed commands are currently zeroed out (no ascend/descend).
UP = 0
DOWN = 0

# Number of discrete turn-direction bins reported by the detector; a return
# value of PRECISION + 1 means "no line found" (see the break below).
PRECISION = 3
YAW_CONTROL = True
ABS_MAX_TURNING_ANGLE = 75  # presumably degrees — confirm against controller code

ld = LineDetector(PRECISION)

# Take off to 1.5 m before starting to follow the line.
arm_and_takeoff(1.5)

print("!!!!!!!!!!! MOVING START !!!!!!!!!")

while True:
    # getTurnDir returns
    dir = ld.getTurnDir()  # NOTE(review): shadows the builtin `dir`; rename when safe
    print("getTurnDir returns %d" % dir)

    # If there is no line, land the vehicle
    if dir == (PRECISION + 1):
        break

    # If YAW_CONTROL is true, it is possible to trace curves.
    # NOTE(review): fragment appears truncated here — the YAW_CONTROL branch body is missing.
class LineDetectorNode(object):
    """Line-detector node: decodes camera frames on a worker thread, finds
    white/yellow/red segments and publishes them as a SegmentList, with
    optional white balancing of the input frames.
    """

    def __init__(self):
        self.node_name = "Line Detector"

        # Thread lock: only one frame is processed at a time.
        self.thread_lock = threading.Lock()

        # Constructor of line detector and white-balance helper.
        self.bridge = CvBridge()
        self.detector = LineDetector()
        self.wb = WhiteBalance()
        self.flag_wb_ref = False  # True once the white-balance reference is set

        # Parameters
        self.flag_wb = rospy.get_param('~white_balance')

        self.image_size = rospy.get_param('~img_size')
        self.top_cutoff = rospy.get_param('~top_cutoff')

        # HSV colour thresholds.
        self.detector.hsv_white1 = np.array(rospy.get_param('~hsv_white1'))
        self.detector.hsv_white2 = np.array(rospy.get_param('~hsv_white2'))
        self.detector.hsv_yellow1 = np.array(rospy.get_param('~hsv_yellow1'))
        self.detector.hsv_yellow2 = np.array(rospy.get_param('~hsv_yellow2'))
        self.detector.hsv_red1 = np.array(rospy.get_param('~hsv_red1'))
        self.detector.hsv_red2 = np.array(rospy.get_param('~hsv_red2'))
        self.detector.hsv_red3 = np.array(rospy.get_param('~hsv_red3'))
        self.detector.hsv_red4 = np.array(rospy.get_param('~hsv_red4'))

        # Edge / Hough tuning.
        self.detector.dilation_kernel_size = rospy.get_param('~dilation_kernel_size')
        self.detector.canny_thresholds = rospy.get_param('~canny_thresholds')
        self.detector.hough_min_line_length = rospy.get_param('~hough_min_line_length')
        self.detector.hough_max_line_gap = rospy.get_param('~hough_max_line_gap')
        self.detector.hough_threshold = rospy.get_param('~hough_threshold')

        # Publishers
        self.pub_lines = rospy.Publisher("~segment_list", SegmentList, queue_size=1)
        self.pub_image = rospy.Publisher("~image_with_lines", Image, queue_size=1)

        # Verbose option
        self.verbose = rospy.get_param('~verbose')
        if self.verbose:
            self.toc_pre = rospy.get_time()

        # Subscribers
        self.sub_image = rospy.Subscriber("~image", CompressedImage, self.cbImage, queue_size=1)
        rospy.loginfo("[%s] Initialized." % (self.node_name))

    def cbImage(self, image_msg):
        """Process each frame on a daemon thread so the callback returns fast."""
        thread = threading.Thread(target=self.processImage, args=(image_msg,))
        thread.daemon = True  # Thread.setDaemon() is deprecated
        thread.start()
        # Returns right away

    def processImage(self, image_msg):
        """Decode, preprocess and scan one frame; publish segments and debug image.

        Frames arriving while a previous one is still being processed are
        dropped (non-blocking lock acquire).
        """
        if not self.thread_lock.acquire(False):
            # Return immediately if the thread is locked
            return

        # try/finally so an exception during decoding/detection does not leak
        # the lock and permanently stall the node.
        try:
            if self.verbose:
                rospy.loginfo("[%s] Latency received = %.3f ms" % (self.node_name, (rospy.get_time() - image_msg.header.stamp.to_sec()) * 1000.0))

            # Decode from compressed image with OpenCV.
            # np.fromstring is deprecated -> frombuffer; CV_LOAD_IMAGE_COLOR was
            # removed in OpenCV 3+ -> IMREAD_COLOR.
            image_cv = cv2.imdecode(np.frombuffer(image_msg.data, np.uint8), cv2.IMREAD_COLOR)

            if self.verbose:
                self.tic = rospy.get_time()
                rospy.loginfo("[%s] Latency image decompressed = %.3f ms" % (self.node_name, (self.tic - image_msg.header.stamp.to_sec()) * 1000.0))

            # White balancing: use the first frame as reference for parameters.
            if self.flag_wb and (not self.flag_wb_ref):
                self.wb.setRefImg(image_cv)
                rospy.loginfo("[%s] White balance: parameters computed." % (self.node_name))
                print(self.wb.norm_bgr)  # was a Python-2 print statement
                self.flag_wb_ref = True

            # Resize and crop image
            hei_original = image_cv.shape[0]
            wid_original = image_cv.shape[1]
            if self.image_size[0] != hei_original or self.image_size[1] != wid_original:
                image_cv = cv2.resize(image_cv, (self.image_size[1], self.image_size[0]), interpolation=cv2.INTER_NEAREST)
            image_cv = image_cv[self.top_cutoff:, :, :]

            # White balancing of the working frame.
            if self.flag_wb and self.flag_wb_ref:
                self.wb.correctImg(image_cv)

            # Set the image to be detected
            self.detector.setImage(image_cv)

            # Detect lines and normals
            lines_white, normals_white = self.detector.detectLines('white')
            lines_yellow, normals_yellow = self.detector.detectLines('yellow')
            lines_red, normals_red = self.detector.detectLines('red')

            # Draw lines (normal drawing intentionally disabled).
            self.detector.drawLines(lines_white, (0, 0, 0))
            self.detector.drawLines(lines_yellow, (255, 0, 0))
            self.detector.drawLines(lines_red, (0, 255, 0))

            # SegmentList constructor
            segmentList = SegmentList()
            segmentList.header.stamp = image_msg.header.stamp

            # Convert to normalized pixel coordinates, and add segments to segmentList
            arr_cutoff = np.array((0, self.top_cutoff, 0, self.top_cutoff))
            arr_ratio = np.array((1. / self.image_size[1], 1. / self.image_size[0], 1. / self.image_size[1], 1. / self.image_size[0]))
            if len(lines_white) > 0:
                lines_normalized_white = ((lines_white + arr_cutoff) * arr_ratio)
                segmentList.segments.extend(self.toSegmentMsg(lines_normalized_white, normals_white, Segment.WHITE))
            if len(lines_yellow) > 0:
                lines_normalized_yellow = ((lines_yellow + arr_cutoff) * arr_ratio)
                segmentList.segments.extend(self.toSegmentMsg(lines_normalized_yellow, normals_yellow, Segment.YELLOW))
            if len(lines_red) > 0:
                lines_normalized_red = ((lines_red + arr_cutoff) * arr_ratio)
                segmentList.segments.extend(self.toSegmentMsg(lines_normalized_red, normals_red, Segment.RED))

            if self.verbose:
                self.toc = rospy.get_time()
                rospy.loginfo("[%s] Image processing time: %.3f ms" % (self.node_name, (self.toc - self.tic) * 1000.0))
                rospy.loginfo("[%s] Number of white segments = %d" % (self.node_name, len(lines_white)))
                rospy.loginfo("[%s] number of yellow segments = %d" % (self.node_name, len(lines_yellow)))
                rospy.loginfo("[%s] number of red segments = %d" % (self.node_name, len(lines_red)))
                self.toc_pre = self.toc

            # Publish segmentList
            self.pub_lines.publish(segmentList)

            # Publish the frame with lines
            image_msg_out = self.bridge.cv2_to_imgmsg(self.detector.getImage(), "bgr8")
            image_msg_out.header.stamp = image_msg.header.stamp
            self.pub_image.publish(image_msg_out)

            if self.verbose:
                rospy.loginfo("[%s] Latency sent = %.3f ms" % (self.node_name, (rospy.get_time() - image_msg.header.stamp.to_sec()) * 1000.0))
        finally:
            # Release the thread lock
            self.thread_lock.release()

    def onShutdown(self):
        rospy.loginfo("[LineDetectorNode] Shutdown.")

    def toSegmentMsg(self, lines, normals, color):
        """Build Segment messages from (N,4) normalized endpoints and (N,2) normals."""
        segmentMsgList = []
        for x1, y1, x2, y2, norm_x, norm_y in np.hstack((lines, normals)):
            segment = Segment()
            segment.color = color
            segment.pixels_normalized[0].x = x1
            segment.pixels_normalized[0].y = y1
            segment.pixels_normalized[1].x = x2
            segment.pixels_normalized[1].y = y2
            segment.normal.x = norm_x
            segment.normal.y = norm_y

            segmentMsgList.append(segment)
        return segmentMsgList
class LineDetectorNode(object):
    """ROS node that detects white, yellow and red line segments in images.

    Incoming compressed camera frames are decoded, optionally white-balanced,
    resized and cropped, then handed to a LineDetector. Detected segments are
    converted to normalized image coordinates and published as a SegmentList,
    together with a debug image with the detected lines drawn on top.
    """

    def __init__(self):
        self.node_name = "Line Detector"

        # Thread lock: a frame is dropped when the previous one is still
        # being processed, so the node never builds up a backlog.
        self.thread_lock = threading.Lock()

        # Constructor of line detector
        self.bridge = CvBridge()
        self.detector = LineDetector()
        self.wb = WhiteBalance()
        # Becomes True once a reference image for white balancing is captured
        self.flag_wb_ref = False

        # Parameters
        self.flag_wb = rospy.get_param('~white_balance')

        self.image_size = rospy.get_param('~img_size')
        self.top_cutoff = rospy.get_param('~top_cutoff')

        # HSV threshold ranges per color (red uses two ranges because its
        # hue wraps around the ends of the hue axis)
        self.detector.hsv_white1 = np.array(rospy.get_param('~hsv_white1'))
        self.detector.hsv_white2 = np.array(rospy.get_param('~hsv_white2'))
        self.detector.hsv_yellow1 = np.array(rospy.get_param('~hsv_yellow1'))
        self.detector.hsv_yellow2 = np.array(rospy.get_param('~hsv_yellow2'))
        self.detector.hsv_red1 = np.array(rospy.get_param('~hsv_red1'))
        self.detector.hsv_red2 = np.array(rospy.get_param('~hsv_red2'))
        self.detector.hsv_red3 = np.array(rospy.get_param('~hsv_red3'))
        self.detector.hsv_red4 = np.array(rospy.get_param('~hsv_red4'))

        # Edge-detection and Hough-transform tuning parameters
        self.detector.dilation_kernel_size = rospy.get_param(
            '~dilation_kernel_size')
        self.detector.canny_thresholds = rospy.get_param('~canny_thresholds')
        self.detector.hough_min_line_length = rospy.get_param(
            '~hough_min_line_length')
        self.detector.hough_max_line_gap = rospy.get_param(
            '~hough_max_line_gap')
        self.detector.hough_threshold = rospy.get_param('~hough_threshold')

        # Publishers
        self.pub_lines = rospy.Publisher("~segment_list",
                                         SegmentList,
                                         queue_size=1)
        self.pub_image = rospy.Publisher("~image_with_lines",
                                         Image,
                                         queue_size=1)

        # Verbose option
        self.verbose = rospy.get_param('~verbose')
        if self.verbose:
            self.toc_pre = rospy.get_time()

        # Subscribers are registered last so no callback can fire on a
        # partially-constructed node.
        self.sub_image = rospy.Subscriber("~image",
                                          CompressedImage,
                                          self.cbImage,
                                          queue_size=1)
        rospy.loginfo("[%s] Initialized." % (self.node_name))

    def cbImage(self, image_msg):
        # Start a daemon thread to process the image
        thread = threading.Thread(target=self.processImage, args=(image_msg, ))
        thread.setDaemon(True)
        thread.start()
        # Returns rightaway

    def processImage(self, image_msg):
        """Process one frame; drops the frame if another is already in flight."""
        if not self.thread_lock.acquire(False):
            # Return immediately if the thread is locked
            return
        # BUGFIX: release the lock even when processing raises (e.g. a corrupt
        # frame failing to decode); otherwise one bad frame would deadlock the
        # node forever, as every later frame would be dropped at the acquire.
        try:
            self._processImage(image_msg)
        finally:
            self.thread_lock.release()

    def _processImage(self, image_msg):
        """Do the actual detection and publishing; caller holds thread_lock."""
        # Verbose
        if self.verbose:
            rospy.loginfo(
                "[%s] Latency received = %.3f ms" %
                (self.node_name,
                 (rospy.get_time() - image_msg.header.stamp.to_sec()) *
                 1000.0))

        # Decode from compressed image with OpenCV
        image_cv = cv2.imdecode(np.fromstring(image_msg.data, np.uint8),
                                cv2.CV_LOAD_IMAGE_COLOR)

        # Verbose
        if self.verbose:
            self.tic = rospy.get_time()
            rospy.loginfo(
                "[%s] Latency image decompressed = %.3f ms" %
                (self.node_name,
                 (self.tic - image_msg.header.stamp.to_sec()) * 1000.0))

        # White balancing: set reference image to estimate parameters
        if self.flag_wb and (not self.flag_wb_ref):
            # set reference image to estimate parameters
            self.wb.setRefImg(image_cv)
            rospy.loginfo("[%s] White balance: parameters computed." %
                          (self.node_name))
            # print() form works identically on Python 2 and 3 for one arg
            print(self.wb.norm_bgr)
            self.flag_wb_ref = True

        # Resize and crop image
        hei_original = image_cv.shape[0]
        wid_original = image_cv.shape[1]
        if self.image_size[0] != hei_original or self.image_size[
                1] != wid_original:
            image_cv = cv2.resize(image_cv,
                                  (self.image_size[1], self.image_size[0]),
                                  interpolation=cv2.INTER_NEAREST)
        image_cv = image_cv[self.top_cutoff:, :, :]

        # White balancing
        if self.flag_wb and self.flag_wb_ref:
            self.wb.correctImg(image_cv)

        # Set the image to be detected
        self.detector.setImage(image_cv)

        # Detect lines and normals
        lines_white, normals_white = self.detector.detectLines('white')
        lines_yellow, normals_yellow = self.detector.detectLines('yellow')
        lines_red, normals_red = self.detector.detectLines('red')

        # Draw the detected lines on the debug image (colors are BGR)
        self.detector.drawLines(lines_white, (0, 0, 0))
        self.detector.drawLines(lines_yellow, (255, 0, 0))
        self.detector.drawLines(lines_red, (0, 255, 0))

        # SegmentList constructor
        segmentList = SegmentList()
        segmentList.header.stamp = image_msg.header.stamp

        # Convert to normalized pixel coordinates, and add segments to segmentList.
        # arr_cutoff restores the rows removed by the top crop; arr_ratio
        # scales pixel coordinates to the [0, 1] range.
        arr_cutoff = np.array((0, self.top_cutoff, 0, self.top_cutoff))
        arr_ratio = np.array(
            (1. / self.image_size[1], 1. / self.image_size[0],
             1. / self.image_size[1], 1. / self.image_size[0]))
        if len(lines_white) > 0:
            lines_normalized_white = ((lines_white + arr_cutoff) * arr_ratio)
            segmentList.segments.extend(
                self.toSegmentMsg(lines_normalized_white, normals_white,
                                  Segment.WHITE))
        if len(lines_yellow) > 0:
            lines_normalized_yellow = ((lines_yellow + arr_cutoff) * arr_ratio)
            segmentList.segments.extend(
                self.toSegmentMsg(lines_normalized_yellow, normals_yellow,
                                  Segment.YELLOW))
        if len(lines_red) > 0:
            lines_normalized_red = ((lines_red + arr_cutoff) * arr_ratio)
            segmentList.segments.extend(
                self.toSegmentMsg(lines_normalized_red, normals_red,
                                  Segment.RED))

        # Verbose
        if self.verbose:
            self.toc = rospy.get_time()
            rospy.loginfo("[%s] Image processing time: %.3f ms" %
                          (self.node_name, (self.toc - self.tic) * 1000.0))
            rospy.loginfo("[%s] Number of white segments = %d" %
                          (self.node_name, len(lines_white)))
            rospy.loginfo("[%s] number of yellow segments = %d" %
                          (self.node_name, len(lines_yellow)))
            rospy.loginfo("[%s] number of red segments = %d" %
                          (self.node_name, len(lines_red)))
            self.toc_pre = self.toc

        # Publish segmentList
        self.pub_lines.publish(segmentList)

        # Publish the frame with lines
        image_msg_out = self.bridge.cv2_to_imgmsg(self.detector.getImage(),
                                                  "bgr8")
        image_msg_out.header.stamp = image_msg.header.stamp
        self.pub_image.publish(image_msg_out)

        # Verbose
        if self.verbose:
            rospy.loginfo(
                "[%s] Latency sent = %.3f ms" %
                (self.node_name,
                 (rospy.get_time() - image_msg.header.stamp.to_sec()) *
                 1000.0))

    def onShutdown(self):
        rospy.loginfo("[LineDetectorNode] Shutdown.")

    def toSegmentMsg(self, lines, normals, color):
        """Convert Nx4 lines and Nx2 normals into a list of Segment messages."""
        segmentMsgList = []
        for x1, y1, x2, y2, norm_x, norm_y in np.hstack((lines, normals)):
            segment = Segment()
            segment.color = color
            segment.pixels_normalized[0].x = x1
            segment.pixels_normalized[0].y = y1
            segment.pixels_normalized[1].x = x2
            segment.pixels_normalized[1].y = y2
            segment.normal.x = norm_x
            segment.normal.y = norm_y

            segmentMsgList.append(segment)
        return segmentMsgList
# --- Example #14 (scraped snippet separator; original: "Exemple #14", 0 votes) ---
    def __init__(self, node_name):
        """Read parameters, build the detector and wire up ROS pub/sub."""
        # Initialize the DTROS parent class
        super(LineDetectorNode, self).__init__(node_name=node_name,
                                               node_type=NodeType.PERCEPTION)

        # Configuration from the parameter server
        self._line_detector_parameters = rospy.get_param(
            "~line_detector_parameters", None)
        self._colors = rospy.get_param("~colors", None)
        self._img_size = rospy.get_param("~img_size", None)
        self._top_cutoff = rospy.get_param("~top_cutoff", None)

        self.bridge = CvBridge()

        # AntiInstagram color-correction state; thresholds arrive via topic
        self.ai_thresholds_received = False
        self.anti_instagram_thresholds = dict()
        self.ai = AntiInstagram()

        # Colormaps for the debug/ranges images, computed lazily and cached
        self.colormaps = dict()

        # Detector configured from the Parameter Server / config file
        self.detector = LineDetector(**self._line_detector_parameters)
        # One ColorRange object per configured color
        self.color_ranges = {
            color: ColorRange.fromDict(d)
            for color, d in list(self._colors.items())
        }

        # Publishers
        self.pub_lines = rospy.Publisher("~segment_list",
                                         SegmentList,
                                         queue_size=1,
                                         dt_topic_type=TopicType.PERCEPTION)
        # Compressed debug image streams
        for topic, attr in (("~debug/segments/compressed", "pub_d_segments"),
                            ("~debug/edges/compressed", "pub_d_edges"),
                            ("~debug/maps/compressed", "pub_d_maps")):
            setattr(
                self, attr,
                rospy.Publisher(topic,
                                CompressedImage,
                                queue_size=1,
                                dt_topic_type=TopicType.DEBUG))
        # these are not compressed because compression adds undesired blur
        for channels in ("HS", "SV", "HV"):
            setattr(
                self, "pub_d_ranges_" + channels,
                rospy.Publisher("~debug/ranges_" + channels,
                                Image,
                                queue_size=1,
                                dt_topic_type=TopicType.DEBUG))

        # Subscribers
        self.sub_image = rospy.Subscriber("~image/compressed",
                                          CompressedImage,
                                          self.image_cb,
                                          buff_size=10000000,
                                          queue_size=1)

        self.sub_thresholds = rospy.Subscriber("~thresholds",
                                               AntiInstagramThresholds,
                                               self.thresholds_cb,
                                               queue_size=1)
# --- Example #15 (scraped snippet separator; original: "Exemple #15", 0 votes) ---
class LineDetectorNode(DTROS):
    """
    The ``LineDetectorNode`` is responsible for detecting the line white, yellow and red line segment in an image and
    is used for lane localization.

    Upon receiving an image, this node reduces its resolution, cuts off the top part so that only the
    road-containing part of the image is left, extracts the white, red, and yellow segments and publishes them.
    The main functionality of this node is implemented in the :py:class:`line_detector.LineDetector` class.

    The performance of this node can be very sensitive to its configuration parameters. Therefore, it also provides a
    number of debug topics which can be used for fine-tuning these parameters. These configuration parameters can be
    changed dynamically while the node is running via ``rosparam set`` commands.

    Args:
        node_name (:obj:`str`): a unique, descriptive name for the node that ROS will use

    Configuration:
        ~line_detector_parameters (:obj:`dict`): A dictionary with the parameters for the detector. The full list can be found in :py:class:`line_detector.LineDetector`.
        ~colors (:obj:`dict`): A dictionary of colors and color ranges to be detected in the image. The keys (color names) should match the ones in the Segment message definition, otherwise an exception will be thrown! See the ``config`` directory in the node code for the default ranges.
        ~img_size (:obj:`list` of ``int``): The desired downsized resolution of the image. Lower resolution would result in faster detection but lower performance, default is ``[120,160]``
        ~top_cutoff (:obj:`int`): The number of rows to be removed from the top of the image _after_ resizing, default is 40

    Subscriber:
        ~camera_node/image/compressed (:obj:`sensor_msgs.msg.CompressedImage`): The camera images
        ~anti_instagram_node/thresholds(:obj:`duckietown_msgs.msg.AntiInstagramThresholds`): The thresholds to do color correction

    Publishers:
        ~segment_list (:obj:`duckietown_msgs.msg.SegmentList`): A list of the detected segments. Each segment is an :obj:`duckietown_msgs.msg.Segment` message
        ~debug/segments/compressed (:obj:`sensor_msgs.msg.CompressedImage`): Debug topic with the segments drawn on the input image
        ~debug/edges/compressed (:obj:`sensor_msgs.msg.CompressedImage`): Debug topic with the Canny edges drawn on the input image
        ~debug/maps/compressed (:obj:`sensor_msgs.msg.CompressedImage`): Debug topic with the regions falling in each color range drawn on the input image
        ~debug/ranges_HS (:obj:`sensor_msgs.msg.Image`): Debug topic with a histogram of the colors in the input image and the color ranges, Hue-Saturation projection
        ~debug/ranges_SV (:obj:`sensor_msgs.msg.Image`): Debug topic with a histogram of the colors in the input image and the color ranges, Saturation-Value projection
        ~debug/ranges_HV (:obj:`sensor_msgs.msg.Image`): Debug topic with a histogram of the colors in the input image and the color ranges, Hue-Value projection

    """
    def __init__(self, node_name):
        """Read configuration, build the detector and wire up ROS pub/sub."""
        # Initialize the DTROS parent class
        super(LineDetectorNode, self).__init__(node_name=node_name,
                                               node_type=NodeType.PERCEPTION)

        # Define parameters
        self._line_detector_parameters = rospy.get_param(
            "~line_detector_parameters", None)
        self._colors = rospy.get_param("~colors", None)
        self._img_size = rospy.get_param("~img_size", None)
        self._top_cutoff = rospy.get_param("~top_cutoff", None)

        self.bridge = CvBridge()

        # The thresholds to be used for AntiInstagram color correction
        self.ai_thresholds_received = False
        self.anti_instagram_thresholds = dict()
        self.ai = AntiInstagram()

        # This holds the colormaps for the debug/ranges images after they are computed once
        self.colormaps = dict()

        # Create a new LineDetector object with the parameters from the Parameter Server / config file
        self.detector = LineDetector(**self._line_detector_parameters)
        # Update the color ranges objects
        self.color_ranges = {
            color: ColorRange.fromDict(d)
            for color, d in list(self._colors.items())
        }

        # Publishers
        self.pub_lines = rospy.Publisher("~segment_list",
                                         SegmentList,
                                         queue_size=1,
                                         dt_topic_type=TopicType.PERCEPTION)
        self.pub_d_segments = rospy.Publisher("~debug/segments/compressed",
                                              CompressedImage,
                                              queue_size=1,
                                              dt_topic_type=TopicType.DEBUG)
        self.pub_d_edges = rospy.Publisher("~debug/edges/compressed",
                                           CompressedImage,
                                           queue_size=1,
                                           dt_topic_type=TopicType.DEBUG)
        self.pub_d_maps = rospy.Publisher("~debug/maps/compressed",
                                          CompressedImage,
                                          queue_size=1,
                                          dt_topic_type=TopicType.DEBUG)
        # these are not compressed because compression adds undesired blur
        self.pub_d_ranges_HS = rospy.Publisher("~debug/ranges_HS",
                                               Image,
                                               queue_size=1,
                                               dt_topic_type=TopicType.DEBUG)
        self.pub_d_ranges_SV = rospy.Publisher("~debug/ranges_SV",
                                               Image,
                                               queue_size=1,
                                               dt_topic_type=TopicType.DEBUG)
        self.pub_d_ranges_HV = rospy.Publisher("~debug/ranges_HV",
                                               Image,
                                               queue_size=1,
                                               dt_topic_type=TopicType.DEBUG)

        # Subscribers
        self.sub_image = rospy.Subscriber("~image/compressed",
                                          CompressedImage,
                                          self.image_cb,
                                          buff_size=10000000,
                                          queue_size=1)

        self.sub_thresholds = rospy.Subscriber("~thresholds",
                                               AntiInstagramThresholds,
                                               self.thresholds_cb,
                                               queue_size=1)

    def thresholds_cb(self, thresh_msg):
        """Store the latest AntiInstagram color-balance thresholds."""
        self.anti_instagram_thresholds["lower"] = thresh_msg.low
        self.anti_instagram_thresholds["higher"] = thresh_msg.high
        self.ai_thresholds_received = True

    def image_cb(self, image_msg):
        """
        Processes the incoming image messages.

        Performs the following steps for each incoming image:

        #. Performs color correction
        #. Resizes the image to the ``~img_size`` resolution
        #. Removes the top ``~top_cutoff`` rows in order to remove the part of the image that doesn't include the road
        #. Extracts the line segments in the image using :py:class:`line_detector.LineDetector`
        #. Converts the coordinates of detected segments to normalized ones
        #. Creates and publishes the resultant :obj:`duckietown_msgs.msg.SegmentList` message
        #. Creates and publishes debug images if there is a subscriber to the respective topics

        Args:
            image_msg (:obj:`sensor_msgs.msg.CompressedImage`): The receive image message

        """

        # Decode from compressed image with OpenCV
        try:
            image = self.bridge.compressed_imgmsg_to_cv2(image_msg)
        except ValueError as e:
            self.logerr(f"Could not decode image: {e}")
            return

        # Perform color correction
        if self.ai_thresholds_received:
            image = self.ai.apply_color_balance(
                self.anti_instagram_thresholds["lower"],
                self.anti_instagram_thresholds["higher"], image)

        # Resize the image to the desired dimensions
        height_original, width_original = image.shape[0:2]
        img_size = (self._img_size[1], self._img_size[0])
        if img_size[0] != width_original or img_size[1] != height_original:
            image = cv2.resize(image,
                               img_size,
                               interpolation=cv2.INTER_NEAREST)
        image = image[self._top_cutoff:, :, :]

        # Extract the line segments for every color
        self.detector.setImage(image)
        detections = {
            color: self.detector.detectLines(ranges)
            for color, ranges in list(self.color_ranges.items())
        }

        # Construct a SegmentList
        segment_list = SegmentList()
        segment_list.header.stamp = image_msg.header.stamp

        # Remove the offset in coordinates coming from the removing of the top part and
        # scale all pixel coordinates into the [0, 1] range
        arr_cutoff = np.array([0, self._top_cutoff, 0, self._top_cutoff])
        arr_ratio = np.array([
            1.0 / self._img_size[1],
            1.0 / self._img_size[0],
            1.0 / self._img_size[1],
            1.0 / self._img_size[0],
        ])

        # Fill in the segment_list with all the detected segments
        for color, det in list(detections.items()):
            # Get the ID for the color from the Segment msg definition
            # Throw and exception otherwise
            if len(det.lines) > 0 and len(det.normals) > 0:
                try:
                    color_id = getattr(Segment, color)
                    lines_normalized = (det.lines + arr_cutoff) * arr_ratio
                    segment_list.segments.extend(
                        self._to_segment_msg(lines_normalized, det.normals,
                                             color_id))
                except AttributeError:
                    self.logerr(
                        f"Color name {color} is not defined in the Segment message"
                    )

        # Publish the message
        self.pub_lines.publish(segment_list)

        # If there are any subscribers to the debug topics, generate a debug image and publish it
        if self.pub_d_segments.get_num_connections() > 0:
            colorrange_detections = {
                self.color_ranges[c]: det
                for c, det in list(detections.items())
            }
            debug_img = plotSegments(image, colorrange_detections)
            debug_image_msg = self.bridge.cv2_to_compressed_imgmsg(debug_img)
            debug_image_msg.header = image_msg.header
            self.pub_d_segments.publish(debug_image_msg)

        if self.pub_d_edges.get_num_connections() > 0:
            debug_image_msg = self.bridge.cv2_to_compressed_imgmsg(
                self.detector.canny_edges)
            debug_image_msg.header = image_msg.header
            self.pub_d_edges.publish(debug_image_msg)

        if self.pub_d_maps.get_num_connections() > 0:
            colorrange_detections = {
                self.color_ranges[c]: det
                for c, det in list(detections.items())
            }
            debug_img = plotMaps(image, colorrange_detections)
            debug_image_msg = self.bridge.cv2_to_compressed_imgmsg(debug_img)
            debug_image_msg.header = image_msg.header
            self.pub_d_maps.publish(debug_image_msg)

        for channels in ["HS", "SV", "HV"]:
            publisher = getattr(self, f"pub_d_ranges_{channels}")
            if publisher.get_num_connections() > 0:
                debug_img = self._plot_ranges_histogram(channels)
                debug_image_msg = self.bridge.cv2_to_imgmsg(debug_img,
                                                            encoding="bgr8")
                debug_image_msg.header = image_msg.header
                publisher.publish(debug_image_msg)

    @staticmethod
    def _to_segment_msg(lines, normals, color):
        """
        Converts line detections to a list of Segment messages.

        Converts the resultant line segments and normals from the line detection to a list of Segment messages.

        Args:
            lines (:obj:`numpy array`): An ``Nx4`` array where each row represents a line.
            normals (:obj:`numpy array`): An ``Nx2`` array where each row represents the normal of a line.
            color (:obj:`str`): Color name string, should be one of the pre-defined in the Segment message definition.

        Returns:
            :obj:`list` of :obj:`duckietown_msgs.msg.Segment`: List of Segment messages

        """
        segment_msg_list = []
        for x1, y1, x2, y2, norm_x, norm_y in np.hstack((lines, normals)):
            segment = Segment()
            segment.color = color
            segment.pixels_normalized[0].x = x1
            segment.pixels_normalized[0].y = y1
            segment.pixels_normalized[1].x = x2
            segment.pixels_normalized[1].y = y2
            segment.normal.x = norm_x
            segment.normal.y = norm_y
            segment_msg_list.append(segment)
        return segment_msg_list

    def _plot_ranges_histogram(self, channels):
        """Utility method for plotting color histograms and color ranges.

        Args:
            channels (:obj:`str`): The desired two channels, should be one of ``['HS','SV','HV']``

        Returns:
            :obj:`numpy array`: The resultant plot image

        """
        channel_to_axis = {"H": 0, "S": 1, "V": 2}
        axis_to_range = {0: 180, 1: 256, 2: 256}

        # Get which is the third channel that will not be shown in this plot
        missing_channel = "HSV".replace(channels[0],
                                        "").replace(channels[1], "")

        hsv_im = self.detector.hsv
        # Get the pixels as a list (flatten the horizontal and vertical dimensions)
        hsv_im = hsv_im.reshape((-1, 3))

        channel_idx = [
            channel_to_axis[channels[0]], channel_to_axis[channels[1]]
        ]

        # Get only the relevant channels
        x_bins = np.arange(0, axis_to_range[channel_idx[1]] + 1, 2)
        y_bins = np.arange(0, axis_to_range[channel_idx[0]] + 1, 2)
        h, _, _ = np.histogram2d(x=hsv_im[:, channel_idx[0]],
                                 y=hsv_im[:, channel_idx[1]],
                                 bins=[y_bins, x_bins])
        # Log-normalized histogram
        np.log(h, out=h, where=(h != 0))
        h = (255 * h / np.max(h)).astype(np.uint8)

        # Make a color map, for the missing channel, just take the middle of the range
        if channels not in self.colormaps:
            colormap_1, colormap_0 = np.meshgrid(x_bins[:-1], y_bins[:-1])
            colormap_2 = np.ones_like(colormap_0) * (
                axis_to_range[channel_to_axis[missing_channel]] / 2)

            channel_to_map = {
                channels[0]: colormap_0,
                channels[1]: colormap_1,
                missing_channel: colormap_2
            }

            self.colormaps[channels] = np.stack([
                channel_to_map["H"], channel_to_map["S"], channel_to_map["V"]
            ],
                                                axis=-1).astype(np.uint8)
            self.colormaps[channels] = cv2.cvtColor(self.colormaps[channels],
                                                    cv2.COLOR_HSV2BGR)

        # resulting histogram image as a blend of the two images
        im = cv2.cvtColor(h[:, :, None], cv2.COLOR_GRAY2BGR)
        im = cv2.addWeighted(im, 0.5, self.colormaps[channels], 1 - 0.5, 0.0)

        # now plot the color ranges on top
        for _, color_range in list(self.color_ranges.items()):
            # convert HSV color to BGR
            c = color_range.representative
            c = np.uint8([[[c[0], c[1], c[2]]]])
            color = cv2.cvtColor(c, cv2.COLOR_HSV2BGR).squeeze().astype(int)
            for i in range(len(color_range.low)):
                cv2.rectangle(
                    im,
                    pt1=(
                        (color_range.high[i, channel_idx[1]] / 2).astype(
                            np.uint8),
                        (color_range.high[i, channel_idx[0]] / 2).astype(
                            np.uint8),
                    ),
                    pt2=(
                        (color_range.low[i, channel_idx[1]] / 2).astype(
                            np.uint8),
                        (color_range.low[i, channel_idx[0]] / 2).astype(
                            np.uint8),
                    ),
                    color=color,
                    lineType=cv2.LINE_4,
                )
        # ---
        return im
# --- Example #16 (scraped snippet separator; original: "Exemple #16", 0 votes) ---
 def __init__(self):
     # Wrap a LineDetector instance for later use by this object's methods.
     self.line_detector = LineDetector()
     # Most recent thresholded image; None until a detection has run.
     # NOTE(review): never assigned in the visible code — presumably set by
     # a detection method outside this chunk; confirm against the full class.
     self.thresh_image = None
class LineDetectorNode(object):
    """ROS node that detects white/yellow/red line segments in camera frames.

    Subscribes to "~image" (CompressedImage), runs a LineDetector on each
    frame, and publishes a SegmentList on "~segment_list" (coordinates
    normalized by the working image size) plus a debug Image with the
    detected lines drawn on "~image_with_lines".  A 3-second timer
    (cbParamUpdate) steps one detector parameter — this looks like an
    interactive tuning aid; NOTE(review): Python 2 syntax (print
    statements) throughout.
    """

    def __init__(self):
        self.node_name = "Line Detector"

        # Working resolution (height, width) and number of rows cropped
        # from the top of each frame, from ROS parameters.
        self.image_size = rospy.get_param('~img_size')
        self.top_cutoff = rospy.get_param('~top_cutoff')

        self.bridge = CvBridge()
        self.detector = LineDetector()

        # HSV thresholds for each colour class.  Red gets four arrays —
        # presumably two (low, high) ranges because red hue wraps around;
        # confirm against LineDetector.
        self.detector.hsv_white1 = np.array(rospy.get_param('~hsv_white1'))
        self.detector.hsv_white2 = np.array(rospy.get_param('~hsv_white2'))
        self.detector.hsv_yellow1 = np.array(rospy.get_param('~hsv_yellow1'))
        self.detector.hsv_yellow2 = np.array(rospy.get_param('~hsv_yellow2'))
        self.detector.hsv_red1 = np.array(rospy.get_param('~hsv_red1'))
        self.detector.hsv_red2 = np.array(rospy.get_param('~hsv_red2'))
        self.detector.hsv_red3 = np.array(rospy.get_param('~hsv_red3'))
        self.detector.hsv_red4 = np.array(rospy.get_param('~hsv_red4'))

        # Edge-detection / Hough-transform tuning parameters.
        self.detector.dilation_kernel_size = rospy.get_param('~dilation_kernel_size')
        self.detector.canny_thresholds = rospy.get_param('~canny_thresholds')
        self.detector.hough_min_line_length = rospy.get_param('~hough_min_line_length')
        self.detector.hough_max_line_gap    = rospy.get_param('~hough_max_line_gap')
        self.detector.hough_threshold = rospy.get_param('~hough_threshold')

        # ROS I/O: frames in, segments and annotated frames out.
        self.sub_image = rospy.Subscriber("~image", CompressedImage, self.processImage)
        self.pub_lines = rospy.Publisher("~segment_list", SegmentList, queue_size=1)
        self.pub_image = rospy.Publisher("~image_with_lines", Image, queue_size=1)

        # Fires every 3 s to step a detector parameter (tuning sweep).
        self.timer_param = rospy.Timer(rospy.Duration.from_sec(3.0),self.cbParamUpdate)

    def cbParamUpdate(self,event):
        """Timer callback: step the white upper bound's S channel by 5
        (each component first wrapped mod 256) and print both white bounds.

        The large triple-quoted string below is commented-out sweep code
        for the remaining parameters (yellow bounds, Canny thresholds,
        Hough parameters).  NOTE(review): the dead code at
        'Maximum line gap' references self.detector.detector — a typo if
        ever re-enabled.
        """
        print '====Parameters Updated====' 

        # S of white
        self.detector.hsv_white2 = (self.detector.hsv_white2)%256 + np.array([0, 5, 0])
        print 'HSV_white1: ' + str(self.detector.hsv_white1)
        print 'HSV_white2: ' + str(self.detector.hsv_white2)

        """ 
        # S of white
        self.detector.hsv_white2 = (self.detector.hsv_white2)%256 + np.array([0, 5, 0])
        print 'HSV_white1: ' + str(self.detector.hsv_white1)
        print 'HSV_white2: ' + str(self.detector.hsv_white2)
        
        # V of white
        self.detector.hsv_white1 = (self.detector.hsv_white1)%256 + np.array([0, 0, 5])
        print 'HSV_white1: ' + str(self.detector.hsv_white1)
        print 'HSV_white2: ' + str(self.detector.hsv_white2)
 
        # H of yellow
        self.detector.hsv_yellow1 = (self.detector.hsv_yellow1)%256 + np.array([1, 0, 0])
        print 'HSV_yellow1: ' + str(self.detector.hsv_yellow1)
        print 'HSV_yellow2: ' + str(self.detector.hsv_yellow2)

        self.detector.hsv_yellow2 = (self.detector.hsv_yellow2)%256 - np.array([1, 0, 0])
        print 'HSV_yellow1: ' + str(self.detector.hsv_yellow1)
        print 'HSV_yellow2: ' + str(self.detector.hsv_yellow2)
 
        # S of yellow
        self.detector.hsv_yellow1 = (self.detector.hsv_yellow1)%256 + np.array([0, 5, 0])
        print 'HSV_yellow1: ' + str(self.detector.hsv_yellow1)
        print 'HSV_yellow2: ' + str(self.detector.hsv_yellow2)
 
        # V of yellow
        self.detector.hsv_yellow1 = (self.detector.hsv_yellow1)%256 + np.array([0, 0, 5])
        print 'HSV_yellow1: ' + str(self.detector.hsv_yellow1)
        print 'HSV_yellow2: ' + str(self.detector.hsv_yellow2)
        
        # Lower threshold of Canny edge
        self.detector.canny_thresholds = self.detector.canny_thresholds%256 + np.array([5, 0])
        print 'Canny_thresholds: ' + str(self.detector.canny_thresholds)

        # Higher threshold of Canny edge
        self.detector.canny_thresholds = self.detector.canny_thresholds%256 + np.array([0, 5])
        print 'Canny_thresholds: ' + str(self.detector.canny_thresholds)

        # Minimum line length
        self.detector.hough_min_line_length = self.detector.hough_min_line_length%10 + 1  
        print 'Minimum_line_length: ' + str(self.detector.hough_min_line_length)
        
        # Maximum line gap
        self.detector.hough_max_line_gap = self.detector.detector.hough_max_line_gap%10 + 1  
        print 'Maximum_line_gap: ' + str(self.detector.detector.hough_max_line_gap)
        
        # Threshold of Hough transform
        self.detector.hough_threshold = (self.detector.hough_threshold)%50 + 5
        print 'Hough_threshold: ' + str(self.detector.hough_threshold)
        """

    def processImage(self,image_msg):
        """Image callback: decode, resize/crop, detect lines, publish.

        Publishes a SegmentList with endpoints normalized by the working
        image size, and the annotated debug frame.
        """
        image_cv = cv2.imdecode(np.fromstring(image_msg.data, np.uint8), cv2.CV_LOAD_IMAGE_COLOR)
        #image_cv = self.bridge.imgmsg_to_cv2(image_msg, "bgr8")

        # Resize and crop image
        hei_original = image_cv.shape[0]
        wid_original = image_cv.shape[1]
        if self.image_size[0]!=hei_original or self.image_size[1]!=wid_original:
            image_cv = cv2.resize(image_cv, (self.image_size[1], self.image_size[0]))
        image_cv = image_cv[self.top_cutoff:,:,:]

        # Set the image to be detected
        self.detector.setImage(image_cv)

        # Detect lines and normals
        lines_white, normals_white = self.detector.detectLines('white')
        lines_yellow, normals_yellow = self.detector.detectLines('yellow')
        lines_red, normals_red = self.detector.detectLines('red')

        # Draw lines and normals
        self.detector.drawLines(lines_white, (0,0,0))
        self.detector.drawLines(lines_yellow, (255,0,0))
        self.detector.drawLines(lines_red, (0,255,0))
        #self.detector.drawNormals(lines_white, normals_white)
        #self.detector.drawNormals(lines_yellow, normals_yellow)
        #self.detector.drawNormals(lines_red, normals_red)

        # Convert to normalized pixel coordinates, and add segments to segmentList
        # arr_cutoff restores the cropped rows; arr_ratio divides by the
        # working (width, height) so coordinates end up in [0, 1].
        segmentList = SegmentList()
        arr_cutoff = np.array((0, self.top_cutoff, 0, self.top_cutoff))
        arr_ratio = np.array((1./self.image_size[1], 1./self.image_size[0], 1./self.image_size[1], 1./self.image_size[0]))

        if len(lines_white)>0:
            #rospy.loginfo("[LineDetectorNode] number of white lines = %s" %(len(lines_white)))
            lines_normalized_white = ((lines_white + arr_cutoff) * arr_ratio)
            segmentList.segments.extend(self.toSegmentMsg(lines_normalized_white, normals_white, Segment.WHITE))
        if len(lines_yellow)>0:
            #rospy.loginfo("[LineDetectorNode] number of yellow lines = %s" %(len(lines_yellow)))
            lines_normalized_yellow = ((lines_yellow + arr_cutoff) * arr_ratio)
            segmentList.segments.extend(self.toSegmentMsg(lines_normalized_yellow, normals_yellow, Segment.YELLOW))
        if len(lines_red)>0:
            #rospy.loginfo("[LineDetectorNode] number of red lines = %s" %(len(lines_red)))
            lines_normalized_red = ((lines_red + arr_cutoff) * arr_ratio)
            segmentList.segments.extend(self.toSegmentMsg(lines_normalized_red, normals_red, Segment.RED))

        # Publish segmentList
        self.pub_lines.publish(segmentList)

        # Publish the frame with lines
        image_msg = self.bridge.cv2_to_imgmsg(self.detector.getImage(), "bgr8")
        self.pub_image.publish(image_msg)

    def onShutdown(self):
            # Only logs; no resources are released here.
            rospy.loginfo("[LineDetectorNode] Shutdown.")

    def toSegmentMsg(self,  lines, normals, color):
        """Pack (x1, y1, x2, y2) rows and their (nx, ny) normals into a
        list of Segment messages of the given color."""
        segmentMsgList = []
        for x1,y1,x2,y2,norm_x,norm_y in np.hstack((lines,normals)):
            segment = Segment()
            segment.color = color
            segment.pixels_normalized[0].x = x1
            segment.pixels_normalized[0].y = y1
            segment.pixels_normalized[1].x = x2
            segment.pixels_normalized[1].y = y2
            segment.normal.x = norm_x
            segment.normal.y = norm_y

            segmentMsgList.append(segment)
        return segmentMsgList
Example #18
0
class LineDetectorNode(object):
    """ROS node that detects white/yellow/red line segments in camera frames.

    Subscribes to "~image" (CompressedImage), runs a LineDetector on each
    frame, and publishes a SegmentList on "~segment_list" (pixel
    coordinates scaled back to the original resolution, plus normalized
    coordinates) and an annotated debug Image on "~image_with_lines".
    """

    def __init__(self):
        self.node_name = "Line Detector"

        # Working geometry, with defaults written back to the param server.
        self.hei_image = self.setupParam("~hei_image", 240)
        self.wid_image = self.setupParam("~wid_image", 320)
        self.top_cutoff = self.setupParam("~top_cutoff", 90)

        self.bridge = CvBridge()
        self.detector = LineDetector()

        # ROS I/O: frames in, segments and annotated frames out.
        self.sub_image = rospy.Subscriber("~image", CompressedImage, self.processImage)
        self.pub_lines = rospy.Publisher("~segment_list", SegmentList, queue_size=1)
        self.pub_image = rospy.Publisher("~image_with_lines", Image, queue_size=1)

    def setupParam(self, param_name, default_value):
        """Read a parameter with a fallback default, echo it back to the
        parameter server (for transparency), log it, and return it."""
        val = rospy.get_param(param_name, default_value)
        rospy.set_param(param_name, val)
        rospy.loginfo("[%s] %s = %s " %(self.node_name,param_name,val))
        return val

    def processImage(self,image_msg):
        """Image callback: decode, resize/crop, detect, and publish."""
        raw = np.fromstring(image_msg.data, np.uint8)
        frame = cv2.imdecode(raw, cv2.CV_LOAD_IMAGE_COLOR)

        # Remember the original resolution before shrinking to the
        # working size, then crop the top rows.
        hei_original = frame.shape[0]
        wid_original = frame.shape[1]
        if (hei_original, wid_original) != (self.hei_image, self.wid_image):
            frame = cv2.resize(frame, (self.wid_image, self.hei_image))
        frame = frame[self.top_cutoff:, :, :]

        self.detector.setImage(frame)

        # One (lines, normals) pair per colour class.
        lines_white, normals_white = self.detector.detectLines('white')
        lines_yellow, normals_yellow = self.detector.detectLines('yellow')
        lines_red, normals_red = self.detector.detectLines('red')

        # Overlay detections on the debug image.
        self.detector.drawLines(lines_white, (0, 0, 0))
        self.detector.drawLines(lines_yellow, (255, 0, 0))
        self.detector.drawLines(lines_red, (0, 255, 0))
        #self.detector.drawNormals(lines_white, normals_white)
        #self.detector.drawNormals(lines_yellow, normals_yellow)
        #self.detector.drawNormals(lines_red, normals_red)

        # Undo the crop and rescale endpoints to the original resolution.
        segmentList = SegmentList()
        arr_cutoff = np.array((0, self.top_cutoff, 0, self.top_cutoff))
        sx = 1. * wid_original / self.wid_image
        sy = 1. * hei_original / self.hei_image
        arr_ratio = np.array((sx, sy, sx, sy))

        if len(lines_white) > 0:
            rospy.loginfo("[LineDetectorNode] number of white lines = %s" %(len(lines_white)))
            scaled = ((lines_white + arr_cutoff) * arr_ratio).astype('int')
            segmentList.segments.extend(
                self.toSegmentMsg(scaled, normals_white, Segment.WHITE,
                                  wid_original, hei_original))
        if len(lines_yellow) > 0:
            rospy.loginfo("[LineDetectorNode] number of yellow lines = %s" %(len(lines_yellow)))
            scaled = ((lines_yellow + arr_cutoff) * arr_ratio).astype('int')
            segmentList.segments.extend(
                self.toSegmentMsg(scaled, normals_yellow, Segment.YELLOW,
                                  wid_original, hei_original))
        if len(lines_red) > 0:
            rospy.loginfo("[LineDetectorNode] number of red lines = %s" %(len(lines_red)))
            scaled = ((lines_red + arr_cutoff) * arr_ratio).astype('int')
            segmentList.segments.extend(
                self.toSegmentMsg(scaled, normals_red, Segment.RED,
                                  wid_original, hei_original))

        self.pub_lines.publish(segmentList)

        # Publish the annotated frame.
        out_msg = self.bridge.cv2_to_imgmsg(self.detector.getImage(), "bgr8")
        self.pub_image.publish(out_msg)

    def onShutdown(self):
        """Log a shutdown notice."""
        rospy.loginfo("[LineDetectorNode] Shutdown.")

    def toSegmentMsg(self,  lines, normals, color, wid, hei):
        """Pack (u1, v1, u2, v2) endpoint rows and their (nu, nv) normals
        into Segment messages, filling both pixel and normalized fields."""
        messages = []
        for row in np.hstack((lines, normals)):
            u1, v1, u2, v2, norm_u, norm_v = row
            seg = Segment()
            seg.color = color
            seg.pixels[0].u = int(u1)
            seg.pixels[0].v = int(v1)
            seg.pixels[1].u = int(u2)
            seg.pixels[1].v = int(v2)
            seg.pixels_normalized[0].x = u1/wid
            seg.pixels_normalized[0].y = v1/hei
            seg.pixels_normalized[1].x = u2/wid
            seg.pixels_normalized[1].y = v2/hei
            seg.normal.x = norm_u
            seg.normal.y = norm_v
            messages.append(seg)
        return messages
Example #19
0
        if self.cv_image is None:
            print('image msg received')
        self.cv_image = self.bridge.imgmsg_to_cv2(image_msg,
                                                  desired_encoding="bgr8")

    def getImage(self):
        """Return the most recently stored OpenCV frame."""
        frame = self.cv_image
        return frame


# print(__name__)
if __name__ == '__main__':
    # Ad-hoc benchmark: pull frames from the ROS subscriber and time
    # repeated yellow-colour detections.
    import cv2
    import time
    from line_detector import LineDetector

    detector = LineDetector()

    subscriber = RosImageSubscriber()
    count = 1
    start = time.time()
    while True:
        # image_frame = subscriber.get_frame(GetFrameRequest())
        # cv_image = subscriber.bridge.imgmsg_to_cv2(image_frame.image, desired_encoding="bgr8")
        # NOTE(review): cv_image may be None before the first message
        # arrives — detect_hsv is called without a guard.
        cv_image = subscriber.cv_image
        rect_image = cv_image

        # u'黄色' means "yellow"; it is a key into the detector's colour table.
        detection, image_color = detector.detect_hsv(rect_image,
                                                     detector.colors[u'黄色'])
        count = count + 1
        # After 100 iterations take an end timestamp; the rest of the
        # timing/report code appears truncated in this chunk.
        if (count > 100):
            end = time.time()
class LineDetectorNode(object):
    """ROS node that detects white/yellow/red line segments in camera frames.

    Subscribes to "~image" (CompressedImage), runs a LineDetector on each
    frame, and publishes a SegmentList on "~segment_list" (coordinates
    normalized by the working image size) plus an annotated debug Image
    on "~image_with_lines".  A 3-second timer invokes cbParamUpdate,
    whose parameter-sweep body is entirely commented out here.
    NOTE(review): Python 2 syntax (print statement) in cbParamUpdate.
    """

    def __init__(self):
        self.node_name = "Line Detector"

        # Working resolution (height, width) and top-row crop from params.
        self.image_size = rospy.get_param('~img_size')
        self.top_cutoff = rospy.get_param('~top_cutoff')

        self.bridge = CvBridge()
        self.detector = LineDetector()

        # HSV thresholds per colour class.  Red gets four arrays —
        # presumably two (low, high) ranges because red hue wraps around;
        # confirm against LineDetector.
        self.detector.hsv_white1 = np.array(rospy.get_param('~hsv_white1'))
        self.detector.hsv_white2 = np.array(rospy.get_param('~hsv_white2'))
        self.detector.hsv_yellow1 = np.array(rospy.get_param('~hsv_yellow1'))
        self.detector.hsv_yellow2 = np.array(rospy.get_param('~hsv_yellow2'))
        self.detector.hsv_red1 = np.array(rospy.get_param('~hsv_red1'))
        self.detector.hsv_red2 = np.array(rospy.get_param('~hsv_red2'))
        self.detector.hsv_red3 = np.array(rospy.get_param('~hsv_red3'))
        self.detector.hsv_red4 = np.array(rospy.get_param('~hsv_red4'))

        # Edge-detection / Hough-transform tuning parameters.
        self.detector.dilation_kernel_size = rospy.get_param(
            '~dilation_kernel_size')
        self.detector.canny_thresholds = rospy.get_param('~canny_thresholds')
        self.detector.hough_min_line_length = rospy.get_param(
            '~hough_min_line_length')
        self.detector.hough_max_line_gap = rospy.get_param(
            '~hough_max_line_gap')
        self.detector.hough_threshold = rospy.get_param('~hough_threshold')

        # ROS I/O: frames in, segments and annotated frames out.
        self.sub_image = rospy.Subscriber("~image", CompressedImage,
                                          self.processImage)
        self.pub_lines = rospy.Publisher("~segment_list",
                                         SegmentList,
                                         queue_size=1)
        self.pub_image = rospy.Publisher("~image_with_lines",
                                         Image,
                                         queue_size=1)

        # Fires every 3 s; currently only prints a banner (sweep disabled).
        self.timer_param = rospy.Timer(rospy.Duration.from_sec(3.0),
                                       self.cbParamUpdate)

    def cbParamUpdate(self, event):
        """Timer callback.  The whole parameter sweep is commented out via
        the triple-quoted string below, so this only prints a banner.
        NOTE(review): the dead code at 'Maximum line gap' references
        self.detector.detector — a typo if ever re-enabled."""
        print '====Parameters Updated===='
        """ 
        # S of white
        self.detector.hsv_white2 = (self.detector.hsv_white2)%256 + np.array([0, 5, 0])
        print 'HSV_white1: ' + str(self.detector.hsv_white1)
        print 'HSV_white2: ' + str(self.detector.hsv_white2)
        
        # V of white
        self.detector.hsv_white1 = (self.detector.hsv_white1)%256 - np.array([0, 0, 10])
        print 'HSV_white1: ' + str(self.detector.hsv_white1)
        print 'HSV_white2: ' + str(self.detector.hsv_white2)
 
        # H of yellow
        self.detector.hsv_yellow1 = (self.detector.hsv_yellow1)%256 + np.array([1, 0, 0])
        print 'HSV_yellow1: ' + str(self.detector.hsv_yellow1)
        print 'HSV_yellow2: ' + str(self.detector.hsv_yellow2)

        self.detector.hsv_yellow2 = (self.detector.hsv_yellow2)%256 - np.array([1, 0, 0])
        print 'HSV_yellow1: ' + str(self.detector.hsv_yellow1)
        print 'HSV_yellow2: ' + str(self.detector.hsv_yellow2)
 
        # S of yellow
        self.detector.hsv_yellow1 = (self.detector.hsv_yellow1)%256 + np.array([0, 5, 0])
        print 'HSV_yellow1: ' + str(self.detector.hsv_yellow1)
        print 'HSV_yellow2: ' + str(self.detector.hsv_yellow2)
 
        # V of yellow
        self.detector.hsv_yellow1 = (self.detector.hsv_yellow1)%256 + np.array([0, 0, 5])
        print 'HSV_yellow1: ' + str(self.detector.hsv_yellow1)
        print 'HSV_yellow2: ' + str(self.detector.hsv_yellow2)
        
        # Lower threshold of Canny edge
        self.detector.canny_thresholds = np.array(self.detector.canny_thresholds)%256 + np.array([5, 0])
        print 'Canny_thresholds: ' + str(self.detector.canny_thresholds)

        # Higher threshold of Canny edge
        self.detector.canny_thresholds = np.array(self.detector.canny_thresholds)%256 + np.array([0, 5])
        print 'Canny_thresholds: ' + str(self.detector.canny_thresholds)

        # Minimum line length
        self.detector.hough_min_line_length = self.detector.hough_min_line_length%10 + 1  
        print 'Minimum_line_length: ' + str(self.detector.hough_min_line_length)
        
        # Maximum line gap
        self.detector.hough_max_line_gap = self.detector.detector.hough_max_line_gap%10 + 1  
        print 'Maximum_line_gap: ' + str(self.detector.detector.hough_max_line_gap)
        
        # Threshold of Hough transform
        self.detector.hough_threshold = (self.detector.hough_threshold)%50 + 5
        print 'Hough_threshold: ' + str(self.detector.hough_threshold)
        """

    def processImage(self, image_msg):
        """Image callback: decode, resize/crop, detect lines, publish.

        Publishes a SegmentList with endpoints normalized by the working
        image size, and the annotated debug frame.
        """
        image_cv = cv2.imdecode(np.fromstring(image_msg.data, np.uint8),
                                cv2.CV_LOAD_IMAGE_COLOR)
        #image_cv = self.bridge.imgmsg_to_cv2(image_msg, "bgr8")

        # Resize and crop image
        hei_original = image_cv.shape[0]
        wid_original = image_cv.shape[1]
        if self.image_size[0] != hei_original or self.image_size[
                1] != wid_original:
            image_cv = cv2.resize(image_cv,
                                  (self.image_size[1], self.image_size[0]))
        image_cv = image_cv[self.top_cutoff:, :, :]

        # Set the image to be detected
        self.detector.setImage(image_cv)

        # Detect lines and normals
        lines_white, normals_white = self.detector.detectLines('white')
        lines_yellow, normals_yellow = self.detector.detectLines('yellow')
        lines_red, normals_red = self.detector.detectLines('red')

        # Draw lines and normals
        self.detector.drawLines(lines_white, (0, 0, 0))
        self.detector.drawLines(lines_yellow, (255, 0, 0))
        self.detector.drawLines(lines_red, (0, 255, 0))
        #self.detector.drawNormals(lines_white, normals_white)
        #self.detector.drawNormals(lines_yellow, normals_yellow)
        #self.detector.drawNormals(lines_red, normals_red)

        # Convert to normalized pixel coordinates, and add segments to segmentList
        # arr_cutoff restores the cropped rows; arr_ratio divides by the
        # working (width, height) so coordinates end up in [0, 1].
        segmentList = SegmentList()
        arr_cutoff = np.array((0, self.top_cutoff, 0, self.top_cutoff))
        arr_ratio = np.array(
            (1. / self.image_size[1], 1. / self.image_size[0],
             1. / self.image_size[1], 1. / self.image_size[0]))

        if len(lines_white) > 0:
            #rospy.loginfo("[LineDetectorNode] number of white lines = %s" %(len(lines_white)))
            lines_normalized_white = ((lines_white + arr_cutoff) * arr_ratio)
            segmentList.segments.extend(
                self.toSegmentMsg(lines_normalized_white, normals_white,
                                  Segment.WHITE))
        if len(lines_yellow) > 0:
            #rospy.loginfo("[LineDetectorNode] number of yellow lines = %s" %(len(lines_yellow)))
            lines_normalized_yellow = ((lines_yellow + arr_cutoff) * arr_ratio)
            segmentList.segments.extend(
                self.toSegmentMsg(lines_normalized_yellow, normals_yellow,
                                  Segment.YELLOW))
        if len(lines_red) > 0:
            #rospy.loginfo("[LineDetectorNode] number of red lines = %s" %(len(lines_red)))
            lines_normalized_red = ((lines_red + arr_cutoff) * arr_ratio)
            segmentList.segments.extend(
                self.toSegmentMsg(lines_normalized_red, normals_red,
                                  Segment.RED))

        # Publish segmentList
        self.pub_lines.publish(segmentList)

        # Publish the frame with lines
        image_msg = self.bridge.cv2_to_imgmsg(self.detector.getImage(), "bgr8")
        self.pub_image.publish(image_msg)

    def onShutdown(self):
        # Only logs; no resources are released here.
        rospy.loginfo("[LineDetectorNode] Shutdown.")

    def toSegmentMsg(self, lines, normals, color):
        """Pack (x1, y1, x2, y2) rows and their (nx, ny) normals into a
        list of Segment messages of the given color."""
        segmentMsgList = []
        for x1, y1, x2, y2, norm_x, norm_y in np.hstack((lines, normals)):
            segment = Segment()
            segment.color = color
            segment.pixels_normalized[0].x = x1
            segment.pixels_normalized[0].y = y1
            segment.pixels_normalized[1].x = x2
            segment.pixels_normalized[1].y = y2
            segment.normal.x = norm_x
            segment.normal.y = norm_y

            segmentMsgList.append(segment)
        return segmentMsgList
Example #21
0
class GridDetector:
    def __init__(self):
        """Set up the line-detector helper and clear the cached threshold image."""
        self.thresh_image = None
        self.line_detector = LineDetector()

    def find_grid(self, image, show_img=True):
        """Locate the grid in *image* and return its estimated corners.

        Returns None when no lines are found.  When show_img is True, an
        annotated debug view is shown via cv2.imshow.
        """
        # Threshold, then drop small contours that would confuse line fitting.
        self.thresh_image = self.preprocess_image(image)
        filtered = self.filter_small_contours(self.thresh_image)

        lines = self.line_detector.find_lines(filtered)
        if lines is None:
            return None

        finder = PointsFinder(filtered, lines)
        points = finder.get_points()
        lines_with_points = finder.get_lines_with_points()

        corner_finder = CornerFinder()
        estimated_corners = corner_finder.estimate_corners(lines_with_points)

        if show_img:
            debug_view = self.draw_image(filtered, lines, points,
                                         corner_finder.get_middle_lines_ids(),
                                         corner_finder.get_border_lines_ids())
            cv2.imshow('Line & points', debug_view)

        return estimated_corners

    def draw_image(self, thresh, lines, points, middle_lines, border_lines):
        """Render the lines and points onto a BGR copy of the threshold image."""
        canvas = cv2.cvtColor(thresh, cv2.COLOR_GRAY2BGR)
        self.draw_lines(canvas, lines, middle_lines, border_lines)
        self.draw_points(canvas, points)
        return canvas

    @staticmethod
    def preprocess_image(raw_image):
        """Grayscale + adaptive Gaussian threshold of the raw BGR image."""
        gray = cv2.cvtColor(raw_image, cv2.COLOR_BGR2GRAY)
        thresholded = cv2.adaptiveThreshold(gray, 255,
                                            cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                            cv2.THRESH_BINARY, 57, 5)
        return thresholded

    @staticmethod
    def filter_small_contours(thresh):
        """Paint over contours with area < 1500, then return the inverted image."""
        work = np.copy(thresh)
        found = cv2.findContours(work, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        # findContours returns 2 or 3 values depending on the OpenCV version;
        # the contour list is the first or second element respectively.
        contours = found[0] if len(found) == 2 else found[1]
        for contour in contours:
            if cv2.contourArea(contour) < 1500:
                cv2.drawContours(work, [contour], -1, 255, -1)

        return 255 - work

    @staticmethod
    def draw_lines(colored_image, lines, middle_lines_id, border_lines_id):
        """Draw each (rho, theta) Hough line, colour-coded by its role."""
        for line_id, line in enumerate(lines):
            rho, theta = line[0][0], line[0][1]
            a, b = np.cos(theta), np.sin(theta)
            x0, y0 = a * rho, b * rho
            # Extend the line far in both directions so it spans the image.
            pt1 = (int(x0 + 1000 * (-b)), int(y0 + 1000 * a))
            pt2 = (int(x0 - 1000 * (-b)), int(y0 - 1000 * a))
            if line_id in middle_lines_id:
                color = (255, 0, 255)
            elif line_id in border_lines_id:
                color = (0, 255, 0)
            else:
                color = (0, 0, 255)
            cv2.line(colored_image, pt1, pt2, color, 1, cv2.LINE_AA)

    @staticmethod
    def draw_points(img, points):
        for ps, t, l1, l2 in points:
            color = (0, 255, 0)
            if t == PointsFinder.INTERSECTION_CORNER:
                color = (0, 255, 255)
            elif t == PointsFinder.INTERSECTION_CROSS:
                color = (255, 0, 255)

            cv2.circle(img, ps, 5, color, 2)