Code example #1
(votes: 0)
class LineDetectorNode(object):
    """ROS node that detects white/yellow/red lane markings in compressed
    camera frames and publishes them as normalized line segments.

    Subscribes:  ~image (CompressedImage)
    Publishes:   ~segment_list (SegmentList), ~image_with_lines (Image)
    """

    def __init__(self):
        self.node_name = "Line Detector"

        # Working resolution [height, width] and number of rows cropped
        # from the top of each frame before detection.
        self.image_size = rospy.get_param('~img_size')
        self.top_cutoff = rospy.get_param('~top_cutoff')

        self.bridge = CvBridge()
        self.detector = LineDetector()

        # HSV lower/upper bounds per color class; red is configured with
        # two bound pairs (red1/red2 and red3/red4).
        self.detector.hsv_white1 = np.array(rospy.get_param('~hsv_white1'))
        self.detector.hsv_white2 = np.array(rospy.get_param('~hsv_white2'))
        self.detector.hsv_yellow1 = np.array(rospy.get_param('~hsv_yellow1'))
        self.detector.hsv_yellow2 = np.array(rospy.get_param('~hsv_yellow2'))
        self.detector.hsv_red1 = np.array(rospy.get_param('~hsv_red1'))
        self.detector.hsv_red2 = np.array(rospy.get_param('~hsv_red2'))
        self.detector.hsv_red3 = np.array(rospy.get_param('~hsv_red3'))
        self.detector.hsv_red4 = np.array(rospy.get_param('~hsv_red4'))

        # Edge-detection / Hough-transform tuning parameters.
        self.detector.dilation_kernel_size = rospy.get_param(
            '~dilation_kernel_size')
        self.detector.canny_thresholds = rospy.get_param('~canny_thresholds')
        self.detector.hough_min_line_length = rospy.get_param(
            '~hough_min_line_length')
        self.detector.hough_max_line_gap = rospy.get_param(
            '~hough_max_line_gap')
        self.detector.hough_threshold = rospy.get_param('~hough_threshold')

        self.sub_image = rospy.Subscriber("~image", CompressedImage,
                                          self.processImage)
        self.pub_lines = rospy.Publisher("~segment_list",
                                         SegmentList,
                                         queue_size=1)
        self.pub_image = rospy.Publisher("~image_with_lines",
                                         Image,
                                         queue_size=1)

        # Fires cbParamUpdate every 3 seconds.
        self.timer_param = rospy.Timer(rospy.Duration.from_sec(3.0),
                                       self.cbParamUpdate)

    def cbParamUpdate(self, event):
        """Timer callback (every 3 s); only logs that it fired.

        The parameter-sweep code that used to live here (stepping HSV
        bounds and Canny/Hough settings) was already disabled inside a
        triple-quoted string and contained a latent AttributeError
        (`self.detector.detector.hough_max_line_gap`); the dead block has
        been removed.
        """
        print('====Parameters Updated====')

    def processImage(self, image_msg):
        """Run line detection on one compressed frame and publish results.

        Publishes a SegmentList of segments in normalized coordinates and
        an annotated debug image.
        """
        # np.frombuffer replaces the deprecated np.fromstring for raw bytes.
        image_cv = cv2.imdecode(np.frombuffer(image_msg.data, np.uint8),
                                cv2.CV_LOAD_IMAGE_COLOR)

        # Resize to the configured working size, then crop off the top rows.
        hei_original = image_cv.shape[0]
        wid_original = image_cv.shape[1]
        if (self.image_size[0] != hei_original
                or self.image_size[1] != wid_original):
            image_cv = cv2.resize(image_cv,
                                  (self.image_size[1], self.image_size[0]))
        image_cv = image_cv[self.top_cutoff:, :, :]

        # Set the image to be detected
        self.detector.setImage(image_cv)

        # Detect lines and their normals, one pair per color class.
        lines_white, normals_white = self.detector.detectLines('white')
        lines_yellow, normals_yellow = self.detector.detectLines('yellow')
        lines_red, normals_red = self.detector.detectLines('red')

        # Overlay detections on the debug image (colors are BGR tuples).
        self.detector.drawLines(lines_white, (0, 0, 0))
        self.detector.drawLines(lines_yellow, (255, 0, 0))
        self.detector.drawLines(lines_red, (0, 255, 0))

        # Undo the crop offset, then normalize coordinates by image size.
        segmentList = SegmentList()
        arr_cutoff = np.array((0, self.top_cutoff, 0, self.top_cutoff))
        arr_ratio = np.array(
            (1. / self.image_size[1], 1. / self.image_size[0],
             1. / self.image_size[1], 1. / self.image_size[0]))

        for lines, normals, color in (
                (lines_white, normals_white, Segment.WHITE),
                (lines_yellow, normals_yellow, Segment.YELLOW),
                (lines_red, normals_red, Segment.RED)):
            if len(lines) > 0:
                lines_normalized = (lines + arr_cutoff) * arr_ratio
                segmentList.segments.extend(
                    self.toSegmentMsg(lines_normalized, normals, color))

        # Publish segmentList
        self.pub_lines.publish(segmentList)

        # Publish the frame with lines drawn on it.
        image_msg = self.bridge.cv2_to_imgmsg(self.detector.getImage(), "bgr8")
        self.pub_image.publish(image_msg)

    def onShutdown(self):
        """Shutdown hook; only logs."""
        rospy.loginfo("[LineDetectorNode] Shutdown.")

    def toSegmentMsg(self, lines, normals, color):
        """Convert (N, 4) line endpoints and (N, 2) normals into Segment
        messages with the given color constant."""
        segmentMsgList = []
        for x1, y1, x2, y2, norm_x, norm_y in np.hstack((lines, normals)):
            segment = Segment()
            segment.color = color
            segment.pixels_normalized[0].x = x1
            segment.pixels_normalized[0].y = y1
            segment.pixels_normalized[1].x = x2
            segment.pixels_normalized[1].y = y2
            segment.normal.x = norm_x
            segment.normal.y = norm_y

            segmentMsgList.append(segment)
        return segmentMsgList
Code example #2
(votes: 0)
class LineDetectorNode(object):
    """ROS node that detects white/yellow/red lane markings in compressed
    camera frames and publishes them as normalized line segments.

    Subscribes:  ~image (CompressedImage)
    Publishes:   ~segment_list (SegmentList), ~image_with_lines (Image)

    Each incoming frame is processed on a daemon thread; frames that
    arrive while a previous one is still being processed are dropped.
    """

    def __init__(self):
        self.node_name = "Line Detector"

        # Non-blocking lock used to drop frames while one is in flight.
        self.thread_lock = threading.Lock()

        # Constructor of line detector
        self.bridge = CvBridge()
        self.detector = LineDetector()
        self.wb = WhiteBalance()
        # True once the white-balance reference image has been captured.
        self.flag_wb_ref = False

        # Parameters
        self.flag_wb = rospy.get_param('~white_balance')

        # Working resolution [height, width] and top-row crop.
        self.image_size = rospy.get_param('~img_size')
        self.top_cutoff = rospy.get_param('~top_cutoff')

        # HSV lower/upper bounds per color class; red is configured with
        # two bound pairs (red1/red2 and red3/red4).
        self.detector.hsv_white1 = np.array(rospy.get_param('~hsv_white1'))
        self.detector.hsv_white2 = np.array(rospy.get_param('~hsv_white2'))
        self.detector.hsv_yellow1 = np.array(rospy.get_param('~hsv_yellow1'))
        self.detector.hsv_yellow2 = np.array(rospy.get_param('~hsv_yellow2'))
        self.detector.hsv_red1 = np.array(rospy.get_param('~hsv_red1'))
        self.detector.hsv_red2 = np.array(rospy.get_param('~hsv_red2'))
        self.detector.hsv_red3 = np.array(rospy.get_param('~hsv_red3'))
        self.detector.hsv_red4 = np.array(rospy.get_param('~hsv_red4'))

        # Edge-detection / Hough-transform tuning parameters.
        self.detector.dilation_kernel_size = rospy.get_param(
            '~dilation_kernel_size')
        self.detector.canny_thresholds = rospy.get_param('~canny_thresholds')
        self.detector.hough_min_line_length = rospy.get_param(
            '~hough_min_line_length')
        self.detector.hough_max_line_gap = rospy.get_param(
            '~hough_max_line_gap')
        self.detector.hough_threshold = rospy.get_param('~hough_threshold')

        # Publishers
        self.pub_lines = rospy.Publisher("~segment_list",
                                         SegmentList,
                                         queue_size=1)
        self.pub_image = rospy.Publisher("~image_with_lines",
                                         Image,
                                         queue_size=1)

        # Verbose option
        self.verbose = rospy.get_param('~verbose')
        if self.verbose:
            self.toc_pre = rospy.get_time()

        # Subscribers
        self.sub_image = rospy.Subscriber("~image",
                                          CompressedImage,
                                          self.cbImage,
                                          queue_size=1)
        rospy.loginfo("[%s] Initialized." % (self.node_name))

    def cbImage(self, image_msg):
        """Image callback: hand the frame to a daemon thread and return
        immediately so the subscriber queue never backs up."""
        thread = threading.Thread(target=self.processImage, args=(image_msg, ))
        thread.daemon = True
        thread.start()

    def processImage(self, image_msg):
        """Process one frame unless another frame is already in flight.

        Fix: the lock is now released in a ``finally`` block. Previously it
        was released only on the success path, so an exception anywhere in
        the pipeline left the lock held forever and every subsequent frame
        was silently dropped.
        """
        if not self.thread_lock.acquire(False):
            # Another frame is being processed -- drop this one.
            return
        try:
            self._processImage(image_msg)
        finally:
            self.thread_lock.release()

    def _processImage(self, image_msg):
        """Detection pipeline for a single frame (called with lock held)."""
        # Verbose
        if self.verbose:
            rospy.loginfo(
                "[%s] Latency received = %.3f ms" %
                (self.node_name,
                 (rospy.get_time() - image_msg.header.stamp.to_sec()) *
                 1000.0))

        # Decode from compressed image with OpenCV.
        # np.frombuffer replaces the deprecated np.fromstring for raw bytes.
        image_cv = cv2.imdecode(np.frombuffer(image_msg.data, np.uint8),
                                cv2.CV_LOAD_IMAGE_COLOR)

        # Verbose
        if self.verbose:
            self.tic = rospy.get_time()
            rospy.loginfo(
                "[%s] Latency image decompressed = %.3f ms" %
                (self.node_name,
                 (self.tic - image_msg.header.stamp.to_sec()) * 1000.0))

        # White balancing: estimate parameters from the first frame seen.
        if self.flag_wb and (not self.flag_wb_ref):
            self.wb.setRefImg(image_cv)
            rospy.loginfo("[%s] White balance: parameters computed." %
                          (self.node_name))
            print(self.wb.norm_bgr)
            self.flag_wb_ref = True

        # Resize to the configured working size, then crop off the top rows.
        hei_original = image_cv.shape[0]
        wid_original = image_cv.shape[1]
        if (self.image_size[0] != hei_original
                or self.image_size[1] != wid_original):
            image_cv = cv2.resize(image_cv,
                                  (self.image_size[1], self.image_size[0]),
                                  interpolation=cv2.INTER_NEAREST)
        image_cv = image_cv[self.top_cutoff:, :, :]

        # White balancing (in-place correction once the reference exists).
        if self.flag_wb and self.flag_wb_ref:
            self.wb.correctImg(image_cv)

        # Set the image to be detected
        self.detector.setImage(image_cv)

        # Detect lines and their normals, one pair per color class.
        lines_white, normals_white = self.detector.detectLines('white')
        lines_yellow, normals_yellow = self.detector.detectLines('yellow')
        lines_red, normals_red = self.detector.detectLines('red')

        # Overlay detections on the debug image (colors are BGR tuples).
        self.detector.drawLines(lines_white, (0, 0, 0))
        self.detector.drawLines(lines_yellow, (255, 0, 0))
        self.detector.drawLines(lines_red, (0, 255, 0))

        # SegmentList stamped with the source frame's timestamp.
        segmentList = SegmentList()
        segmentList.header.stamp = image_msg.header.stamp

        # Undo the crop offset, then normalize coordinates by image size.
        arr_cutoff = np.array((0, self.top_cutoff, 0, self.top_cutoff))
        arr_ratio = np.array(
            (1. / self.image_size[1], 1. / self.image_size[0],
             1. / self.image_size[1], 1. / self.image_size[0]))
        for lines, normals, color in (
                (lines_white, normals_white, Segment.WHITE),
                (lines_yellow, normals_yellow, Segment.YELLOW),
                (lines_red, normals_red, Segment.RED)):
            if len(lines) > 0:
                lines_normalized = (lines + arr_cutoff) * arr_ratio
                segmentList.segments.extend(
                    self.toSegmentMsg(lines_normalized, normals, color))

        # Verbose
        if self.verbose:
            self.toc = rospy.get_time()
            rospy.loginfo("[%s] Image processing time: %.3f ms" %
                          (self.node_name, (self.toc - self.tic) * 1000.0))
            rospy.loginfo("[%s] Number of white segments = %d" %
                          (self.node_name, len(lines_white)))
            rospy.loginfo("[%s] number of yellow segments = %d" %
                          (self.node_name, len(lines_yellow)))
            rospy.loginfo("[%s] number of red segments = %d" %
                          (self.node_name, len(lines_red)))
            self.toc_pre = self.toc

        # Publish segmentList
        self.pub_lines.publish(segmentList)

        # Publish the frame with lines drawn on it, same timestamp as input.
        image_msg_out = self.bridge.cv2_to_imgmsg(self.detector.getImage(),
                                                  "bgr8")
        image_msg_out.header.stamp = image_msg.header.stamp
        self.pub_image.publish(image_msg_out)

        # Verbose
        if self.verbose:
            rospy.loginfo(
                "[%s] Latency sent = %.3f ms" %
                (self.node_name,
                 (rospy.get_time() - image_msg.header.stamp.to_sec()) *
                 1000.0))

    def onShutdown(self):
        """Shutdown hook; only logs."""
        rospy.loginfo("[LineDetectorNode] Shutdown.")

    def toSegmentMsg(self, lines, normals, color):
        """Convert (N, 4) line endpoints and (N, 2) normals into Segment
        messages with the given color constant."""
        segmentMsgList = []
        for x1, y1, x2, y2, norm_x, norm_y in np.hstack((lines, normals)):
            segment = Segment()
            segment.color = color
            segment.pixels_normalized[0].x = x1
            segment.pixels_normalized[0].y = y1
            segment.pixels_normalized[1].x = x2
            segment.pixels_normalized[1].y = y2
            segment.normal.x = norm_x
            segment.normal.y = norm_y

            segmentMsgList.append(segment)
        return segmentMsgList
Code example #3
(votes: 0)
File: line_detector_node.py — Project: arii/Software
class LineDetectorNode(object):
    """Line-detection node: finds white/yellow/red lines in camera frames
    and publishes them as segments in original-resolution pixel coordinates
    (with normalized coordinates alongside)."""

    def __init__(self):
        self.node_name = "Line Detector"

        # Working resolution and top crop, mirrored back to the param server.
        self.hei_image = self.setupParam("~hei_image", 240)
        self.wid_image = self.setupParam("~wid_image", 320)
        self.top_cutoff = self.setupParam("~top_cutoff", 90)

        self.bridge = CvBridge()
        self.detector = LineDetector()

        self.sub_image = rospy.Subscriber("~image", CompressedImage, self.processImage)
        self.pub_lines = rospy.Publisher("~segment_list", SegmentList, queue_size=1)
        self.pub_image = rospy.Publisher("~image_with_lines", Image, queue_size=1)

    def setupParam(self, param_name, default_value):
        """Fetch a parameter with a fallback default, write the effective
        value back to the parameter server so it is visible, and log it."""
        value = rospy.get_param(param_name, default_value)
        rospy.set_param(param_name, value)
        rospy.loginfo("[%s] %s = %s " % (self.node_name, param_name, value))
        return value

    def processImage(self, image_msg):
        """Run the full detection pipeline on one compressed frame."""
        frame = cv2.imdecode(np.fromstring(image_msg.data, np.uint8), cv2.CV_LOAD_IMAGE_COLOR)

        # Bring the frame to the working resolution, then cut off the top.
        hei_original, wid_original = frame.shape[:2]
        if (hei_original, wid_original) != (self.hei_image, self.wid_image):
            frame = cv2.resize(frame, (self.wid_image, self.hei_image))
        frame = frame[self.top_cutoff:, :, :]

        self.detector.setImage(frame)

        # One (lines, normals) pair per color class.
        lines_white, normals_white = self.detector.detectLines('white')
        lines_yellow, normals_yellow = self.detector.detectLines('yellow')
        lines_red, normals_red = self.detector.detectLines('red')

        # Overlay the detections on the debug image (BGR colors).
        self.detector.drawLines(lines_white, (0, 0, 0))
        self.detector.drawLines(lines_yellow, (255, 0, 0))
        self.detector.drawLines(lines_red, (0, 255, 0))

        # Map cropped/resized coordinates back to the original resolution.
        segmentList = SegmentList()
        arr_cutoff = np.array((0, self.top_cutoff, 0, self.top_cutoff))
        ratio_w = 1. * wid_original / self.wid_image
        ratio_h = 1. * hei_original / self.hei_image
        arr_ratio = np.array((ratio_w, ratio_h, ratio_w, ratio_h))

        if len(lines_white) > 0:
            rospy.loginfo("[LineDetectorNode] number of white lines = %s" % (len(lines_white)))
            lines_white = ((lines_white + arr_cutoff) * arr_ratio).astype('int')
            segmentList.segments.extend(self.toSegmentMsg(lines_white, normals_white, Segment.WHITE, wid_original, hei_original))
        if len(lines_yellow) > 0:
            rospy.loginfo("[LineDetectorNode] number of yellow lines = %s" % (len(lines_yellow)))
            lines_yellow = ((lines_yellow + arr_cutoff) * arr_ratio).astype('int')
            segmentList.segments.extend(self.toSegmentMsg(lines_yellow, normals_yellow, Segment.YELLOW, wid_original, hei_original))
        if len(lines_red) > 0:
            rospy.loginfo("[LineDetectorNode] number of red lines = %s" % (len(lines_red)))
            lines_red = ((lines_red + arr_cutoff) * arr_ratio).astype('int')
            segmentList.segments.extend(self.toSegmentMsg(lines_red, normals_red, Segment.RED, wid_original, hei_original))

        self.pub_lines.publish(segmentList)

        # Debug image out.
        image_msg = self.bridge.cv2_to_imgmsg(self.detector.getImage(), "bgr8")
        self.pub_image.publish(image_msg)

    def onShutdown(self):
        """Shutdown hook; only logs."""
        rospy.loginfo("[LineDetectorNode] Shutdown.")

    def toSegmentMsg(self, lines, normals, color, wid, hei):
        """Build Segment messages; endpoints are stored both as integer
        pixels and as coordinates normalized by (wid, hei)."""
        msgs = []
        for u1, v1, u2, v2, norm_u, norm_v in np.hstack((lines, normals)):
            seg = Segment()
            seg.color = color
            seg.pixels[0].u = int(u1)
            seg.pixels[0].v = int(v1)
            seg.pixels[1].u = int(u2)
            seg.pixels[1].v = int(v2)
            seg.pixels_normalized[0].x = u1 / wid
            seg.pixels_normalized[0].y = v1 / hei
            seg.pixels_normalized[1].x = u2 / wid
            seg.pixels_normalized[1].y = v2 / hei
            seg.normal.x = norm_u
            seg.normal.y = norm_v

            msgs.append(seg)
        return msgs
Code example #4
(votes: 0)
class LineDetectorNode(object):
    """Detect white/yellow/red lane lines in compressed camera frames and
    publish them as a SegmentList of normalized segments.

    Subscribes:  ~image (CompressedImage)
    Publishes:   ~segment_list (SegmentList), ~image_with_lines (Image)

    A 3-second timer (cbParamUpdate) perturbs one detector parameter at
    runtime -- apparently a manual tuning aid; most of the sweep code is
    disabled inside a triple-quoted string.
    """
    def __init__(self):
        self.node_name = "Line Detector"

        # Working resolution [height, width] and number of rows cropped
        # from the top of each frame before detection.
        self.image_size = rospy.get_param('~img_size')
        self.top_cutoff = rospy.get_param('~top_cutoff')

        self.bridge = CvBridge()
        self.detector = LineDetector()

        # HSV lower/upper bounds per color class. Red is configured with
        # two bound pairs (red1/red2 and red3/red4) -- presumably because
        # red hue wraps around the hue axis; confirm against LineDetector.
        self.detector.hsv_white1 = np.array(rospy.get_param('~hsv_white1'))
        self.detector.hsv_white2 = np.array(rospy.get_param('~hsv_white2'))
        self.detector.hsv_yellow1 = np.array(rospy.get_param('~hsv_yellow1'))
        self.detector.hsv_yellow2 = np.array(rospy.get_param('~hsv_yellow2'))
        self.detector.hsv_red1 = np.array(rospy.get_param('~hsv_red1'))
        self.detector.hsv_red2 = np.array(rospy.get_param('~hsv_red2'))
        self.detector.hsv_red3 = np.array(rospy.get_param('~hsv_red3'))
        self.detector.hsv_red4 = np.array(rospy.get_param('~hsv_red4'))

        # Edge-detection / Hough-transform tuning parameters.
        self.detector.dilation_kernel_size = rospy.get_param('~dilation_kernel_size')
        self.detector.canny_thresholds = rospy.get_param('~canny_thresholds')
        self.detector.hough_min_line_length = rospy.get_param('~hough_min_line_length')
        self.detector.hough_max_line_gap    = rospy.get_param('~hough_max_line_gap')
        self.detector.hough_threshold = rospy.get_param('~hough_threshold')

        self.sub_image = rospy.Subscriber("~image", CompressedImage, self.processImage)
        self.pub_lines = rospy.Publisher("~segment_list", SegmentList, queue_size=1)
        self.pub_image = rospy.Publisher("~image_with_lines", Image, queue_size=1)

        # Fires cbParamUpdate every 3 seconds.
        self.timer_param = rospy.Timer(rospy.Duration.from_sec(3.0),self.cbParamUpdate)

    def cbParamUpdate(self,event):
        # Timer callback: bumps the S channel of the white upper HSV bound
        # by 5 (mod 256) every 3 seconds -- a live parameter-sweep aid.
        print '====Parameters Updated===='

        # S of white
        self.detector.hsv_white2 = (self.detector.hsv_white2)%256 + np.array([0, 5, 0])
        print 'HSV_white1: ' + str(self.detector.hsv_white1)
        print 'HSV_white2: ' + str(self.detector.hsv_white2)

        # NOTE(review): the triple-quoted string below is disabled sweep
        # code for the remaining parameters. It contains a typo
        # (`self.detector.detector.hough_max_line_gap`) that would raise
        # AttributeError if it were ever re-enabled.
        """ 
        # S of white
        self.detector.hsv_white2 = (self.detector.hsv_white2)%256 + np.array([0, 5, 0])
        print 'HSV_white1: ' + str(self.detector.hsv_white1)
        print 'HSV_white2: ' + str(self.detector.hsv_white2)
        
        # V of white
        self.detector.hsv_white1 = (self.detector.hsv_white1)%256 + np.array([0, 0, 5])
        print 'HSV_white1: ' + str(self.detector.hsv_white1)
        print 'HSV_white2: ' + str(self.detector.hsv_white2)
 
        # H of yellow
        self.detector.hsv_yellow1 = (self.detector.hsv_yellow1)%256 + np.array([1, 0, 0])
        print 'HSV_yellow1: ' + str(self.detector.hsv_yellow1)
        print 'HSV_yellow2: ' + str(self.detector.hsv_yellow2)

        self.detector.hsv_yellow2 = (self.detector.hsv_yellow2)%256 - np.array([1, 0, 0])
        print 'HSV_yellow1: ' + str(self.detector.hsv_yellow1)
        print 'HSV_yellow2: ' + str(self.detector.hsv_yellow2)
 
        # S of yellow
        self.detector.hsv_yellow1 = (self.detector.hsv_yellow1)%256 + np.array([0, 5, 0])
        print 'HSV_yellow1: ' + str(self.detector.hsv_yellow1)
        print 'HSV_yellow2: ' + str(self.detector.hsv_yellow2)
 
        # V of yellow
        self.detector.hsv_yellow1 = (self.detector.hsv_yellow1)%256 + np.array([0, 0, 5])
        print 'HSV_yellow1: ' + str(self.detector.hsv_yellow1)
        print 'HSV_yellow2: ' + str(self.detector.hsv_yellow2)
        
        # Lower threshold of Canny edge
        self.detector.canny_thresholds = self.detector.canny_thresholds%256 + np.array([5, 0])
        print 'Canny_thresholds: ' + str(self.detector.canny_thresholds)

        # Higher threshold of Canny edge
        self.detector.canny_thresholds = self.detector.canny_thresholds%256 + np.array([0, 5])
        print 'Canny_thresholds: ' + str(self.detector.canny_thresholds)

        # Minimum line length
        self.detector.hough_min_line_length = self.detector.hough_min_line_length%10 + 1  
        print 'Minimum_line_length: ' + str(self.detector.hough_min_line_length)
        
        # Maximum line gap
        self.detector.hough_max_line_gap = self.detector.detector.hough_max_line_gap%10 + 1  
        print 'Maximum_line_gap: ' + str(self.detector.detector.hough_max_line_gap)
        
        # Threshold of Hough transform
        self.detector.hough_threshold = (self.detector.hough_threshold)%50 + 5
        print 'Hough_threshold: ' + str(self.detector.hough_threshold)
        """

    def processImage(self,image_msg):
        # Decode one compressed frame, detect lines, publish a SegmentList
        # in normalized coordinates plus an annotated debug image.
        image_cv = cv2.imdecode(np.fromstring(image_msg.data, np.uint8), cv2.CV_LOAD_IMAGE_COLOR)
        #image_cv = self.bridge.imgmsg_to_cv2(image_msg, "bgr8")

        # Resize and crop image
        hei_original = image_cv.shape[0]
        wid_original = image_cv.shape[1]
        if self.image_size[0]!=hei_original or self.image_size[1]!=wid_original:
            image_cv = cv2.resize(image_cv, (self.image_size[1], self.image_size[0]))
        image_cv = image_cv[self.top_cutoff:,:,:]

        # Set the image to be detected
        self.detector.setImage(image_cv)

        # Detect lines and normals, one (lines, normals) pair per color.
        lines_white, normals_white = self.detector.detectLines('white')
        lines_yellow, normals_yellow = self.detector.detectLines('yellow')
        lines_red, normals_red = self.detector.detectLines('red')

        # Draw lines and normals onto the debug image (BGR colors).
        self.detector.drawLines(lines_white, (0,0,0))
        self.detector.drawLines(lines_yellow, (255,0,0))
        self.detector.drawLines(lines_red, (0,255,0))
        #self.detector.drawNormals(lines_white, normals_white)
        #self.detector.drawNormals(lines_yellow, normals_yellow)
        #self.detector.drawNormals(lines_red, normals_red)

        # Convert to normalized pixel coordinates (undo the crop offset,
        # then divide by image size) and add segments to segmentList.
        segmentList = SegmentList()
        arr_cutoff = np.array((0, self.top_cutoff, 0, self.top_cutoff))
        arr_ratio = np.array((1./self.image_size[1], 1./self.image_size[0], 1./self.image_size[1], 1./self.image_size[0]))

        if len(lines_white)>0:
            #rospy.loginfo("[LineDetectorNode] number of white lines = %s" %(len(lines_white)))
            lines_normalized_white = ((lines_white + arr_cutoff) * arr_ratio)
            segmentList.segments.extend(self.toSegmentMsg(lines_normalized_white, normals_white, Segment.WHITE))
        if len(lines_yellow)>0:
            #rospy.loginfo("[LineDetectorNode] number of yellow lines = %s" %(len(lines_yellow)))
            lines_normalized_yellow = ((lines_yellow + arr_cutoff) * arr_ratio)
            segmentList.segments.extend(self.toSegmentMsg(lines_normalized_yellow, normals_yellow, Segment.YELLOW))
        if len(lines_red)>0:
            #rospy.loginfo("[LineDetectorNode] number of red lines = %s" %(len(lines_red)))
            lines_normalized_red = ((lines_red + arr_cutoff) * arr_ratio)
            segmentList.segments.extend(self.toSegmentMsg(lines_normalized_red, normals_red, Segment.RED))

        # Publish segmentList
        self.pub_lines.publish(segmentList)

        # Publish the frame with lines
        image_msg = self.bridge.cv2_to_imgmsg(self.detector.getImage(), "bgr8")
        self.pub_image.publish(image_msg)

    def onShutdown(self):
            # Shutdown hook; only logs.
            rospy.loginfo("[LineDetectorNode] Shutdown.")

    def toSegmentMsg(self,  lines, normals, color):
        # Convert (N, 4) line endpoints and (N, 2) normals into Segment
        # messages tagged with the given color constant.
        segmentMsgList = []
        for x1,y1,x2,y2,norm_x,norm_y in np.hstack((lines,normals)):
            segment = Segment()
            segment.color = color
            segment.pixels_normalized[0].x = x1
            segment.pixels_normalized[0].y = y1
            segment.pixels_normalized[1].x = x2
            segment.pixels_normalized[1].y = y2
            segment.normal.x = norm_x
            segment.normal.y = norm_y

            segmentMsgList.append(segment)
        return segmentMsgList
Code example #5
(votes: 0)
class LineDetectorNode(object):
    def __init__(self):
        self.node_name = "Line Detector"

        # Thread lock 
        self.thread_lock = threading.Lock()
       
        # Constructor of line detector 
        self.bridge = CvBridge()
        self.detector = LineDetector()
        self.wb = WhiteBalance()
        self.flag_wb_ref = False
       
        # Parameters
        self.flag_wb = rospy.get_param('~white_balance')
        
        self.image_size = rospy.get_param('~img_size')
        self.top_cutoff = rospy.get_param('~top_cutoff')
  
        self.detector.hsv_white1 = np.array(rospy.get_param('~hsv_white1'))
        self.detector.hsv_white2 = np.array(rospy.get_param('~hsv_white2'))
        self.detector.hsv_yellow1 = np.array(rospy.get_param('~hsv_yellow1'))
        self.detector.hsv_yellow2 = np.array(rospy.get_param('~hsv_yellow2'))
        self.detector.hsv_red1 = np.array(rospy.get_param('~hsv_red1'))
        self.detector.hsv_red2 = np.array(rospy.get_param('~hsv_red2'))
        self.detector.hsv_red3 = np.array(rospy.get_param('~hsv_red3'))
        self.detector.hsv_red4 = np.array(rospy.get_param('~hsv_red4'))

        self.detector.dilation_kernel_size = rospy.get_param('~dilation_kernel_size')
        self.detector.canny_thresholds = rospy.get_param('~canny_thresholds')
        self.detector.hough_min_line_length = rospy.get_param('~hough_min_line_length')
        self.detector.hough_max_line_gap    = rospy.get_param('~hough_max_line_gap')
        self.detector.hough_threshold = rospy.get_param('~hough_threshold')

        # Publishers
        self.pub_lines = rospy.Publisher("~segment_list", SegmentList, queue_size=1)
        self.pub_image = rospy.Publisher("~image_with_lines", Image, queue_size=1)
       
        # Verbose option 
        self.verbose = rospy.get_param('~verbose')
        if self.verbose:
            self.toc_pre = rospy.get_time()   

        # Subscribers
        self.sub_image = rospy.Subscriber("~image", CompressedImage, self.cbImage, queue_size=1)
        rospy.loginfo("[%s] Initialized." %(self.node_name))

    def cbImage(self,image_msg):
        # Start a daemon thread to process the image
        thread = threading.Thread(target=self.processImage,args=(image_msg,))
        thread.setDaemon(True)
        thread.start()
        # Returns rightaway

    def processImage(self,image_msg):
        if not self.thread_lock.acquire(False):
            # Return immediately if the thread is locked
            return
        
        # Verbose
        if self.verbose:
            rospy.loginfo("[%s] Latency received = %.3f ms" %(self.node_name, (rospy.get_time()-image_msg.header.stamp.to_sec()) * 1000.0))
        
        # time_start = rospy.Time.now()
        # time_start = event.last_real
        # msg_age = time_start - image_msg.header.stamp
        # rospy.loginfo("[LineDetector] image age: %s" %msg_age.to_sec())

        # Decode from compressed image
        # with OpenCV
        image_cv = cv2.imdecode(np.fromstring(image_msg.data, np.uint8), cv2.CV_LOAD_IMAGE_COLOR)
        
        # with PIL Image
        # image_cv = jpeg.JPEG(np.fromstring(image_msg.data, np.uint8)).decode()
        
        # with libjpeg-turbo
        # Convert from uncompressed image message
        # image_cv = self.bridge.imgmsg_to_cv2(image_msg, "bgr8")
        
        # Verbose
        if self.verbose:
            self.tic = rospy.get_time()   
            rospy.loginfo("[%s] Latency image decompressed = %.3f ms" %(self.node_name, (self.tic-image_msg.header.stamp.to_sec()) * 1000.0))
        
        # White balancing: set reference image to estimate parameters
        if self.flag_wb and (not self.flag_wb_ref):
            # set reference image to estimate parameters
            self.wb.setRefImg(image_cv)
            rospy.loginfo("[%s] White balance: parameters computed." %(self.node_name))
            print self.wb.norm_bgr
            self.flag_wb_ref = True

        # Resize and crop image
        hei_original = image_cv.shape[0]
        wid_original = image_cv.shape[1]
        if self.image_size[0]!=hei_original or self.image_size[1]!=wid_original:
            # image_cv = cv2.GaussianBlur(image_cv, (5,5), 2)
            image_cv = cv2.resize(image_cv, (self.image_size[1], self.image_size[0]), interpolation=cv2.INTER_NEAREST)
        image_cv = image_cv[self.top_cutoff:,:,:]

        # White balancing
        if self.flag_wb and self.flag_wb_ref:
            self.wb.correctImg(image_cv)

        # Set the image to be detected
        self.detector.setImage(image_cv)
	
        # Detect lines and normals
        lines_white, normals_white = self.detector.detectLines('white')
        lines_yellow, normals_yellow = self.detector.detectLines('yellow')
        lines_red, normals_red = self.detector.detectLines('red')

        # Draw lines and normals
        self.detector.drawLines(lines_white, (0,0,0))
        self.detector.drawLines(lines_yellow, (255,0,0))
        self.detector.drawLines(lines_red, (0,255,0))
        #self.detector.drawNormals(lines_white, normals_white)
        #self.detector.drawNormals(lines_yellow, normals_yellow)
        #self.detector.drawNormals(lines_red, normals_red)

        # SegmentList constructor
        segmentList = SegmentList()
        segmentList.header.stamp = image_msg.header.stamp
        
        # Convert to normalized pixel coordinates, and add segments to segmentList
        arr_cutoff = np.array((0, self.top_cutoff, 0, self.top_cutoff))
        arr_ratio = np.array((1./self.image_size[1], 1./self.image_size[0], 1./self.image_size[1], 1./self.image_size[0]))
        if len(lines_white)>0:
            lines_normalized_white = ((lines_white + arr_cutoff) * arr_ratio)
            segmentList.segments.extend(self.toSegmentMsg(lines_normalized_white, normals_white, Segment.WHITE))
        if len(lines_yellow)>0:
            lines_normalized_yellow = ((lines_yellow + arr_cutoff) * arr_ratio)
            segmentList.segments.extend(self.toSegmentMsg(lines_normalized_yellow, normals_yellow, Segment.YELLOW))
        if len(lines_red)>0:
            lines_normalized_red = ((lines_red + arr_cutoff) * arr_ratio)
            segmentList.segments.extend(self.toSegmentMsg(lines_normalized_red, normals_red, Segment.RED))
        
        # Verbose
        if self.verbose:
            self.toc = rospy.get_time()
            rospy.loginfo("[%s] Image processing time: %.3f ms" %(self.node_name, (self.toc-self.tic)*1000.0))
            rospy.loginfo("[%s] Number of white segments = %d" %(self.node_name, len(lines_white)))
            rospy.loginfo("[%s] number of yellow segments = %d" %(self.node_name, len(lines_yellow)))
            rospy.loginfo("[%s] number of red segments = %d" %(self.node_name, len(lines_red)))
            self.toc_pre = self.toc
 
        # Publish segmentList
        self.pub_lines.publish(segmentList)
        # time_spent = rospy.Time.now() - time_start
        # rospy.loginfo("[LineDetectorNode] Spent: %s" %(time_spent.to_sec()))
         
        # Publish the frame with lines
        image_msg_out = self.bridge.cv2_to_imgmsg(self.detector.getImage(), "bgr8")
        image_msg_out.header.stamp = image_msg.header.stamp
        self.pub_image.publish(image_msg_out)
        # time_spent = rospy.Time.now() - time_start
        # rospy.loginfo("[LineDetectorNode] Spent on img: %s" %(time_spent.to_sec()))

        # Verbose
        if self.verbose:
            rospy.loginfo("[%s] Latency sent = %.3f ms" %(self.node_name, (rospy.get_time()-image_msg.header.stamp.to_sec()) * 1000.0))

        # Release the thread lock
        self.thread_lock.release()

    def onShutdown(self):
            rospy.loginfo("[LineDetectorNode] Shutdown.")
            
    def toSegmentMsg(self,  lines, normals, color):
        
        segmentMsgList = []
        for x1,y1,x2,y2,norm_x,norm_y in np.hstack((lines,normals)):
            segment = Segment()
            segment.color = color
            segment.pixels_normalized[0].x = x1
            segment.pixels_normalized[0].y = y1
            segment.pixels_normalized[1].x = x2
            segment.pixels_normalized[1].y = y2
            segment.normal.x = norm_x
            segment.normal.y = norm_y
             
            segmentMsgList.append(segment)
        return segmentMsgList