def __init__(self):
        self.node_name = rospy.get_name()
        self.image_lock = threading.Lock()

        self.ai = AntiInstagram()

        # XXX: read parameters
        # XXX: parameters need to go into the config file; ask aleks about the hierarchy
        self.interval = self.setup_parameter("~ai_interval", 10)
        self.color_balance_percentage = self.setup_parameter(
            "~cb_percentage", 0.8)  # XXX: change in all launch files
        self.output_scale = self.setup_parameter(
            "~scale_percent", 0.4)  # XXX: change in all launch files
        self.calculation_scale = self.setup_parameter("~resize", 0.2)

        self.bridge = CvBridge()

        self.image = None

        rospy.Timer(rospy.Duration(self.interval),
                    self.calculate_new_parameters)

        self.uncorrected_image_subscriber = rospy.Subscriber(
            '~uncorrected_image/compressed',
            CompressedImage,
            self.process_image,
            buff_size=921600,
            queue_size=1)

        self.corrected_image_publisher = rospy.Publisher(
            "~corrected_image/compressed", CompressedImage, queue_size=1)
	def __init__(self):
		self.node_name = rospy.get_name()

		self.active = True
		self.image_pub_switch = rospy.get_param("~publish_corrected_image",False)
		
        # Initialize publishers and subscribers
		self.pub_image = rospy.Publisher("~corrected_image",Image,queue_size=1)
		self.pub_health = rospy.Publisher("~health",AntiInstagramHealth,queue_size=1,latch=True)
		self.pub_transform = rospy.Publisher("~transform",AntiInstagramTransform,queue_size=1,latch=True)

		#self.sub_switch = rospy.Subscriber("~switch",BoolStamped, self.cbSwitch, queue_size=1)
		#self.sub_image = rospy.Subscriber("~uncorrected_image",Image,self.cbNewImage,queue_size=1)
		self.sub_image = rospy.Subscriber("~uncorrected_image", CompressedImage, self.cbNewImage,queue_size=1)
		self.sub_click = rospy.Subscriber("~click", BoolStamped, self.cbClick, queue_size=1)

		# Verbose option 
		self.verbose = rospy.get_param('~verbose',True)  

		# Initialize health message
		self.health = AntiInstagramHealth()

		# Initialize transform message
		self.transform = AntiInstagramTransform()
		# FIXME: read default from configuration and publish it

		self.ai = AntiInstagram()
		self.corrected_image = Image()
		self.bridge = CvBridge()

		self.image_msg = None
		self.click_on = False
    def __init__(self):
        self.node_name = "LineDetectorNode"

        self.bw_1 = 0
        self.bw_2 = 0

        # Thread lock
        self.thread_lock = threading.Lock()

        # Constructor of line detector
        self.bridge = CvBridge()

        self.active = True

        self.stats = Stats()

        # Only be verbose every 10 cycles
        self.intermittent_interval = 100
        self.intermittent_counter = 0

        # color correction
        self.ai = AntiInstagram()

        # these will be added if it becomes verbose
        self.pub_edge = None
        self.pub_colorSegment = None

        self.detector = None
        self.verbose = None
        self.updateParams(None)

        # Publishers
        self.pub_lines = rospy.Publisher("~segment_list",
                                         SegmentList,
                                         queue_size=1)
        self.pub_image = rospy.Publisher("~image_with_lines",
                                         Image,
                                         queue_size=1)

        # Subscribers
        self.sub_image = rospy.Subscriber("~image",
                                          CompressedImage,
                                          self.cbImage,
                                          queue_size=1)
        self.sub_transform = rospy.Subscriber("~transform",
                                              AntiInstagramTransform,
                                              self.cbTransform,
                                              queue_size=1)
        self.sub_switch = rospy.Subscriber("~switch",
                                           BoolStamped,
                                           self.cbSwitch,
                                           queue_size=1)

        rospy.loginfo("[%s] Initialized (verbose = %s)." %
                      (self.node_name, self.verbose))

        rospy.Timer(rospy.Duration.from_sec(2.0), self.updateParams)
Example #4
    def __init__(self):
        self.node_name = rospy.get_name()

        self.active = True
        self.locked = False

        self.image_pub_switch = rospy.get_param("~publish_corrected_image",
                                                False)

        # Initialize publishers and subscribers
        self.pub_image = rospy.Publisher("~corrected_image",
                                         Image,
                                         queue_size=1)
        self.pub_health = rospy.Publisher("~health",
                                          AntiInstagramHealth,
                                          queue_size=1,
                                          latch=True)
        self.pub_transform = rospy.Publisher("~transform",
                                             AntiInstagramTransform,
                                             queue_size=1,
                                             latch=True)

        #self.sub_switch = rospy.Subscriber("~switch",BoolStamped, self.cbSwitch, queue_size=1)
        #self.sub_image = rospy.Subscriber("~uncorrected_image",Image,self.cbNewImage,queue_size=1)
        self.sub_image = rospy.Subscriber("~uncorrected_image",
                                          CompressedImage,
                                          self.cbNewImage,
                                          queue_size=1)
        self.sub_click = rospy.Subscriber("~click",
                                          BoolStamped,
                                          self.cbClick,
                                          queue_size=1)

        self.trans_timer = rospy.Timer(rospy.Duration.from_sec(20),
                                       self.cbPubTrans, True)
        # Verbose option
        self.verbose = rospy.get_param('line_detector_node/verbose', True)

        # Initialize health message
        self.health = AntiInstagramHealth()

        # Initialize transform message
        self.transform = AntiInstagramTransform()
        # FIXME: read default from configuration and publish it
        self.ai_scale = np.array(
            [2.2728408473337893, 2.2728273205024614, 2.272844346401005])
        self.ai_shift = np.array(
            [21.47181119272393, 37.14653160247276, 4.089311860796786])

        self.ai = AntiInstagram()
        self.corrected_image = Image()
        self.bridge = CvBridge()

        self.image_msg = None
        self.click_on = False
Example #5
    def __init__(self):
        EasyNode.__init__(self, "line_detector2", "line_detector_node2")

        self.detector = None
        self.bridge = CvBridge()
        self.ai = AntiInstagram()
        self.active = True

        # Only be verbose every 10 cycles
        self.intermittent_interval = 100
        self.intermittent_counter = 0
Example #6
    def __init__(self):
        self.node_name = "Line Detector2"

        # Thread lock
        self.thread_lock = threading.Lock()

        # Constructor of line detector
        self.bridge = CvBridge()
        self.detector = LineDetector2()

        # Parameters
        self.active = True

        self.intermittent_interval = 100
        self.intermittent_counter = 0

        self.updateParams(None)

        # color correction
        self.ai = AntiInstagram()

        # Publishers
        self.pub_lines = rospy.Publisher("~segment_list",
                                         SegmentList,
                                         queue_size=1)
        self.pub_image = rospy.Publisher("~image_with_lines",
                                         Image,
                                         queue_size=1)

        # Verbose option
        if self.verbose:
            self.pub_edge = rospy.Publisher("~edge", Image, queue_size=1)
            self.pub_colorSegment = rospy.Publisher("~segment",
                                                    Image,
                                                    queue_size=1)

        # Subscribers
        self.sub_image = rospy.Subscriber("~image",
                                          CompressedImage,
                                          self.cbImage,
                                          queue_size=1)
        self.sub_transform = rospy.Subscriber("~transform",
                                              AntiInstagramTransform,
                                              self.cbTransform,
                                              queue_size=1)
        self.sub_switch = rospy.Subscriber("~switch",
                                           BoolStamped,
                                           self.cbSwitch,
                                           queue_size=1)
        rospy.loginfo("[%s] Initialized." % (self.node_name))

        self.timer = rospy.Timer(rospy.Duration.from_sec(1.0),
                                 self.updateParams)
Example #7
    def test_anti_instagram(self):
        ai = AntiInstagram()

        #### TEST SINGLE TRANSFORM ####
        error_threshold = 500
        # load test images
        failure = False

        imagesetf = [
            "inputimage0.jpg",
            "inputimage1.jpg",
        ]
        gtimagesetf = [
            "groundtruthimage0.jpg",
            "groundtruthimage1.jpg",
        ]

        package_root = get_rospkg_root('anti_instagram')
        def add_dir(x):
            return os.path.join(package_root, 'scripts', x)

        imagesetf = map(add_dir, imagesetf)
        gtimagesetf = map(add_dir, gtimagesetf)


        imageset = map(load_image, imagesetf)
        gtimageset = map(load_image, gtimagesetf)
        errors = self.correctImages(ai, imageset, gtimageset, error_threshold)
        logger.info("Test Image Errors: %s" % errors)

        self.assertLess(max(errors), error_threshold)
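
    # correctImages() is called above but not included in this excerpt. A
    # minimal sketch of what it is expected to do, assuming AntiInstagram's
    # calculateTransform()/applyTransform() API used elsewhere in this file:
    # fit a transform per input image, apply it, and return the mean absolute
    # error against the matching ground-truth image (error_threshold itself is
    # checked by the caller).
    def correctImages(self, ai, imageset, gtimageset, error_threshold):
        errors = []
        for image, gt_image in zip(imageset, gtimageset):
            ai.calculateTransform(image)
            corrected = np.clip(ai.applyTransform(image), 0, 255).astype(np.uint8)
            # mean per-pixel difference to the ground truth
            errors.append(np.mean(np.abs(corrected.astype(float) - gt_image.astype(float))))
        return errors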
    def __init__(self):
        self.node_name = "LineDetectorNode"

        # Thread lock 
        self.thread_lock = threading.Lock()
    
        # Constructor of line detector 
        self.bridge = CvBridge()

        self.active = True
        self.time_switch = True

        self.stats = Stats()

        # Only be verbose every 10 cycles
        self.intermittent_interval = 100
        self.intermittent_counter = 0

        # color correction
        self.ai = AntiInstagram()

        # these will be added if it becomes verbose
        self.pub_edge = None
        self.pub_colorSegment = None

        self.detector = None
        self.verbose = None
        self.updateParams(None)
   
        self.blue = 0
        self.yellow = 0
        self.count = 0
        
 
        # Publishers
        self.pub_lines = rospy.Publisher("~segment_list", SegmentList, queue_size=1)
        self.pub_image = rospy.Publisher("~image_with_lines", Image, queue_size=1)
        self.pub_lane = rospy.Publisher("~have_lines", BoolStamped, queue_size=1, latch=True)
        self.pub_car_cmd = rospy.Publisher("~car_cmd",Twist2DStamped,queue_size=1)
       
        # Subscribers
        self.sub_image = rospy.Subscriber("~image", CompressedImage, self.cbImage, queue_size=1)
        self.sub_transform = rospy.Subscriber("~transform", AntiInstagramTransform, self.cbTransform, queue_size=1)
        self.sub_switch = rospy.Subscriber("~switch", BoolStamped, self.cbSwitch, queue_size=1)

        rospy.loginfo("[%s] Initialized (verbose = %s)." %(self.node_name, self.verbose))

        rospy.Timer(rospy.Duration.from_sec(2.0), self.updateParams)
class LineDetectorNode(object):
    def __init__(self):
        self.node_name = "LineDetectorNode"

        self.bw_1 = 0
        self.bw_2 = 0

        # Thread lock
        self.thread_lock = threading.Lock()

        # Constructor of line detector
        self.bridge = CvBridge()

        self.active = True

        self.stats = Stats()

        # Only be verbose every 10 cycles
        self.intermittent_interval = 100
        self.intermittent_counter = 0

        # color correction
        self.ai = AntiInstagram()

        # these will be added if it becomes verbose
        self.pub_edge = None
        self.pub_colorSegment = None

        self.detector = None
        self.verbose = None
        self.updateParams(None)

        # Publishers
        self.pub_lines = rospy.Publisher("~segment_list",
                                         SegmentList,
                                         queue_size=1)
        self.pub_image = rospy.Publisher("~image_with_lines",
                                         Image,
                                         queue_size=1)

        # Subscribers
        self.sub_image = rospy.Subscriber("~image",
                                          CompressedImage,
                                          self.cbImage,
                                          queue_size=1)
        self.sub_transform = rospy.Subscriber("~transform",
                                              AntiInstagramTransform,
                                              self.cbTransform,
                                              queue_size=1)
        self.sub_switch = rospy.Subscriber("~switch",
                                           BoolStamped,
                                           self.cbSwitch,
                                           queue_size=1)

        rospy.loginfo("[%s] Initialized (verbose = %s)." %
                      (self.node_name, self.verbose))

        rospy.Timer(rospy.Duration.from_sec(2.0), self.updateParams)

    def updateParams(self, _event):
        old_verbose = self.verbose
        self.verbose = rospy.get_param('~verbose', True)
        # self.loginfo('verbose = %r' % self.verbose)
        if self.verbose != old_verbose:
            self.loginfo('Verbose is now %r' % self.verbose)

        self.image_size = rospy.get_param('~img_size')
        self.top_cutoff = rospy.get_param('~top_cutoff')

        if self.detector is None:
            c = rospy.get_param('~detector')
            assert isinstance(c, list) and len(c) == 2, c

            #         if str(self.detector_config) != str(c):
            self.loginfo('new detector config: %s' % str(c))

            self.detector = instantiate(c[0], c[1])


#             self.detector_config = c

        if self.verbose and self.pub_edge is None:
            self.pub_edge = rospy.Publisher("~edge", Image, queue_size=1)
            self.pub_colorSegment = rospy.Publisher("~colorSegment",
                                                    Image,
                                                    queue_size=1)

    def cbSwitch(self, switch_msg):
        self.active = switch_msg.data

    def cbImage(self, image_msg):
        self.stats.received()

        if not self.active:
            return
        # Start a daemon thread to process the image
        thread = threading.Thread(target=self.processImage, args=(image_msg, ))
        thread.setDaemon(True)
        thread.start()
        # Returns right away

    def cbTransform(self, transform_msg):
        self.ai.shift = transform_msg.s[0:3]
        self.ai.scale = transform_msg.s[3:6]

        self.loginfo("AntiInstagram transform received")

    def loginfo(self, s):
        rospy.loginfo('[%s] %s' % (self.node_name, s))

    def intermittent_log_now(self):
        return self.intermittent_counter % self.intermittent_interval == 1

    def intermittent_log(self, s):
        if not self.intermittent_log_now():
            return
        self.loginfo('%3d:%s' % (self.intermittent_counter, s))

    def processImage(self, image_msg):
        if not self.thread_lock.acquire(False):
            self.stats.skipped()
            # Return immediately if the thread is locked
            return

        try:
            self.processImage_(image_msg)
        finally:
            # Release the thread lock
            self.thread_lock.release()

    def processImage_(self, image_msg):

        self.stats.processed()

        if self.intermittent_log_now():
            self.intermittent_log(self.stats.info())
            self.stats.reset()

        tk = TimeKeeper(image_msg)

        self.intermittent_counter += 1

        # Decode from compressed image with OpenCV
        try:
            image_cv = image_cv_from_jpg(image_msg.data)
        except ValueError as e:
            self.loginfo('Could not decode image: %s' % e)
            return

        tk.completed('decoded')

        # Resize and crop image
        hei_original, wid_original = image_cv.shape[0:2]

        if (self.image_size[0] != hei_original
                or self.image_size[1] != wid_original):
            # image_cv = cv2.GaussianBlur(image_cv, (5,5), 2)
            image_cv = cv2.resize(image_cv,
                                  (self.image_size[1], self.image_size[0]),
                                  interpolation=cv2.INTER_NEAREST)
        image_cv = image_cv[self.top_cutoff:, :, :]

        tk.completed('resized')

        # apply color correction: AntiInstagram
        image_cv_corr = self.ai.applyTransform(image_cv)
        image_cv_corr = cv2.convertScaleAbs(image_cv_corr)

        tk.completed('corrected')

        # Set the image to be detected
        self.detector.setImage(image_cv_corr)

        # Detect lines and normals

        gray = cv2.cvtColor(image_cv_corr, cv2.COLOR_BGR2GRAY)
        edges = cv2.Canny(gray, 100, 200, apertureSize=3)
        lines = cv2.HoughLinesP(edges, 1, np.pi / 180, 50, np.empty(1), 3, 1)

        hsv_yellow1 = (25, 50, 50)
        hsv_yellow2 = (45, 255, 255)

        hsv_blue1 = (100, 90, 80)
        hsv_blue2 = (150, 255, 155)

        #change color space to HSV
        hsv = cv2.cvtColor(image_cv, cv2.COLOR_BGR2HSV)

        #find the color
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))

        yellow = cv2.inRange(hsv, hsv_yellow1, hsv_yellow2)
        yellow = cv2.dilate(yellow, kernel)
        self.bw_1 = yellow

        blue = cv2.inRange(hsv, hsv_blue1, hsv_blue2)
        blue = cv2.dilate(blue, kernel)
        self.bw_2 = blue

        edge_color_yellow = cv2.bitwise_and(yellow, edges)
        edge_color_blue = cv2.bitwise_and(blue, edges)

        lines = cv2.HoughLinesP(edges, 1, np.pi / 180, 10, np.empty(1), 3, 1)
        lines_yellow = cv2.HoughLinesP(edge_color_yellow, 1, np.pi / 180, 10,
                                       np.empty(1), 3, 1)
        lines_blue = cv2.HoughLinesP(edge_color_blue, 1, np.pi / 180, 10,
                                     np.empty(1), 3, 1)

        if lines_yellow is not None:
            lines_yellow = np.array(lines_yellow[0])
            print("found yellow lines")
        else:
            lines_yellow = []
            print("no yellow lines")

        if lines_blue is not None:
            lines_blue = np.array(lines_blue[0])
            print("found blue lines")
        else:
            lines_blue = []
            print("no blue lines")

        arr_cutoff = np.array((0, 40, 0, 40))
        arr_ratio = np.array((1. / 120, 1. / 160, 1. / 120, 1. / 160))

        lines_1 = lines_yellow
        lines_2 = lines_blue

        normals = []
        centers = []
        if len(lines_2) > 0:
            #find the normalized coordinates
            lines_normalized = ((lines_1 + arr_cutoff) * arr_ratio)

            #find the unit vector
            length = np.sum((lines_1[:, 0:2] - lines_1[:, 2:4])**2,
                            axis=1,
                            keepdims=True)**0.5
            dx = 1. * (lines_1[:, 3:4] - lines_1[:, 1:2]) / length
            dy = 1. * (lines_1[:, 0:1] - lines_1[:, 2:3]) / length

            #find the center point
            centers = np.hstack([(lines_1[:, 0:1] + lines_1[:, 2:3]) / 2,
                                 (lines_1[:, 1:2] + lines_1[:, 3:4]) / 2])

            #find the vectors' direction
            x3 = (centers[:, 0:1] - 4. * dx).astype('int')
            x3[x3 < 0] = 0
            x3[x3 >= 160] = 160 - 1

            y3 = (centers[:, 1:2] - 4. * dy).astype('int')
            y3[y3 < 0] = 0
            y3[y3 >= 120] = 120 - 1

            x4 = (centers[:, 0:1] + 4. * dx).astype('int')
            x4[x4 < 0] = 0
            x4[x4 >= 160] = 160 - 1

            y4 = (centers[:, 1:2] + 4. * dy).astype('int')
            y4[y4 < 0] = 0
            y4[y4 >= 120] = 120 - 1

            flag_signs = (np.logical_and(
                self.bw_2[y3, x3] > 0,
                self.bw_2[y4, x4] > 0)).astype('int') * 2 - 1
            normals = np.hstack([dx, dy]) * flag_signs

            flag = ((lines_1[:, 2] - lines_1[:, 0]) * normals[:, 1] -
                    (lines_1[:, 3] - lines_1[:, 1]) * normals[:, 0]) > 0
            for i in range(len(lines_1)):
                if flag[i]:
                    x1, y1, x2, y2 = lines_1[i, :]
                    lines_1[i, :] = [x2, y2, x1, y1]

        tk.completed('detected')

        # SegmentList constructor
        segmentList = SegmentList()
        segmentList.header.stamp = image_msg.header.stamp

        # Convert to normalized pixel coordinates, and add segments to segmentList
        arr_cutoff = np.array((0, self.top_cutoff, 0, self.top_cutoff))
        arr_ratio = np.array(
            (1. / self.image_size[1], 1. / self.image_size[0],
             1. / self.image_size[1], 1. / self.image_size[0]))
        if len(lines_2) > 0:
            lines_normalized_blue = ((lines_2 + arr_cutoff) * arr_ratio)
            segmentList.segments.extend(
                self.toSegmentMsg(lines_normalized, normals, centers, 0))

        self.intermittent_log('# segments: white %3d ' % (len(lines_2)))

        tk.completed('prepared')

        # Publish segmentList
        self.pub_lines.publish(segmentList)
        tk.completed('--pub_lines--')

        # VISUALIZATION only below

        if self.verbose:

            # Draw lines and normals
            image_with_lines = np.copy(image_cv_corr)
            if len(lines_1) > 0:
                for x1, y1, x2, y2 in lines_1:
                    cx = int(x1 + x2) / 2
                    cy = int(y1 + y2) / 2
                    if cx > 160:
                        cx = 160 - 1
                    elif cx < 0:
                        cx = 0
                    if cy > 120:
                        cy = 120 - 1
                    elif cy < 0:
                        cy = 0
                    if (self.bw_2[cy, cx - 1] == 255
                            and self.bw_1[cy, cx + 1] == 255):
                        cv2.line(image_with_lines, (x1, y1), (x2, y2),
                                 (255, 255, 255))
                        cv2.circle(image_with_lines, (x1, y1), 1,
                                   (0, 255, 0))  #green circle
                        cv2.circle(image_with_lines, (x2, y2), 1,
                                   (255, 0, 0))  #red circle
                    if (self.bw_2[cy, cx + 1] == 255
                            and self.bw_1[cy, cx - 1] == 255):
                        cv2.line(image_with_lines, (x1, y1), (x2, y2),
                                 (255, 255, 255))
                        cv2.circle(image_with_lines, (x1, y1), 1,
                                   (0, 255, 0))  #green circle
                        cv2.circle(image_with_lines, (x2, y2), 1,
                                   (255, 0, 0))  #red circle
            #drawLines(image_with_lines, (lines_2), (255, 255, 255))
            #drawLines(image_with_lines, yellow.lines, (255, 0, 0))
            #drawLines(image_with_lines, red.lines, (0, 255, 0))

            tk.completed('drawn')

            # Publish the frame with lines
            image_msg_out = self.bridge.cv2_to_imgmsg(image_with_lines, "bgr8")
            image_msg_out.header.stamp = image_msg.header.stamp
            self.pub_image.publish(image_msg_out)

            tk.completed('pub_image')

            #         if self.verbose:
            #colorSegment = color_segment(self.bw_1, self.bw_2)
            #edge_msg_out = self.bridge.cv2_to_imgmsg(self.detector.edges, "mono8")
            #colorSegment_msg_out = self.bridge.cv2_to_imgmsg(colorSegment, "bgr8")
            #self.pub_edge.publish(edge_msg_out)
            #self.pub_colorSegment.publish(colorSegment_msg_out)

            tk.completed('pub_edge/pub_segment')

        self.intermittent_log(tk.getall())

    def onShutdown(self):
        self.loginfo("Shutdown.")

    def toSegmentMsg(self, lines, normals, centers, color):
        segmentMsgList = []
        for x1, y1, x2, y2, norm_x, norm_y in np.hstack((lines, normals)):
            segment = Segment()
            segment.color = color
            segment.pixels_normalized[0].x = x1
            segment.pixels_normalized[0].y = y1
            segment.pixels_normalized[1].x = x2
            segment.pixels_normalized[1].y = y2
            segment.normal.x = norm_x
            segment.normal.y = norm_y

            #if  self.bw_2[y4,x4]>0 :
            #   segment = Segment()
            #  segment.color = color
            # segment.pixels_normalized[0].x = x1
            #segment.pixels_normalized[0].y = y1
            #segment.pixels_normalized[1].x = x2
            #segment.pixels_normalized[1].y = y2
            #segment.normal.x = norm_x
            #segment.normal.y = norm_y
            segmentMsgList.append(segment)
        return segmentMsgList
class LineDetectorNode(object):
    def __init__(self):
        self.node_name = "LineDetectorNode"

        # Thread lock 
        self.thread_lock = threading.Lock()
    
        # Constructor of line detector 
        self.bridge = CvBridge()

        self.active = True
        self.time_switch = True

        self.stats = Stats()

        # Only be verbose every 10 cycles
        self.intermittent_interval = 100
        self.intermittent_counter = 0

        # color correction
        self.ai = AntiInstagram()

        # these will be added if it becomes verbose
        self.pub_edge = None
        self.pub_colorSegment = None

        self.detector = None
        self.verbose = None
        self.updateParams(None)
   
        self.blue = 0
        self.yellow = 0
        self.count = 0
        
 
        # Publishers
        self.pub_lines = rospy.Publisher("~segment_list", SegmentList, queue_size=1)
        self.pub_image = rospy.Publisher("~image_with_lines", Image, queue_size=1)
        self.pub_lane = rospy.Publisher("~have_lines", BoolStamped, queue_size=1, latch=True)
        self.pub_car_cmd = rospy.Publisher("~car_cmd",Twist2DStamped,queue_size=1)
       
        # Subscribers
        self.sub_image = rospy.Subscriber("~image", CompressedImage, self.cbImage, queue_size=1)
        self.sub_transform = rospy.Subscriber("~transform", AntiInstagramTransform, self.cbTransform, queue_size=1)
        self.sub_switch = rospy.Subscriber("~switch", BoolStamped, self.cbSwitch, queue_size=1)

        rospy.loginfo("[%s] Initialized (verbose = %s)." %(self.node_name, self.verbose))

        rospy.Timer(rospy.Duration.from_sec(2.0), self.updateParams)


    def updateParams(self, _event):
        old_verbose = self.verbose
        self.verbose = rospy.get_param('~verbose', True)
        # self.loginfo('verbose = %r' % self.verbose)
        if self.verbose != old_verbose:
            self.loginfo('Verbose is now %r' % self.verbose)

        self.image_size = rospy.get_param('~img_size')
        self.top_cutoff = rospy.get_param('~top_cutoff')

        if self.detector is None:
            c = rospy.get_param('~detector')
            assert isinstance(c, list) and len(c) == 2, c
        
#         if str(self.detector_config) != str(c):
            self.loginfo('new detector config: %s' % str(c))

            self.detector = instantiate(c[0], c[1])
#             self.detector_config = c

        if self.verbose and self.pub_edge is None:
            self.pub_edge = rospy.Publisher("~edge", Image, queue_size=1)
            self.pub_colorSegment = rospy.Publisher("~colorSegment", Image, queue_size=1)


    def cbSwitch(self, switch_msg):
        self.active = switch_msg.data

    def cbImage(self, image_msg):
        self.stats.received()

        if not self.active:
            return 
        # Start a daemon thread to process the image
        thread = threading.Thread(target=self.processImage,args=(image_msg,))
        thread.setDaemon(True)
        thread.start()
        # Returns right away

    def cbTransform(self, transform_msg):
        self.ai.shift = transform_msg.s[0:3]
        self.ai.scale = transform_msg.s[3:6]

        self.loginfo("AntiInstagram transform received")

    def loginfo(self, s):
        rospy.loginfo('[%s] %s' % (self.node_name, s))

    def intermittent_log_now(self):
        return self.intermittent_counter % self.intermittent_interval == 1
    
    def intermittent_log(self, s):
        if not self.intermittent_log_now():
            return
        self.loginfo('%3d:%s' % (self.intermittent_counter, s))

    def processImage(self, image_msg):
        if not self.thread_lock.acquire(False):
            self.stats.skipped()
            # Return immediately if the thread is locked
            return

        try:
            self.processImage_(image_msg)
        finally:
            # Release the thread lock
            self.thread_lock.release()

    def processImage_(self, image_msg):

        self.stats.processed()

        if self.intermittent_log_now():
            self.intermittent_log(self.stats.info())
            self.stats.reset()

        tk = TimeKeeper(image_msg)
        
        self.intermittent_counter += 1

        # Decode from compressed image with OpenCV
        try:
            image_cv = image_cv_from_jpg(image_msg.data)
        except ValueError as e:
            self.loginfo('Could not decode image: %s' % e)
            return

        tk.completed('decoded')

        # Resize and crop image
        hei_original, wid_original = image_cv.shape[0:2]

        if self.image_size[0] != hei_original or self.image_size[1] != wid_original:
            # image_cv = cv2.GaussianBlur(image_cv, (5,5), 2)
            image_cv = cv2.resize(image_cv, (self.image_size[1], self.image_size[0]),
                                   interpolation=cv2.INTER_NEAREST)
        image_cv = image_cv[self.top_cutoff:,:,:]

        tk.completed('resized')

        # apply color correction: AntiInstagram
        image_cv_corr = self.ai.applyTransform(image_cv)
        image_cv_corr = cv2.convertScaleAbs(image_cv_corr)

        tk.completed('corrected')

        # set up parameter

        hsv_blue1 = (100,50,50)
        hsv_blue2 = (150,255,255)
        hsv_yellow1 = (25,50,50)
        hsv_yellow2 = (45,255,255)


        # Set the image to be detected
        gray = cv2.cvtColor(image_cv_corr,cv2.COLOR_BGR2GRAY)
        edges = cv2.Canny(gray, 80, 200, apertureSize = 3)
    
        hsv = cv2.cvtColor(image_cv_corr, cv2.COLOR_BGR2HSV)
        yellow = cv2.inRange(hsv, hsv_yellow1, hsv_yellow2)
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3, 3))
        yellow = cv2.dilate(yellow, kernel)

        blue = cv2.inRange(hsv, hsv_blue1, hsv_blue2)
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3, 3))
        blue = cv2.dilate(blue, kernel)

        # Detect lines and normals

        edge_color_yellow = cv2.bitwise_and(yellow, edges)
        lines_yellow = cv2.HoughLinesP(edge_color_yellow, 1, np.pi/180, 10, np.empty(1), 3, 1)
        if lines_yellow is not None:
            lines_yellow = np.array(lines_yellow[0])
        
        else:
            lines_yellow = []

        #edge_color_blue = cv2.bitwise_and(blue, edges)
        #lines_blue = cv2.HoughLinesP(edge_color_blue, 1, np.pi/180, 10, np.empty(1), 3, 1)
        #if lines_blue is not None:
            #lines_blue = np.array(lines_blue[0])

        #else:
            #lines_blue = []

	#print "***************** ",image_cv.shape," *********************"
        #bw_yellow = yellow
        #bw_blue = blue

        self.blue = blue
        self.yellow = yellow
        if len(lines_yellow) > 0:
            lines_yellow,normals_yellow = self.normals(lines_yellow,yellow)

        #if len(lines_blue) > 0:
            #lines_blue,normals_blue = self.normals(lines_blue,bw_blue)


        tk.completed('detected')
     
        # SegmentList constructor
        segmentList = SegmentList()
        segmentList.header.stamp = image_msg.header.stamp
        
        # Convert to normalized pixel coordinates, and add segments to segmentList
        
        
        if len(lines_yellow) > 0:
            segmentList.segments.extend(self.toSegmentMsg(lines_yellow, normals_yellow, Segment.YELLOW))
        if len(segmentList.segments) == 0:
            # no segments found: signal that the lines are lost and stop the car
            if not self.time_switch:
                msgg = BoolStamped()
                msgg.data = False
                self.pub_lane.publish(msgg)
                self.time_switch = True

            car_control_msg = Twist2DStamped()
            car_control_msg.v = 0.0
            car_control_msg.omega = 0.0
            self.pub_car_cmd.publish(car_control_msg)

        #if len(lines_blue) > 0:
            #segmentList.segments.extend(self.toSegmentMsg(lines_blue, normals_blue, Segment.YELLOW))


        self.intermittent_log('# segments:yellow %3d' % (len(lines_yellow)))

        tk.completed('prepared')

        # Publish segmentList
        self.pub_lines.publish(segmentList)

        # VISUALIZATION only below
        
        if self.verbose:

            # Draw lines and normals
            image_with_lines = np.copy(image_cv_corr)
            for x1,y1,x2,y2,norm_x,norm_y in np.hstack((lines_yellow,normals_yellow)):
                x1 = int(x1) 
                x2 = int(x2)
                y1 = int(y1)
                y2 = int(y2)
        
                ox= int((x1+x2)/2)
                oy= int((y1+y2)/2)
        
                cx = (ox+3*norm_x).astype('int')
                cy = (oy+3*norm_y).astype('int') 
        
                ccx = (ox-3*norm_x).astype('int')
                ccy = (oy-3*norm_y).astype('int') 
        
                if cx >158:
                    cx = 158
                elif cx <1:
                    cx = 1
                if ccx >158:
                    ccx = 158
                elif ccx <1:
                    ccx = 1
 
                if cy >=79:
                    cy = 79
                elif cy <1:
                    cy = 1
                if ccy >=79:
                    ccy = 79
                elif ccy <1:
                    ccy = 1

        
                if (blue[cy, cx] == 255 and yellow[ccy,ccx] ==255) or (yellow[cy, cx] == 255 and blue[ccy,ccx] ==255):

                    cv2.line(image_with_lines, (x1,y1), (x2,y2), (0,0,255), 2)
                    cv2.circle(image_with_lines, (x1,y1), 2, (0,255,0))
                    cv2.circle(image_with_lines, (x2,y2), 2, (255,0,0))            

            tk.completed('drawn')

            # Publish the frame with lines
            image_msg_out = self.bridge.cv2_to_imgmsg(image_with_lines, "bgr8")
            image_msg_out.header.stamp = image_msg.header.stamp
            self.pub_image.publish(image_msg_out)

            tk.completed('pub_image')

#         if self.verbose:
            #colorSegment = color_segment(white.area, red.area, yellow.area) 
            #edge_msg_out = self.bridge.cv2_to_imgmsg(self.detector.edges, "mono8")
            #colorSegment_msg_out = self.bridge.cv2_to_imgmsg(colorSegment, "bgr8")
            #self.pub_edge.publish(edge_msg_out)
            #self.pub_colorSegment.publish(colorSegment_msg_out)

            tk.completed('pub_edge/pub_segment')


        self.intermittent_log(tk.getall())


    def onShutdown(self):
        self.loginfo("Shutdown.")
            
    def toSegmentMsg(self,  lines, normals, color):

        arr_cutoff = np.array((0, self.top_cutoff, 0, self.top_cutoff))
        arr_ratio = np.array((1. / self.image_size[1], 1. / self.image_size[0],
                              1. / self.image_size[1], 1. / self.image_size[0]))
        
        segmentMsgList = []
        for x1,y1,x2,y2,norm_x,norm_y in np.hstack((lines,normals)):

            ox= int((x1+x2)/2)
            oy= int((y1+y2)/2)
        
            cx = (ox+3*norm_x).astype('int')
            cy = (oy+3*norm_y).astype('int') 
        
            ccx = (ox-3*norm_x).astype('int')
            ccy = (oy-3*norm_y).astype('int') 
        
            if cx >158:
                cx = 158
            elif cx <1:
                cx = 1
            if ccx >158:
                ccx = 158
            elif ccx <1:
                ccx = 1

            if cy >=79:
                cy = 79
            elif cy <1:
                cy = 1
            if ccy >=79:
                ccy = 79
            elif ccy <1:
                ccy = 1

        
            if (self.blue[cy, cx] == 255 and self.yellow[ccy,ccx] ==255) or (self.yellow[cy, cx] == 255 and self.blue[ccy,ccx] ==255):

                [x1,y1,x2,y2] = (([x1,y1,x2,y2] + arr_cutoff) * arr_ratio)

                segment = Segment()
                segment.color = color
                segment.pixels_normalized[0].x = x1
                segment.pixels_normalized[0].y = y1
                segment.pixels_normalized[1].x = x2
                segment.pixels_normalized[1].y = y2
                segment.normal.x = norm_x
                segment.normal.y = norm_y
                segmentMsgList.append(segment)

                if self.time_switch == True:
                    msgg = BoolStamped()
                    msgg.data = True
                    self.pub_lane.publish(msgg)
                    self.time_switch = False
                    self.count = 0

            

        return segmentMsgList

    def normals(self, lines, bw):
        if len(lines) > 0:
            # unit direction (dx, dy) and center of each line segment
            length = np.sum((lines[:, 0:2] - lines[:, 2:4])**2, axis=1, keepdims=True)**0.5
            dx = 1. * (lines[:, 3:4] - lines[:, 1:2]) / length
            dy = 1. * (lines[:, 0:1] - lines[:, 2:3]) / length
            centers = np.hstack([(lines[:, 0:1] + lines[:, 2:3]) / 2,
                                 (lines[:, 1:2] + lines[:, 3:4]) / 2])

            # sample the color mask a few pixels on either side of the center
            x3 = (centers[:, 0:1] - 3. * dx).astype('int')
            x3[x3 < 0] = 0
            x3[x3 >= 160] = 160 - 1
            y3 = (centers[:, 1:2] - 3. * dy).astype('int')
            y3[y3 < 0] = 0
            y3[y3 >= 80] = 80 - 1

            x4 = (centers[:, 0:1] + 3. * dx).astype('int')
            x4[x4 < 0] = 0
            x4[x4 >= 160] = 160 - 1
            y4 = (centers[:, 1:2] + 3. * dy).astype('int')
            y4[y4 < 0] = 0
            y4[y4 >= 80] = 80 - 1

            # flip the normal so that it points out of the colored region
            flag_signs = (np.logical_and(bw[y3, x3] > 0, bw[y4, x4] == 0)).astype('int') * 2 - 1
            normals = np.hstack([dx, dy]) * flag_signs

            # make each segment's endpoint order consistent with its normal
            flag = ((lines[:, 2] - lines[:, 0]) * normals[:, 1] -
                    (lines[:, 3] - lines[:, 1]) * normals[:, 0]) > 0
            for i in range(len(lines)):
                if flag[i]:
                    x1, y1, x2, y2 = lines[i, :]
                    lines[i, :] = [x2, y2, x1, y1]

            return lines, normals
Example #11
0
class AntiInstagramNode():
    def __init__(self):
        self.node_name = rospy.get_name()

        self.active = True
        self.locked = False

        self.image_pub_switch = rospy.get_param("~publish_corrected_image",
                                                False)

        # Initialize publishers and subscribers
        self.pub_image = rospy.Publisher("~corrected_image",
                                         Image,
                                         queue_size=1)
        self.pub_health = rospy.Publisher("~health",
                                          AntiInstagramHealth,
                                          queue_size=1,
                                          latch=True)
        self.pub_transform = rospy.Publisher("~transform",
                                             AntiInstagramTransform,
                                             queue_size=1,
                                             latch=True)

        #self.sub_switch = rospy.Subscriber("~switch",BoolStamped, self.cbSwitch, queue_size=1)
        #self.sub_image = rospy.Subscriber("~uncorrected_image",Image,self.cbNewImage,queue_size=1)
        self.sub_image = rospy.Subscriber("~uncorrected_image",
                                          CompressedImage,
                                          self.cbNewImage,
                                          queue_size=1)
        self.sub_click = rospy.Subscriber("~click",
                                          BoolStamped,
                                          self.cbClick,
                                          queue_size=1)

        self.trans_timer = rospy.Timer(rospy.Duration.from_sec(20),
                                       self.cbPubTrans, True)
        # Verbose option
        self.verbose = rospy.get_param('line_detector_node/verbose', True)

        # Initialize health message
        self.health = AntiInstagramHealth()

        # Initialize transform message
        self.transform = AntiInstagramTransform()
        # FIXME: read default from configuration and publish it
        self.ai_scale = np.array(
            [2.2728408473337893, 2.2728273205024614, 2.272844346401005])
        self.ai_shift = np.array(
            [21.47181119272393, 37.14653160247276, 4.089311860796786])

        self.ai = AntiInstagram()
        self.corrected_image = Image()
        self.bridge = CvBridge()

        self.image_msg = None
        self.click_on = False

    def cbPubTrans(self, _):
        self.transform.s[0], self.transform.s[1], self.transform.s[
            2] = self.ai_shift
        self.transform.s[3], self.transform.s[4], self.transform.s[
            5] = self.ai_scale

        self.pub_transform.publish(self.transform)
        rospy.loginfo('ai: Color transform published.')

    def cbNewImage(self, image_msg):
        # memorize image
        self.image_msg = image_msg

        if self.image_pub_switch:
            tk = TimeKeeper(image_msg)
            cv_image = self.bridge.imgmsg_to_cv2(image_msg, "bgr8")

            corrected_image_cv2 = self.ai.applyTransform(cv_image)
            tk.completed('applyTransform')

            corrected_image_cv2 = np.clip(corrected_image_cv2, 0,
                                          255).astype(np.uint8)
            self.corrected_image = self.bridge.cv2_to_imgmsg(
                corrected_image_cv2, "bgr8")

            tk.completed('encode')

            self.pub_image.publish(self.corrected_image)

            tk.completed('published')

            if self.verbose:
                rospy.loginfo('ai:\n' + tk.getall())

    def cbClick(self, _):
        # if we have seen an image:
        if self.image_msg is not None:
            self.click_on = not self.click_on
            if self.click_on:
                self.processImage(self.image_msg)
            else:
                self.transform.s = [0, 0, 0, 1, 1, 1]
                self.pub_transform.publish(self.transform)
                rospy.loginfo('ai: Color transform is turned OFF!')

    def processImage(self, msg):
        '''
        Inputs:
            msg - CompressedImage - uncorrected image from the Raspberry Pi camera

        Uses the anti_instagram library to adjust msg so that its color
        temperature matches a Duckietown reference image. Computes the node's
        health and publishes the color transform and the health state. The
        health value reflects how well the estimated transform fits.
        '''

        rospy.loginfo('ai: Computing color transform...')
        tk = TimeKeeper(msg)

        #cv_image = self.bridge.imgmsg_to_cv2(msg,"bgr8")
        try:
            cv_image = image_cv_from_jpg(msg.data)
        except ValueError as e:
            rospy.loginfo('Anti_instagram cannot decode image: %s' % e)
            return

        tk.completed('converted')

        self.ai.calculateTransform(cv_image)

        tk.completed('calculateTransform')

        # if health is much below the threshold value, do not update the color correction and log it.
        if self.ai.health <= 0.001:
            # health is not good

            rospy.loginfo("Health is not good")

        else:
            self.health.J1 = self.ai.health
            self.transform.s[0], self.transform.s[1], self.transform.s[
                2] = self.ai.shift
            self.transform.s[3], self.transform.s[4], self.transform.s[
                5] = self.ai.scale

            rospy.set_param('antiins_shift', self.ai.shift.tolist())
            rospy.set_param('antiins_scale', self.ai.scale.tolist())
            self.pub_health.publish(self.health)
            self.pub_transform.publish(self.transform)
            rospy.loginfo('ai: Color transform published.')
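
# Typical entry point for the node above; a minimal sketch assuming the
# standard rospy pattern (launch-file remappings and shutdown hooks are not
# shown in this excerpt, and the node name is an assumption).
if __name__ == '__main__':
    rospy.init_node('anti_instagram_node', anonymous=False)
    node = AntiInstagramNode()
    rospy.spin()
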
class LineDetectorNode(object):
    def __init__(self):
        self.node_name = "LineDetectorNode"

        # Thread lock 
        self.thread_lock = threading.Lock()
       
        # Constructor of line detector 
        self.bridge = CvBridge()

        self.active = True

        self.stats = Stats()

        # Only be verbose every 10 cycles
        self.intermittent_interval = 100
        self.intermittent_counter = 0

        # color correction
        self.ai = AntiInstagram()

        # these will be added if it becomes verbose
        self.pub_edge = None
        self.pub_colorSegment = None

        self.detector = None
        self.verbose = None
        self.updateParams(None)
            
        # Publishers
        self.pub_lines = rospy.Publisher("~segment_list", SegmentList, queue_size=1)
        self.pub_image = rospy.Publisher("~image_with_lines", Image, queue_size=1)
       
        # Subscribers
        self.sub_image = rospy.Subscriber("~image", CompressedImage, self.cbImage, queue_size=1)
        self.sub_transform = rospy.Subscriber("~transform", AntiInstagramTransform, self.cbTransform, queue_size=1)
        self.sub_switch = rospy.Subscriber("~switch", BoolStamped, self.cbSwitch, queue_size=1)

        rospy.loginfo("[%s] Initialized (verbose = %s)." %(self.node_name, self.verbose))

        rospy.Timer(rospy.Duration.from_sec(2.0), self.updateParams)


    def updateParams(self, _event):
        old_verbose = self.verbose
        self.verbose = rospy.get_param('~verbose', True)
        # self.loginfo('verbose = %r' % self.verbose)
        if self.verbose != old_verbose:
            self.loginfo('Verbose is now %r' % self.verbose)

        self.image_size = rospy.get_param('~img_size')
        self.top_cutoff = rospy.get_param('~top_cutoff')

        if self.detector is None:
            c = rospy.get_param('~detector')
            assert isinstance(c, list) and len(c) == 2, c
        
#         if str(self.detector_config) != str(c):
            self.loginfo('new detector config: %s' % str(c))

            self.detector = instantiate(c[0], c[1])
#             self.detector_config = c

        if self.verbose and self.pub_edge is None:
            self.pub_edge = rospy.Publisher("~edge", Image, queue_size=1)
            self.pub_colorSegment = rospy.Publisher("~colorSegment", Image, queue_size=1)


    def cbSwitch(self, switch_msg):
        self.active = switch_msg.data

    def cbImage(self, image_msg):
        self.stats.received()

        if not self.active:
            return 
        # Start a daemon thread to process the image
        thread = threading.Thread(target=self.processImage,args=(image_msg,))
        thread.setDaemon(True)
        thread.start()
        # Returns right away

    def cbTransform(self, transform_msg):
        self.ai.shift = transform_msg.s[0:3]
        self.ai.scale = transform_msg.s[3:6]

        self.loginfo("AntiInstagram transform received")

    def loginfo(self, s):
        rospy.loginfo('[%s] %s' % (self.node_name, s))

    def intermittent_log_now(self):
        return self.intermittent_counter % self.intermittent_interval == 1
    
    def intermittent_log(self, s):
        if not self.intermittent_log_now():
            return
        self.loginfo('%3d:%s' % (self.intermittent_counter, s))

    def processImage(self, image_msg):
        if not self.thread_lock.acquire(False):
            self.stats.skipped()
            # Return immediately if the thread is locked
            return

        try:
            self.processImage_(image_msg)
        finally:
            # Release the thread lock
            self.thread_lock.release()

    def processImage_(self, image_msg):

        self.stats.processed()

        if self.intermittent_log_now():
            self.intermittent_log(self.stats.info())
            self.stats.reset()

        tk = TimeKeeper(image_msg)
        
        self.intermittent_counter += 1

        # Decode from compressed image with OpenCV
        try:
            image_cv = image_cv_from_jpg(image_msg.data)
        except ValueError as e:
            self.loginfo('Could not decode image: %s' % e)
            return

        tk.completed('decoded')

        # Resize and crop image
        hei_original, wid_original = image_cv.shape[0:2]

        if self.image_size[0] != hei_original or self.image_size[1] != wid_original:
            # image_cv = cv2.GaussianBlur(image_cv, (5,5), 2)
            image_cv = cv2.resize(image_cv, (self.image_size[1], self.image_size[0]),
                                   interpolation=cv2.INTER_NEAREST)
        image_cv = image_cv[self.top_cutoff:,:,:]

        tk.completed('resized')

        # apply color correction: AntiInstagram
        image_cv_corr = self.ai.applyTransform(image_cv)
        image_cv_corr = cv2.convertScaleAbs(image_cv_corr)

        tk.completed('corrected')

        # Set the image to be detected
        self.detector.setImage(image_cv_corr)

        # Detect lines and normals

        white = self.detector.detectLines('white')
        yellow = self.detector.detectLines('yellow')
        red = self.detector.detectLines('red')

        tk.completed('detected')
     
        # SegmentList constructor
        segmentList = SegmentList()
        segmentList.header.stamp = image_msg.header.stamp
        
        # Convert to normalized pixel coordinates, and add segments to segmentList
        arr_cutoff = np.array((0, self.top_cutoff, 0, self.top_cutoff))
        arr_ratio = np.array((1./self.image_size[1], 1./self.image_size[0], 1./self.image_size[1], 1./self.image_size[0]))
        if len(white.lines) > 0:
            lines_normalized_white = ((white.lines + arr_cutoff) * arr_ratio)
            segmentList.segments.extend(self.toSegmentMsg(lines_normalized_white, white.normals, Segment.WHITE))
        if len(yellow.lines) > 0:
            lines_normalized_yellow = ((yellow.lines + arr_cutoff) * arr_ratio)
            segmentList.segments.extend(self.toSegmentMsg(lines_normalized_yellow, yellow.normals, Segment.YELLOW))
        if len(red.lines) > 0:
            lines_normalized_red = ((red.lines + arr_cutoff) * arr_ratio)
            segmentList.segments.extend(self.toSegmentMsg(lines_normalized_red, red.normals, Segment.RED))
        
        self.intermittent_log('# segments: white %3d yellow %3d red %3d' % (len(white.lines),
                len(yellow.lines), len(red.lines)))
        
        tk.completed('prepared')

        # Publish segmentList
        self.pub_lines.publish(segmentList)
        tk.completed('--pub_lines--')

        # VISUALIZATION only below
        
        if self.verbose:

            # Draw lines and normals
            image_with_lines = np.copy(image_cv_corr)
            drawLines(image_with_lines, white.lines, (0, 0, 0))
            drawLines(image_with_lines, yellow.lines, (255, 0, 0))
            drawLines(image_with_lines, red.lines, (0, 255, 0))

            tk.completed('drawn')

            # Publish the frame with lines
            image_msg_out = self.bridge.cv2_to_imgmsg(image_with_lines, "bgr8")
            image_msg_out.header.stamp = image_msg.header.stamp
            self.pub_image.publish(image_msg_out)

            tk.completed('pub_image')

#         if self.verbose:
            colorSegment = color_segment(white.area, red.area, yellow.area) 
            edge_msg_out = self.bridge.cv2_to_imgmsg(self.detector.edges, "mono8")
            colorSegment_msg_out = self.bridge.cv2_to_imgmsg(colorSegment, "bgr8")
            self.pub_edge.publish(edge_msg_out)
            self.pub_colorSegment.publish(colorSegment_msg_out)

            tk.completed('pub_edge/pub_segment')


        self.intermittent_log(tk.getall())


    def onShutdown(self):
        self.loginfo("Shutdown.")
            
    def toSegmentMsg(self,  lines, normals, color):
        qtable = zeros(7,15,3)
        round1 = 0

        segmentMsgList = []
        for x1,y1,x2,y2,norm_x,norm_y in np.hstack((lines,normals)):
            segment = Segment()
            segment.color = color

            map_seg = zeros(7,15)

            # NOTE: map_seg, qtable, round1, s and e are assumed to be initialized
            # earlier in this method (map_seg/qtable as numpy arrays); numpy (as np)
            # and random are assumed to be imported at module level.

            # Rasterize the segment into the coarse 15-column occupancy map.
            for i in range(int(round(x1 * 15)), int(round(x2 * 15))):
                map_seg[i, int(round(i * (y2 - y1) / (x2 - x1)))] = 1

            # Find the free-space gap in column 6 of the map.
            t_pre = map_seg[6, 0]
            for i in range(15):
                t = map_seg[6, i]
                if t < t_pre:
                    s = i
                elif t > t_pre:
                    e = i
                t_pre = t

            goal_x = 6
            goal_y = (s + e) // 2

            # Tabular Q-learning: drive a virtual agent from (2, 8) towards the goal.
            while round1 < 50:

                map_matrix = map_seg.copy()
                round1 = round1 + 1
                position_x = 2
                position_y = 8
                count = 0
                while not (position_x == goal_x and position_y == goal_y):
                    a = 0.9  # learning rate
                    b = 0.8  # discount factor

                    # Locate the free-space gap in the current row.
                    t_pre = map_seg[position_x, 0]
                    for i in range(15):
                        t = map_seg[position_x, i]
                        if t < t_pre:
                            s = i
                        elif t > t_pre:
                            e = i
                        t_pre = t

                    # Reward the agent for staying centred in the gap.
                    dis = (position_y - s) - (e - position_y)
                    if dis <= 0:
                        reward = 5 * dis
                    else:
                        reward = -5 * dis
                    count = count + 1

                    # Greedy action selection over the three actions (indices 1..3),
                    # with a random tie-breaking candidate.
                    rand_action = random.randint(1, 3)
                    q_vals = [qtable[position_x, position_y, 1],
                              qtable[position_x, position_y, 2],
                              qtable[position_x, position_y, 3]]
                    max_index = 1 + int(np.argmax(q_vals))
                    if qtable[position_x, position_y, rand_action] >= qtable[position_x, position_y, max_index]:
                        action = rand_action
                    else:
                        action = max_index
                    map_matrix[position_x, position_y] = count

                    pre_position_x = position_x
                    pre_position_y = position_y

                    # Action 1: move forward; 2: move left; 3: move right.
                    if action == 1:
                        position_x = pre_position_x + 1
                    elif action == 2:
                        position_y = pre_position_y - 1
                    elif action == 3:
                        position_y = pre_position_y + 1

                    # Collision with an occupied cell: stay put, large penalty.
                    if map_seg[position_x, position_y] == 1:
                        position_x = pre_position_x
                        position_y = pre_position_y
                        reward = -100
                        b = 0

                    # Goal reached: large positive reward, terminal update.
                    if position_x == goal_x and position_y == goal_y:
                        reward = 100
                        b = 0

                    max_qtable = max(qtable[position_x, position_y, 1],
                                     qtable[position_x, position_y, 2],
                                     qtable[position_x, position_y, 3])

                    # Q-learning update: Q(s,a) <- Q(s,a) + a * (r + b * max_a' Q(s',a') - Q(s,a))
                    old_q = qtable[pre_position_x, pre_position_y, action]
                    new_q = old_q + a * (reward + b * max_qtable - old_q)
                    qtable[pre_position_x, pre_position_y, action] = new_q

                    # First visited cell (closest to the start) ...
                    for i in range(1, 15):
                        for j in range(1, 7):
                            if map_matrix[i, j] != 0:
                                x1 = i / 15.
                                y1 = j / 7.
                                break

                    # ... and last visited cell (closest to the goal).
                    for i in range(14, 0, -1):
                        for j in range(6, 0, -1):
                            if map_matrix[i, j] != 0:
                                x2 = i / 15.
                                y2 = j / 7.
                                break

            segment.pixels_normalized[0].x = x1
            segment.pixels_normalized[0].y = y1
            segment.pixels_normalized[1].x = x2
            segment.pixels_normalized[1].y = y2
            segment.normal.x = norm_x
            segment.normal.y = norm_y

            segmentMsgList.append(segment)
        return segmentMsgList
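The navigation loop above is a tabular Q-learning scheme; the rule it applies each step is the standard Q-learning update. A minimal standalone sketch of that update, with a hypothetical toy state/action space not tied to the node's map_seg/qtable layout:

import numpy as np

# Q(s, a) <- Q(s, a) + alpha * (r + gamma * max_a' Q(s', a') - Q(s, a))
def q_update(q, state, action, reward, next_state, alpha=0.9, gamma=0.8):
    best_next = np.max(q[next_state])
    q[state, action] += alpha * (reward + gamma * best_next - q[state, action])
    return q

q = np.zeros((4, 3))                       # 4 toy states, 3 actions
q = q_update(q, state=0, action=1, reward=5.0, next_state=2)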
class AntiInstagramNode():
	def __init__(self):
		self.node_name = rospy.get_name()

		self.active = True
		self.image_pub_switch = rospy.get_param("~publish_corrected_image",False)
		
		# Initialize publishers and subscribers
		self.pub_image = rospy.Publisher("~corrected_image",Image,queue_size=1)
		self.pub_health = rospy.Publisher("~health",AntiInstagramHealth,queue_size=1,latch=True)
		self.pub_transform = rospy.Publisher("~transform",AntiInstagramTransform,queue_size=1,latch=True)

		#self.sub_switch = rospy.Subscriber("~switch",BoolStamped, self.cbSwitch, queue_size=1)
		#self.sub_image = rospy.Subscriber("~uncorrected_image",Image,self.cbNewImage,queue_size=1)
		self.sub_image = rospy.Subscriber("~uncorrected_image", CompressedImage, self.cbNewImage,queue_size=1)
		self.sub_click = rospy.Subscriber("~click", BoolStamped, self.cbClick, queue_size=1)

		# Verbose option 
		self.verbose = rospy.get_param('~verbose',True)  

		# Initialize health message
		self.health = AntiInstagramHealth()

		# Initialize transform message
		self.transform = AntiInstagramTransform()
		# FIXME: read default from configuration and publish it

		self.ai = AntiInstagram()
		self.corrected_image = Image()
		self.bridge = CvBridge()

		self.image_msg = None
		self.click_on = False

	def cbNewImage(self,image_msg):
		# memorize image
		self.image_msg = image_msg

		if self.image_pub_switch:
			tk = TimeKeeper(image_msg)
			cv_image = self.bridge.imgmsg_to_cv2(image_msg, "bgr8")

			corrected_image_cv2 = self.ai.applyTransform(cv_image)
			tk.completed('applyTransform')

			corrected_image_cv2 = np.clip(corrected_image_cv2, 0, 255).astype(np.uint8)
			self.corrected_image = self.bridge.cv2_to_imgmsg(corrected_image_cv2, "bgr8")

			tk.completed('encode')

			self.pub_image.publish(self.corrected_image)

			tk.completed('published')

			if self.verbose:
				rospy.loginfo('ai:\n' + tk.getall())

	def cbClick(self, _):
		# if we have seen an image:
		if self.image_msg is not None:
			self.click_on = not self.click_on
			if self.click_on:
				self.processImage(self.image_msg)
			else:
				self.transform.s = [0,0,0,1,1,1]
				self.pub_transform.publish(self.transform)
				rospy.loginfo('ai: Color transform is turned OFF!')
		else:
			rospy.loginfo('No image seen yet')
		
	def processImage(self,msg):
		'''
		Inputs:
			msg - CompressedImage - uncorrected image from raspberry pi camera
		Uses the anti_instagram library to adjust msg so that its color temperature
		matches that of a duckietown reference image. Calculates the health of the node
		and publishes the corrected image and the health state. The health value
		reflects how good the computed transformation is.
		'''

		rospy.loginfo('ai: Computing color transform...')
		tk = TimeKeeper(msg)

		#cv_image = self.bridge.imgmsg_to_cv2(msg,"bgr8")
		try:
			cv_image = image_cv_from_jpg(msg.data)
		except ValueError as e:
			rospy.loginfo('Anti_instagram cannot decode image: %s' % e)
			return

		tk.completed('converted')

		self.ai.calculateTransform(cv_image)

		tk.completed('calculateTransform')


		# FIXME !!!
		if False:
			# health is not good

			rospy.loginfo("Health is not good")

		else:
			self.health.J1 = self.ai.health
			self.transform.s[0], self.transform.s[1], self.transform.s[2] = self.ai.shift
			self.transform.s[3], self.transform.s[4], self.transform.s[5] = self.ai.scale

			self.pub_health.publish(self.health)
			self.pub_transform.publish(self.transform)
			rospy.loginfo('ai: Color transform published!!!')
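Downstream nodes such as the line detectors below consume the published AntiInstagramTransform by splitting s into a three-channel shift and a three-channel scale (see their cbTransform callbacks). The exact arithmetic lives in AntiInstagram.applyTransform; the scale-then-shift convention in this minimal sketch is an assumption:

import numpy as np

def apply_shift_scale(bgr_image, s):
    # Assumed layout, mirroring cbTransform: s[0:3] = per-channel shift, s[3:6] = per-channel scale.
    shift = np.array(s[0:3], dtype=np.float64)
    scale = np.array(s[3:6], dtype=np.float64)
    corrected = bgr_image.astype(np.float64) * scale + shift
    return np.clip(corrected, 0, 255).astype(np.uint8)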
# check if dir exists, create if not
if not os.path.exists(outdir):
    os.makedirs(outdir)


# read the image
input_img = cv2.imread(file, cv2.IMREAD_UNCHANGED)

# let's do the masking!
#surf = identifyLaneSurface(input_img, use_hsv=False, grad_thresh=20)
#input_img = np.expand_dims(surf, -1) * input_img
#cv2.imshow('mask', input_img)
#cv2.waitKey(0)

# create instance of AntiInstagram
ai = AntiInstagram()

ai.calculateTransform(input_img)
print('Transform Calculation completed!')

corrected_img = ai.applyTransform(input_img)
print('Transform applied!')


# write the corrected image
date = datetime.datetime.now().strftime("%H-%M-%S")
path = outdir + '/' + str(date) + '_corrected.jpg'

cv2.imwrite(path, corrected_img)

Example #15
class LineDetectorNode(object):
    def __init__(self):
        self.node_name = "LineDetectorNode"

        # Thread lock 
        self.thread_lock = threading.Lock()
       
        # Constructor of line detector 
        self.bridge = CvBridge()

        self.active = True

        self.stats = Stats()

        # Only be verbose every 10 cycles
        self.intermittent_interval = 100
        self.intermittent_counter = 0

        # color correction
        self.ai = AntiInstagram()

        # these will be added if it becomes verbose
        self.pub_edge = None
        self.pub_colorSegment = None

        self.detector = None
        self.verbose = None
        self.updateParams(None)
            
        # Publishers
        self.pub_lines = rospy.Publisher("~segment_list", SegmentList, queue_size=1)
        self.pub_image = rospy.Publisher("~image_with_lines", Image, queue_size=1)
       
        # Subscribers
        self.sub_image = rospy.Subscriber("~image", CompressedImage, self.cbImage, queue_size=1)
        self.sub_transform = rospy.Subscriber("~transform", AntiInstagramTransform, self.cbTransform, queue_size=1)
        self.sub_switch = rospy.Subscriber("~switch", BoolStamped, self.cbSwitch, queue_size=1)

        rospy.loginfo("[%s] Initialized (verbose = %s)." %(self.node_name, self.verbose))

        rospy.Timer(rospy.Duration.from_sec(2.0), self.updateParams)


    def updateParams(self, _event):
        old_verbose = self.verbose
        self.verbose = rospy.get_param('~verbose', True)
        # self.loginfo('verbose = %r' % self.verbose)
        if self.verbose != old_verbose:
            self.loginfo('Verbose is now %r' % self.verbose)

        self.image_size = rospy.get_param('~img_size')
        self.top_cutoff = rospy.get_param('~top_cutoff')

        if self.detector is None:
            c = rospy.get_param('~detector')
            assert isinstance(c, list) and len(c) == 2, c
        
#         if str(self.detector_config) != str(c):
            self.loginfo('new detector config: %s' % str(c))

            self.detector = instantiate(c[0], c[1])
#             self.detector_config = c

        if self.verbose and self.pub_edge is None:
            self.pub_edge = rospy.Publisher("~edge", Image, queue_size=1)
            self.pub_colorSegment = rospy.Publisher("~colorSegment", Image, queue_size=1)


    def cbSwitch(self, switch_msg):
        self.active = switch_msg.data

    def cbImage(self, image_msg):
        self.stats.received()

        if not self.active:
            return 
        # Start a daemon thread to process the image
        thread = threading.Thread(target=self.processImage,args=(image_msg,))
        thread.setDaemon(True)
        thread.start()
        # Returns right away

    def cbTransform(self, transform_msg):
        self.ai.shift = transform_msg.s[0:3]
        self.ai.scale = transform_msg.s[3:6]

        self.loginfo("AntiInstagram transform received")

    def loginfo(self, s):
        rospy.loginfo('[%s] %s' % (self.node_name, s))

    def intermittent_log_now(self):
        return self.intermittent_counter % self.intermittent_interval == 1
    
    def intermittent_log(self, s):
        if not self.intermittent_log_now():
            return
        self.loginfo('%3d:%s' % (self.intermittent_counter, s))

    def processImage(self, image_msg):
        if not self.thread_lock.acquire(False):
            self.stats.skipped()
            # Return immediately if the thread is locked
            return

        try:
            self.processImage_(image_msg)
        finally:
            # Release the thread lock
            self.thread_lock.release()

    def processImage_(self, image_msg):

        self.stats.processed()

        if self.intermittent_log_now():
            self.intermittent_log(self.stats.info())
            self.stats.reset()

        tk = TimeKeeper(image_msg)
        
        self.intermittent_counter += 1

        # Decode from compressed image with OpenCV
        try:
            image_cv = image_cv_from_jpg(image_msg.data)
        except ValueError as e:
            self.loginfo('Could not decode image: %s' % e)
            return

        tk.completed('decoded')

        # Resize and crop image
        hei_original, wid_original = image_cv.shape[0:2]

        if self.image_size[0] != hei_original or self.image_size[1] != wid_original:
            # image_cv = cv2.GaussianBlur(image_cv, (5,5), 2)
            image_cv = cv2.resize(image_cv, (self.image_size[1], self.image_size[0]),
                                   interpolation=cv2.INTER_NEAREST)
        image_cv = image_cv[self.top_cutoff:,:,:]

        tk.completed('resized')

        # apply color correction: AntiInstagram
        image_cv_corr = self.ai.applyTransform(image_cv)
        image_cv_corr = cv2.convertScaleAbs(image_cv_corr)

        tk.completed('corrected')

        # Set the image to be detected
        self.detector.setImage(image_cv_corr)

        # Detect lines and normals

        white = self.detector.detectLines('white')
        yellow = self.detector.detectLines('yellow')
        red = self.detector.detectLines('red')
def setup():
    img = random_image(Params.shape[0], Params.shape[1])
    ai = AntiInstagram()
    return ai, img
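A hedged usage sketch for the setup() helper above, assuming random_image and Params come from the surrounding benchmark module and that the snippet targets the calculateTransform/applyTransform API used elsewhere in these examples:

import timeit

ai, img = setup()
elapsed = timeit.timeit(lambda: ai.calculateTransform(img), number=1)
print('calculateTransform took %.3f s' % elapsed)
corrected = ai.applyTransform(img)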
Example #17
class AntiInstagramNode():
    def __init__(self):
        self.node_name = rospy.get_name()
        self.image_lock = threading.Lock()

        self.ai = AntiInstagram()

        # XXX: read parameters
        # XXX: parameters need to go inside config file, ask aleks about hierarchy
        self.interval = self.setup_parameter("~ai_interval", 10)
        self.color_balance_percentage = self.setup_parameter(
            "~cb_percentage", 0.8)  # XXX: change in all launch files
        self.output_scale = self.setup_parameter(
            "~scale_percent", 0.4)  # XXX: change in all launch files
        self.calculation_scale = self.setup_parameter("~resize", 0.2)

        self.bridge = CvBridge()

        self.image = None

        rospy.Timer(rospy.Duration(self.interval),
                    self.calculate_new_parameters)

        self.uncorrected_image_subscriber = rospy.Subscriber(
            '~uncorrected_image/compressed',
            CompressedImage,
            self.process_image,
            buff_size=921600,
            queue_size=1)

        self.corrected_image_publisher = rospy.Publisher(
            "~corrected_image/compressed", CompressedImage, queue_size=1)

    def process_image(self, image_msg):
        try:
            self.image_lock.acquire()
            image = bgr_from_jpg(image_msg.data)
            self.image = image
            self.image_lock.release()
        except ValueError as e:
            rospy.loginfo('Anti_instagram cannot decode image: %s' % e)
            self.image_lock.release()
            return

        color_balanced_image = self.ai.apply_color_balance(
            image, self.output_scale)

        if color_balanced_image is None:
            self.calculate_new_parameters(None)
            return

        corrected_image = self.bridge.cv2_to_compressed_imgmsg(
            color_balanced_image)
        corrected_image.header.stamp = image_msg.header.stamp
        self.corrected_image_publisher.publish(corrected_image)

    def calculate_new_parameters(self, event):
        self.image_lock.acquire()
        image = self.image
        self.image_lock.release()

        if image is None:
            rospy.loginfo("[%s] Waiting for first image!" % self.node_name)
            return

        self.ai.calculate_color_balance_thresholds(
            image, self.calculation_scale, self.color_balance_percentage)

        rospy.loginfo("[%s] New parameters computed" % self.node_name)

    def setup_parameter(self, param_name, default_value):
        value = rospy.get_param(param_name, default_value)
        rospy.set_param(param_name, value)
        rospy.loginfo("[%s] %s = %s " % (self.node_name, param_name, value))
        return value
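The same color-balance flow can be exercised offline; a minimal sketch using the method names from the node above, with cv2.imread standing in for bgr_from_jpg and the node's default scale parameters hard-coded:

import cv2

ai = AntiInstagram()
image = cv2.imread('input.jpg')  # hypothetical file, stand-in for bgr_from_jpg(image_msg.data)

# Same two-step flow as the node: compute thresholds, then balance.
ai.calculate_color_balance_thresholds(image, 0.2, 0.8)   # (image, resize, cb_percentage)
balanced = ai.apply_color_balance(image, 0.4)            # (image, scale_percent)
if balanced is not None:
    cv2.imwrite('balanced.jpg', balanced)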
def main():
    # check number of arguments
    if len(sys.argv) != 3:
        print(
            'This program expects exactly two arguments: an image filename and an output directory.'
        )
        sys.exit()

    # store inputs
    file = sys.argv[1]
    outdir = sys.argv[2]

    # check if file exists
    if not os.path.isfile(file):
        print('file not found')
        sys.exit(2)

    # check if dir exists, create if not
    if not os.path.exists(outdir):
        os.makedirs(outdir)

    # read the image
    input_img = cv2.imread(file, cv2.IMREAD_UNCHANGED)

    # let's do the masking!
    surf = identifyLaneSurface(input_img, use_hsv=False, grad_thresh=20)
    input_img = np.expand_dims(surf, -1) * input_img
    print('masked image!')
    cv2.imshow('mask', input_img)
    cv2.waitKey(0)

    # create instance of AntiInstagram
    ai = AntiInstagram()

    ai.calculateTransform(input_img)
    print('Transform Calculation completed!')

    corrected_img = ai.applyTransform(input_img)
    print('Transform applied!')
    cv2.imshow('mask', corrected_img)
    cv2.waitKey(0)

    #  polygons for pic1_smaller.jpg and pic2_smaller.jpg (hardcoded for now)
    polygon_black = [(280, 570), (220, 760), (870, 750), (810, 580)]
    polygon_white = [(781, 431), (975, 660), (1040, 633), (827, 418)]
    polygon_yellow = [(131, 523), (67, 597), (99, 609), (161, 530)]
    polygon_red = [(432, 282), (418, 337), (577, 338), (568, 283)]
    """
    #  polygons for pic3_smaller.jpg and pic2_smaller.jpg (hardcoded for now)
    polygon_black = [(280, 570), (220, 760), (870, 750), (810, 580)]
    polygon_white = [(900, 520), (1000, 640), (1060, 620), (970, 515)]
    polygon_yellow = [(234, 430), (190, 485), (230, 490), (270, 430)]
    polygon_red = [(285, 435), (250, 490), (830, 480), (800, 437)]


    #  polygons for pic4_smaller.jpg (hardcoded for now)
    polygon_black = [(316, 414), (215, 623), (783, 605), (673, 422)]
    polygon_white = [(710, 388), (947, 656), (1018, 620), (788, 400)]
    polygon_yellow = [(148, 474), (94, 537), (133, 542), (184, 475)]
    polygon_red = [(285, 435), (250, 490), (830, 480), (800, 437)]


    #  polygons for pic5_smaller.jpg (hardcoded for now)
    polygon_black = [(354, 418), (291, 612), (804, 590), (677, 396)]
    polygon_white = [(783, 424), (949, 602), (1002, 564), (840, 420)]
    polygon_yellow = [(344, 307), (331, 319), (354, 319), (366, 306)]
    polygon_red = [(135, 325), (119, 332), (325, 316), (332, 309)]
    """

    # create dictionary containing colors
    polygons = {
        'black': polygon_black,
        'white': polygon_white,
        'yellow': polygon_yellow,
        'red': polygon_red
    }

    # initialize estimateError class
    E = estimateError(corrected_img)
    # set polygons
    E.createPolygon(polygons)
    # estimate error
    E.GetErrorEstimation()

    # write the corrected image
    date = datetime.datetime.now().strftime("%H-%M-%S")
    path = outdir + '/' + str(date) + '_polygon.jpg'

    cv2.imwrite(path, E.out_image)
class LineDetectorNode(object):
    def __init__(self):
        self.node_name = "LineDetectorNode"

        # Thread lock 
        self.thread_lock = threading.Lock()
       
        # Constructor of line detector 
        self.bridge = CvBridge()

        self.active = True

        self.stats = Stats()

        # Only be verbose every 10 cycles
        self.intermittent_interval = 100
        self.intermittent_counter = 0

        # color correction
        self.ai = AntiInstagram()

        # these will be added if it becomes verbose
        self.pub_edge = None
        self.pub_colorSegment = None

        self.detector = None
        self.verbose = None
        self.updateParams(None)
            
        # Publishers
        self.pub_lines = rospy.Publisher("~segment_list", SegmentList, queue_size=1)
        self.pub_image = rospy.Publisher("~image_with_lines", Image, queue_size=1)
       
        # Subscribers
        self.sub_image = rospy.Subscriber("~image", CompressedImage, self.cbImage, queue_size=1)
        self.sub_transform = rospy.Subscriber("~transform", AntiInstagramTransform, self.cbTransform, queue_size=1)
        self.sub_switch = rospy.Subscriber("~switch", BoolStamped, self.cbSwitch, queue_size=1)

        rospy.loginfo("[%s] Initialized (verbose = %s)." %(self.node_name, self.verbose))

        rospy.Timer(rospy.Duration.from_sec(2.0), self.updateParams)


    def updateParams(self, _event):
        old_verbose = self.verbose
        self.verbose = rospy.get_param('~verbose', True)
        # self.loginfo('verbose = %r' % self.verbose)
        if self.verbose != old_verbose:
            self.loginfo('Verbose is now %r' % self.verbose)

        self.image_size = rospy.get_param('~img_size')
        self.top_cutoff = rospy.get_param('~top_cutoff')

        if self.detector is None:
            c = rospy.get_param('~detector')
            assert isinstance(c, list) and len(c) == 2, c
        
#         if str(self.detector_config) != str(c):
            self.loginfo('new detector config: %s' % str(c))

            self.detector = instantiate(c[0], c[1])
#             self.detector_config = c

        if self.verbose and self.pub_edge is None:
            self.pub_edge = rospy.Publisher("~edge", Image, queue_size=1)
            self.pub_colorSegment = rospy.Publisher("~colorSegment", Image, queue_size=1)


    def cbSwitch(self, switch_msg):
        self.active = switch_msg.data

    def cbImage(self, image_msg):
        self.stats.received()

        if not self.active:
            return 
        # Start a daemon thread to process the image
        thread = threading.Thread(target=self.processImage,args=(image_msg,))
        thread.setDaemon(True)
        thread.start()
        # Returns right away

    def cbTransform(self, transform_msg):
        self.ai.shift = transform_msg.s[0:3]
        self.ai.scale = transform_msg.s[3:6]

        self.loginfo("AntiInstagram transform received")

    def loginfo(self, s):
        rospy.loginfo('[%s] %s' % (self.node_name, s))

    def intermittent_log_now(self):
        return self.intermittent_counter % self.intermittent_interval == 1
    
    def intermittent_log(self, s):
        if not self.intermittent_log_now():
            return
        self.loginfo('%3d:%s' % (self.intermittent_counter, s))

    def processImage(self, image_msg):
        if not self.thread_lock.acquire(False):
            self.stats.skipped()
            # Return immediately if the thread is locked
            return

        try:
            self.processImage_(image_msg)
        finally:
            # Release the thread lock
            self.thread_lock.release()

    def processImage_(self, image_msg):

        self.stats.processed()

        if self.intermittent_log_now():
            self.intermittent_log(self.stats.info())
            self.stats.reset()

        tk = TimeKeeper(image_msg)
        
        self.intermittent_counter += 1

        # Decode from compressed image with OpenCV
        try:
            image_cv = image_cv_from_jpg(image_msg.data)
        except ValueError as e:
            self.loginfo('Could not decode image: %s' % e)
            return

        tk.completed('decoded')

        # Resize and crop image
        hei_original, wid_original = image_cv.shape[0:2]

        if self.image_size[0] != hei_original or self.image_size[1] != wid_original:
            # image_cv = cv2.GaussianBlur(image_cv, (5,5), 2)
            image_cv = cv2.resize(image_cv, (self.image_size[1], self.image_size[0]),
                                   interpolation=cv2.INTER_NEAREST)
        image_cv = image_cv[self.top_cutoff:,:,:]

        tk.completed('resized')

        # apply color correction: AntiInstagram
        image_cv_corr = self.ai.applyTransform(image_cv)
        image_cv_corr = cv2.convertScaleAbs(image_cv_corr)

        tk.completed('corrected')

        # Set the image to be detected
        self.detector.setImage(image_cv_corr)

        # Detect lines and normals

        white = self.detector.detectLines('white')
        yellow = self.detector.detectLines('yellow')
        red = self.detector.detectLines('red')

        tk.completed('detected')
     
        # SegmentList constructor
        segmentList = SegmentList()
        segmentList.header.stamp = image_msg.header.stamp
        
        # Convert to normalized pixel coordinates, and add segments to segmentList
        arr_cutoff = np.array((0, self.top_cutoff, 0, self.top_cutoff))
        arr_ratio = np.array((1./self.image_size[1], 1./self.image_size[0], 1./self.image_size[1], 1./self.image_size[0]))
        if len(white.lines) > 0:
            lines_normalized_white = ((white.lines + arr_cutoff) * arr_ratio)
            segmentList.segments.extend(self.toSegmentMsg(lines_normalized_white, white.normals, Segment.WHITE))
        if len(yellow.lines) > 0:
            lines_normalized_yellow = ((yellow.lines + arr_cutoff) * arr_ratio)
            segmentList.segments.extend(self.toSegmentMsg(lines_normalized_yellow, yellow.normals, Segment.YELLOW))
        if len(red.lines) > 0:
            lines_normalized_red = ((red.lines + arr_cutoff) * arr_ratio)
            segmentList.segments.extend(self.toSegmentMsg(lines_normalized_red, red.normals, Segment.RED))
        
        self.intermittent_log('# segments: white %3d yellow %3d red %3d' % (len(white.lines),
                len(yellow.lines), len(red.lines)))
        
        tk.completed('prepared')

        # Publish segmentList
        self.pub_lines.publish(segmentList)
        tk.completed('--pub_lines--')

        # VISUALIZATION only below
        
        if self.verbose:

            # Draw lines and normals
            image_with_lines = np.copy(image_cv_corr)
            drawLines(image_with_lines, white.lines, (0, 0, 0))
            drawLines(image_with_lines, yellow.lines, (255, 0, 0))
            drawLines(image_with_lines, red.lines, (0, 255, 0))

            tk.completed('drawn')

            # Publish the frame with lines
            image_msg_out = self.bridge.cv2_to_imgmsg(image_with_lines, "bgr8")
            image_msg_out.header.stamp = image_msg.header.stamp
            self.pub_image.publish(image_msg_out)

            tk.completed('pub_image')

#         if self.verbose:
            colorSegment = color_segment(white.area, red.area, yellow.area) 
            edge_msg_out = self.bridge.cv2_to_imgmsg(self.detector.edges, "mono8")
            colorSegment_msg_out = self.bridge.cv2_to_imgmsg(colorSegment, "bgr8")
            self.pub_edge.publish(edge_msg_out)
            self.pub_colorSegment.publish(colorSegment_msg_out)

            tk.completed('pub_edge/pub_segment')


        self.intermittent_log(tk.getall())


    def onShutdown(self):
        self.loginfo("Shutdown.")
            
    def toSegmentMsg(self,  lines, normals, color):
        
        segmentMsgList = []
        for x1,y1,x2,y2,norm_x,norm_y in np.hstack((lines,normals)):
            segment = Segment()
            segment.color = color
            segment.pixels_normalized[0].x = x1
            segment.pixels_normalized[0].y = y1
            segment.pixels_normalized[1].x = x2
            segment.pixels_normalized[1].y = y2
            segment.normal.x = norm_x
            segment.normal.y = norm_y
             
            segmentMsgList.append(segment)
        return segmentMsgList
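The (lines + arr_cutoff) * arr_ratio step used in processImage_ and toSegmentMsg maps endpoints detected in the cropped, resized image back into coordinates normalized by the full image size. A small numeric sketch, assuming a 160x120 image with top_cutoff = 40 (the values used in a later example):

import numpy as np

top_cutoff = 40
img_h, img_w = 120, 160

arr_cutoff = np.array((0, top_cutoff, 0, top_cutoff))
arr_ratio = np.array((1. / img_w, 1. / img_h, 1. / img_w, 1. / img_h))

line_px = np.array([80, 20, 120, 40])           # (x1, y1, x2, y2) in the cropped image
line_norm = (line_px + arr_cutoff) * arr_ratio  # -> [0.5, 0.5, 0.75, 0.667]
print(line_norm)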
class LineDetectorNode(object):
    def __init__(self):
        self.node_name = "LineDetectorNode"

        # Thread lock
        self.thread_lock = threading.Lock()

        # Constructor of line detector
        self.bridge = CvBridge()

        self.active = True
        self.time_switch = True

        self.stats = Stats()

        # Only be verbose every 10 cycles
        self.intermittent_interval = 100
        self.intermittent_counter = 0

        # color correction
        self.ai = AntiInstagram()

        # these will be added if it becomes verbose
        self.pub_edge = None
        self.pub_colorSegment = None

        self.detector = None
        self.verbose = None
        self.updateParams(None)

        self.blue = 0
        self.yellow = 0
        self.count = 0

        # Publishers
        self.pub_lines = rospy.Publisher("~segment_list",
                                         SegmentList,
                                         queue_size=1)
        self.pub_image = rospy.Publisher("~image_with_lines",
                                         Image,
                                         queue_size=1)
        self.pub_lane = rospy.Publisher("~have_lines",
                                        BoolStamped,
                                        queue_size=1,
                                        latch=True)
        self.pub_car_cmd = rospy.Publisher("~car_cmd",
                                           Twist2DStamped,
                                           queue_size=1)

        # Subscribers
        self.sub_image = rospy.Subscriber("~image",
                                          CompressedImage,
                                          self.cbImage,
                                          queue_size=1)
        self.sub_transform = rospy.Subscriber("~transform",
                                              AntiInstagramTransform,
                                              self.cbTransform,
                                              queue_size=1)
        self.sub_switch = rospy.Subscriber("~switch",
                                           BoolStamped,
                                           self.cbSwitch,
                                           queue_size=1)

        rospy.loginfo("[%s] Initialized (verbose = %s)." %
                      (self.node_name, self.verbose))

        rospy.Timer(rospy.Duration.from_sec(2.0), self.updateParams)

    def updateParams(self, _event):
        old_verbose = self.verbose
        self.verbose = rospy.get_param('~verbose', True)
        # self.loginfo('verbose = %r' % self.verbose)
        if self.verbose != old_verbose:
            self.loginfo('Verbose is now %r' % self.verbose)

        self.image_size = rospy.get_param('~img_size')
        self.top_cutoff = rospy.get_param('~top_cutoff')

        if self.detector is None:
            c = rospy.get_param('~detector')
            assert isinstance(c, list) and len(c) == 2, c

            #         if str(self.detector_config) != str(c):
            self.loginfo('new detector config: %s' % str(c))

            self.detector = instantiate(c[0], c[1])
#             self.detector_config = c

        if self.verbose and self.pub_edge is None:
            self.pub_edge = rospy.Publisher("~edge", Image, queue_size=1)
            self.pub_colorSegment = rospy.Publisher("~colorSegment",
                                                    Image,
                                                    queue_size=1)

    def cbSwitch(self, switch_msg):
        self.active = switch_msg.data

    def cbImage(self, image_msg):
        self.stats.received()

        if not self.active:
            return
        # Start a daemon thread to process the image
        thread = threading.Thread(target=self.processImage, args=(image_msg, ))
        thread.setDaemon(True)
        thread.start()
        # Returns right away

    def cbTransform(self, transform_msg):
        self.ai.shift = transform_msg.s[0:3]
        self.ai.scale = transform_msg.s[3:6]

        self.loginfo("AntiInstagram transform received")

    def loginfo(self, s):
        rospy.loginfo('[%s] %s' % (self.node_name, s))

    def intermittent_log_now(self):
        return self.intermittent_counter % self.intermittent_interval == 1

    def intermittent_log(self, s):
        if not self.intermittent_log_now():
            return
        self.loginfo('%3d:%s' % (self.intermittent_counter, s))

    def processImage(self, image_msg):
        if not self.thread_lock.acquire(False):
            self.stats.skipped()
            # Return immediately if the thread is locked
            return

        try:
            self.processImage_(image_msg)
        finally:
            # Release the thread lock
            self.thread_lock.release()

    def processImage_(self, image_msg):

        self.stats.processed()

        if self.intermittent_log_now():
            self.intermittent_log(self.stats.info())
            self.stats.reset()

        tk = TimeKeeper(image_msg)

        self.intermittent_counter += 1

        # Decode from compressed image with OpenCV
        try:
            image_cv = image_cv_from_jpg(image_msg.data)
        except ValueError as e:
            self.loginfo('Could not decode image: %s' % e)
            return

        tk.completed('decoded')

        # Resize and crop image
        hei_original, wid_original = image_cv.shape[0:2]

        if self.image_size[0] != hei_original or self.image_size[
                1] != wid_original:
            # image_cv = cv2.GaussianBlur(image_cv, (5,5), 2)
            image_cv = cv2.resize(image_cv,
                                  (self.image_size[1], self.image_size[0]),
                                  interpolation=cv2.INTER_NEAREST)
        image_cv = image_cv[self.top_cutoff:, :, :]

        tk.completed('resized')

        # apply color correction: AntiInstagram
        image_cv_corr = self.ai.applyTransform(image_cv)
        image_cv_corr = cv2.convertScaleAbs(image_cv_corr)

        tk.completed('corrected')

        # set up parameter

        hsv_blue1 = (100, 50, 50)
        hsv_blue2 = (150, 255, 255)
        hsv_yellow1 = (25, 50, 50)
        hsv_yellow2 = (45, 255, 255)

        # Set the image to be detected
        gray = cv2.cvtColor(image_cv_corr, cv2.COLOR_BGR2GRAY)
        edges = cv2.Canny(gray, 80, 200, apertureSize=3)

        hsv = cv2.cvtColor(image_cv_corr, cv2.COLOR_BGR2HSV)
        yellow = cv2.inRange(hsv, hsv_yellow1, hsv_yellow2)
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
        yellow = cv2.dilate(yellow, kernel)

        blue = cv2.inRange(hsv, hsv_blue1, hsv_blue2)
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
        blue = cv2.dilate(blue, kernel)

        # Detect lines and normals

        edge_color_yellow = cv2.bitwise_and(yellow, edges)
        lines_yellow = cv2.HoughLinesP(edge_color_yellow, 1, np.pi / 180, 10,
                                       np.empty(1), 3, 1)
        if lines_yellow is not None:
            lines_yellow = np.array(lines_yellow[0])

        else:
            lines_yellow = []

        #edge_color_blue = cv2.bitwise_and(blue, edges)
        #lines_blue = cv2.HoughLinesP(edge_color_blue, 1, np.pi/180, 10, np.empty(1), 3, 1)
        #if lines_blue is not None:
        #lines_blue = np.array(lines_blue[0])

        #else:
        #lines_blue = []

#print "***************** ",image_cv.shape," *********************"
#bw_yellow = yellow
#bw_blue = blue

        self.blue = blue
        self.yellow = yellow
        if len(lines_yellow) > 0:
            lines_yellow, normals_yellow = self.normals(lines_yellow, yellow)

        #if len(lines_blue) > 0:
        #lines_blue,normals_blue = self.normals(lines_blue,bw_blue)

        tk.completed('detected')

        # SegmentList constructor
        segmentList = SegmentList()
        segmentList.header.stamp = image_msg.header.stamp

        # Convert to normalized pixel coordinates, and add segments to segmentList

        if len(lines_yellow) > 0:
            segmentList.segments.extend(
                self.toSegmentMsg(lines_yellow, normals_yellow,
                                  Segment.YELLOW))

        #if len(lines_blue) > 0:
        #segmentList.segments.extend(self.toSegmentMsg(lines_blue, normals_blue, Segment.YELLOW))

        self.intermittent_log('# segments:yellow %3d' % (len(lines_yellow)))

        tk.completed('prepared')

        # Publish segmentList
        self.pub_lines.publish(segmentList)

        # VISUALIZATION only below

        if self.verbose:

            # Draw lines and normals
            image_with_lines = np.copy(image_cv_corr)
            for x1, y1, x2, y2, norm_x, norm_y in np.hstack(
                (lines_yellow, normals_yellow)):
                x1 = int(x1)
                x2 = int(x2)
                y1 = int(y1)
                y2 = int(y2)

                ox = int((x1 + x2) / 2)
                oy = int((y1 + y2) / 2)

                cx = (ox + 3 * norm_x).astype('int')
                cy = (oy + 3 * norm_y).astype('int')

                ccx = (ox - 3 * norm_x).astype('int')
                ccy = (oy - 3 * norm_y).astype('int')

                if cx > 158:
                    cx = 158
                elif cx < 1:
                    cx = 1
                if ccx > 158:
                    ccx = 158
                elif ccx < 1:
                    ccx = 1

                if cy >= 79:
                    cy = 79
                elif cy < 1:
                    cy = 1
                if ccy >= 79:
                    ccy = 79
                elif ccy < 1:
                    ccy = 1

                if (blue[cy, cx] == 255 and yellow[ccy, ccx] == 255) or (
                        yellow[cy, cx] == 255 and blue[ccy, ccx] == 255):

                    cv2.line(image_with_lines, (x1, y1), (x2, y2), (0, 0, 255),
                             2)
                    cv2.circle(image_with_lines, (x1, y1), 2, (0, 255, 0))
                    cv2.circle(image_with_lines, (x2, y2), 2, (255, 0, 0))

            tk.completed('drawn')

            # Publish the frame with lines
            image_msg_out = self.bridge.cv2_to_imgmsg(image_with_lines, "bgr8")
            image_msg_out.header.stamp = image_msg.header.stamp
            self.pub_image.publish(image_msg_out)

            tk.completed('pub_image')

            #         if self.verbose:
            #colorSegment = color_segment(white.area, red.area, yellow.area)
            #edge_msg_out = self.bridge.cv2_to_imgmsg(self.detector.edges, "mono8")
            #colorSegment_msg_out = self.bridge.cv2_to_imgmsg(colorSegment, "bgr8")
            #self.pub_edge.publish(edge_msg_out)
            #self.pub_colorSegment.publish(colorSegment_msg_out)

            tk.completed('pub_edge/pub_segment')

        self.intermittent_log(tk.getall())

    def onShutdown(self):
        self.loginfo("Shutdown.")

    def toSegmentMsg(self, lines, normals, color):

        arr_cutoff = np.array((0, self.top_cutoff, 0, self.top_cutoff))
        arr_ratio = np.array(
            (1. / self.image_size[1], 1. / self.image_size[0],
             1. / self.image_size[1], 1. / self.image_size[0]))

        segmentMsgList = []
        for x1, y1, x2, y2, norm_x, norm_y in np.hstack((lines, normals)):

            ox = int((x1 + x2) / 2)
            oy = int((y1 + y2) / 2)

            cx = (ox + 3 * norm_x).astype('int')
            cy = (oy + 3 * norm_y).astype('int')

            ccx = (ox - 3 * norm_x).astype('int')
            ccy = (oy - 3 * norm_y).astype('int')

            if cx > 158:
                cx = 158
            elif cx < 1:
                cx = 1
            if ccx > 158:
                ccx = 158
            elif ccx < 1:
                ccx = 1

            if cy >= 79:
                cy = 79
            elif cy < 1:
                cy = 1
            if ccy >= 79:
                ccy = 79
            elif ccy < 1:
                ccy = 1

            if (self.blue[cy, cx] == 255 and self.yellow[ccy, ccx] == 255) or (
                    self.yellow[cy, cx] == 255 and self.blue[ccy, ccx] == 255):

                [x1, y1, x2,
                 y2] = (([x1, y1, x2, y2] + arr_cutoff) * arr_ratio)

                segment = Segment()
                segment.color = color
                segment.pixels_normalized[0].x = x1
                segment.pixels_normalized[0].y = y1
                segment.pixels_normalized[1].x = x2
                segment.pixels_normalized[1].y = y2
                segment.normal.x = norm_x
                segment.normal.y = norm_y
                segmentMsgList.append(segment)

                if self.time_switch == True:
                    msgg = BoolStamped()
                    msgg.data = True
                    self.pub_lane.publish(msgg)
                    self.time_switch = False
                    self.count = 0

            else:
                self.count += 1

                if self.count > 5:
                    print "****************count = ", self.count, " *******************"
                    if self.time_switch == False:
                        msgg = BoolStamped()
                        msgg.data = False
                        self.pub_lane.publish(msgg)
                        self.time_switch = True

                car_control_msg = Twist2DStamped()
                car_control_msg.v = 0.0
                car_control_msg.omega = 0.0
                self.pub_car_cmd.publish(car_control_msg)

        return segmentMsgList

    def normals(self, lines, bw):

        if len(lines) > 0:

            normals = []

            centers = []

            #find the dx dy

            length = np.sum((lines[:, 0:2] - lines[:, 2:4])**2,
                            axis=1,
                            keepdims=True)**0.5

            dx = 1. * (lines[:, 3:4] - lines[:, 1:2]) / length

            dy = 1. * (lines[:, 0:1] - lines[:, 2:3]) / length

            centers = np.hstack([(lines[:, 0:1] + lines[:, 2:3]) / 2,
                                 (lines[:, 1:2] + lines[:, 3:4]) / 2])

            x3 = (centers[:, 0:1] - 3. * dx).astype('int')

            x3[x3 < 0] = 0

            x3[x3 >= 160] = 160 - 1

            y3 = (centers[:, 1:2] - 3. * dy).astype('int')

            y3[y3 < 0] = 0

            y3[y3 >= 80] = 80 - 1

            x4 = (centers[:, 0:1] + 3. * dx).astype('int')

            x4[x4 < 0] = 0

            x4[x4 >= 160] = 160 - 1

            y4 = (centers[:, 1:2] + 3. * dy).astype('int')

            y4[y4 < 0] = 0

            y4[y4 >= 80] = 80 - 1

            flag_signs = (np.logical_and(bw[y3, x3] > 0, bw[y4, x4]
                                         == 0)).astype('int') * 2 - 1

            normals = np.hstack([dx, dy]) * flag_signs

            flag = ((lines[:, 2] - lines[:, 0]) * normals[:, 1] -
                    (lines[:, 3] - lines[:, 1]) * normals[:, 0]) > 0

            for i in range(len(lines)):

                if flag[i]:

                    x1, y1, x2, y2 = lines[i, :]

                    lines[i, :] = [x2, y2, x1, y1]

            return lines, normals
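normals() above computes a unit normal per Hough segment and then uses the color masks to orient it. The geometric part for a single segment, following the same dx/dy convention, reduces to:

import numpy as np

def segment_normal(x1, y1, x2, y2):
    # Unit vector perpendicular to (x1, y1)-(x2, y2), matching dx/dy in normals().
    length = np.hypot(x2 - x1, y2 - y1)
    dx = (y2 - y1) / length
    dy = (x1 - x2) / length
    return dx, dy

print(segment_normal(0, 0, 3, 4))   # -> (0.8, -0.6)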
Example #21
class ColorDetectorNode(object):
    def __init__(self):
        self.node_name = "ColorDetectorNode"

        # Thread lock 
        self.thread_lock = threading.Lock()
    
        # Constructor of line detector 
        self.bridge = CvBridge()

        self.active = True


        self.stats = Stats()

        # Only be verbose every 10 cycles
        self.intermittent_interval = 100
        self.intermittent_counter = 0

        # color correction
        self.ai = AntiInstagram()
 
        # these will be added if it becomes verbose
        self.pub_edge = None
        self.pub_colorSegment = None

        self.detector = None
        self.verbose = None
        self.updateParams(None)
   
         
        # Publishers
        self.pub_lane_recovery = rospy.Publisher("~lane_recovery", BoolStamped, queue_size=1)
        self.pub_image_green = rospy.Publisher("~green_hsv", Image, queue_size=1)
        self.pub_image_blue = rospy.Publisher("~blue_hsv", Image, queue_size=1)
        
        self.pub_car_cmd = rospy.Publisher("~car_cmd",Twist2DStamped,queue_size=1)
       
        # Subscribers
        self.sub_image = rospy.Subscriber("~image", CompressedImage, self.cbImage, queue_size=1)
        self.sub_transform = rospy.Subscriber("~transform", AntiInstagramTransform, self.cbTransform, queue_size=1)
        self.sub_switch = rospy.Subscriber("~switch", BoolStamped, self.cbSwitch, queue_size=1)

        rospy.loginfo("[%s] Initialized (verbose = %s)." %(self.node_name, self.verbose))

        rospy.Timer(rospy.Duration.from_sec(2.0), self.updateParams)


    def updateParams(self, _event):
        old_verbose = self.verbose
        self.verbose = rospy.get_param('~verbose', True)
        # self.loginfo('verbose = %r' % self.verbose)
        if self.verbose != old_verbose:
            self.loginfo('Verbose is now %r' % self.verbose)

        self.image_size = rospy.get_param('~img_size')
        self.top_cutoff = rospy.get_param('~top_cutoff')

        if self.detector is None:
            c = rospy.get_param('~detector')
            assert isinstance(c, list) and len(c) == 2, c
        
#         if str(self.detector_config) != str(c):
            self.loginfo('new detector config: %s' % str(c))

            self.detector = instantiate(c[0], c[1])
#             self.detector_config = c




    def cbSwitch(self, switch_msg):
        self.active = switch_msg.data

    def cbImage(self, image_msg):
        self.stop()

        self.stats.received()

        if not self.active:
            #print "******************** no color detector *********************"
            return 
        # Start a daemon thread to process the image
        thread = threading.Thread(target=self.processImage,args=(image_msg,))
        thread.setDaemon(True)
        thread.start()
        # Returns right away

    def cbTransform(self, transform_msg):
        self.ai.shift = transform_msg.s[0:3]
        self.ai.scale = transform_msg.s[3:6]

        self.loginfo("AntiInstagram transform received")

    def loginfo(self, s):
        rospy.loginfo('[%s] %s' % (self.node_name, s))

    def intermittent_log_now(self):
        return self.intermittent_counter % self.intermittent_interval == 1
    
    def intermittent_log(self, s):
        if not self.intermittent_log_now():
            return
        self.loginfo('%3d:%s' % (self.intermittent_counter, s))

    def processImage(self, image_msg):
        if not self.thread_lock.acquire(False):
            self.stats.skipped()
            # Return immediately if the thread is locked
            return

        try:
            self.processImage_(image_msg)
        finally:
            # Release the thread lock
            self.thread_lock.release()

    def processImage_(self, image_msg):

        self.stats.processed()

        if self.intermittent_log_now():
            self.intermittent_log(self.stats.info())
            self.stats.reset()

        tk = TimeKeeper(image_msg)
        
        self.intermittent_counter += 1

        # Decode from compressed image with OpenCV
        try:
            image_cv = image_cv_from_jpg(image_msg.data)
        except ValueError as e:
            self.loginfo('Could not decode image: %s' % e)
            return

        tk.completed('decoded')

        # Resize and crop image
        hei_original, wid_original = image_cv.shape[0:2]

        if self.image_size[0] != hei_original or self.image_size[1] != wid_original:
            # image_cv = cv2.GaussianBlur(image_cv, (5,5), 2)
            image_cv = cv2.resize(image_cv, (self.image_size[1], self.image_size[0]),
                                   interpolation=cv2.INTER_NEAREST)
        image_cv = image_cv[self.top_cutoff:,:,:]

        tk.completed('resized')

        # apply color correction: AntiInstagram
        image_cv_corr = self.ai.applyTransform(image_cv)
        image_cv_corr = cv2.convertScaleAbs(image_cv_corr)

        tk.completed('corrected')

        # set up parameter

        hsv_green1 = np.array([70,100,60])
        hsv_green2 = np.array([90,255,255])
        hsv_blue1 = np.array([90,80,50])
        hsv_blue2 = np.array([110,255,255])


        # Set the image to be detected
        hsv = cv2.cvtColor(image_cv_corr,cv2.COLOR_BGR2HSV)
        green = cv2.inRange(hsv,hsv_green1,hsv_green2)
        blue = cv2.inRange(hsv,hsv_blue1,hsv_blue2)
    
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3, 3))
        green = cv2.dilate(green, kernel)
        blue = cv2.dilate(blue, kernel)

        x = green[90:120,:]
        y = blue[90:120,:]
        
        msgg = BoolStamped()


        if (x==255).sum() > 250:
            print "green line detected!"
            time.sleep(4)
            print " 4 sec finish"

            msgg.data = True
            self.pub_lane_recovery.publish(msgg)


        elif (y==255).sum() > 250:
            print "blue line detected!"
            time.sleep(7)
            print " 7 sec finish"

            msgg.data = True
            self.pub_lane_recovery.publish(msgg)

        else:
            print "only red line detected"
            time.sleep(1)
            print " 1 sec finish"

            msgg.data = True
            self.pub_lane_recovery.publish(msgg)
       
        tk.completed('prepared')

        # VISUALIZATION only below
        
        if self.verbose:

                    

            tk.completed('drawn')

            # Publish the frame with lines

            image_msg_out_green = self.bridge.cv2_to_imgmsg(green, "mono8")
            image_msg_out_green.header.stamp = image_msg.header.stamp
            self.pub_image_green.publish(image_msg_out_green)

            image_msg_out_blue = self.bridge.cv2_to_imgmsg(blue, "mono8")
            image_msg_out_blue.header.stamp = image_msg.header.stamp
            self.pub_image_blue.publish(image_msg_out_blue)

            tk.completed('pub_image')




        self.intermittent_log(tk.getall())


    def onShutdown(self):
        self.loginfo("Shutdown.")
            
    def stop(self):
        car_control_msg = Twist2DStamped()
        car_control_msg.v = 0.0
        car_control_msg.omega = 0.0
        self.pub_car_cmd.publish(car_control_msg)
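The green/blue detection above boils down to counting in-range pixels inside a horizontal band of an HSV mask. A minimal sketch of that check, with the green thresholds, the 90:120 band, and the 250-pixel cutoff taken from processImage_:

import cv2
import numpy as np

def green_band_detected(bgr_image, min_pixels=250):
    hsv = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv, np.array([70, 100, 60]), np.array([90, 255, 255]))
    mask = cv2.dilate(mask, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3)))
    return (mask[90:120, :] == 255).sum() > min_pixels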
class LineDetectorNode(object):
    def __init__(self):
        self.node_name = "LineDetectorNode"

        self.bw_1 = 0
        self.bw_2 = 0

    
        # Thread lock 
        self.thread_lock = threading.Lock()
       
        # Constructor of line detector 
        self.bridge = CvBridge()

        self.active = True

        self.stats = Stats()

        # Only be verbose every 10 cycles
        self.intermittent_interval = 100
        self.intermittent_counter = 0

        # color correction
        self.ai = AntiInstagram()

        # these will be added if it becomes verbose
        self.pub_edge = None
        self.pub_colorSegment = None

        self.detector = None
        self.verbose = None
        self.updateParams(None)
            
        # Publishers
        self.pub_lines = rospy.Publisher("~segment_list", SegmentList, queue_size=1)
        self.pub_image = rospy.Publisher("~image_with_lines", Image, queue_size=1)
       
        # Subscribers
        self.sub_image = rospy.Subscriber("~image", CompressedImage, self.cbImage, queue_size=1)
        self.sub_transform = rospy.Subscriber("~transform", AntiInstagramTransform, self.cbTransform, queue_size=1)
        self.sub_switch = rospy.Subscriber("~switch", BoolStamped, self.cbSwitch, queue_size=1)

        rospy.loginfo("[%s] Initialized (verbose = %s)." %(self.node_name, self.verbose))

        rospy.Timer(rospy.Duration.from_sec(2.0), self.updateParams)


    def updateParams(self, _event):
        old_verbose = self.verbose
        self.verbose = rospy.get_param('~verbose', True)
        # self.loginfo('verbose = %r' % self.verbose)
        if self.verbose != old_verbose:
            self.loginfo('Verbose is now %r' % self.verbose)

        self.image_size = rospy.get_param('~img_size')
        self.top_cutoff = rospy.get_param('~top_cutoff')

        if self.detector is None:
            c = rospy.get_param('~detector')
            assert isinstance(c, list) and len(c) == 2, c
        
#         if str(self.detector_config) != str(c):
            self.loginfo('new detector config: %s' % str(c))

            self.detector = instantiate(c[0], c[1])
#             self.detector_config = c

        if self.verbose and self.pub_edge is None:
            self.pub_edge = rospy.Publisher("~edge", Image, queue_size=1)
            self.pub_colorSegment = rospy.Publisher("~colorSegment", Image, queue_size=1)


    def cbSwitch(self, switch_msg):
        self.active = switch_msg.data

    def cbImage(self, image_msg):
        self.stats.received()

        if not self.active:
            return 
        # Start a daemon thread to process the image
        thread = threading.Thread(target=self.processImage,args=(image_msg,))
        thread.setDaemon(True)
        thread.start()
        # Returns right away

    def cbTransform(self, transform_msg):
        self.ai.shift = transform_msg.s[0:3]
        self.ai.scale = transform_msg.s[3:6]

        self.loginfo("AntiInstagram transform received")

    def loginfo(self, s):
        rospy.loginfo('[%s] %s' % (self.node_name, s))

    def intermittent_log_now(self):
        return self.intermittent_counter % self.intermittent_interval == 1
    
    def intermittent_log(self, s):
        if not self.intermittent_log_now():
            return
        self.loginfo('%3d:%s' % (self.intermittent_counter, s))

    def processImage(self, image_msg):
        if not self.thread_lock.acquire(False):
            self.stats.skipped()
            # Return immediately if the thread is locked
            return

        try:
            self.processImage_(image_msg)
        finally:
            # Release the thread lock
            self.thread_lock.release()

    def processImage_(self, image_msg):

        self.stats.processed()

        if self.intermittent_log_now():
            self.intermittent_log(self.stats.info())
            self.stats.reset()

        tk = TimeKeeper(image_msg)
        
        self.intermittent_counter += 1

        # Decode from compressed image with OpenCV
        try:
            image_cv = image_cv_from_jpg(image_msg.data)
        except ValueError as e:
            self.loginfo('Could not decode image: %s' % e)
            return

        tk.completed('decoded')

        # Resize and crop image
        hei_original, wid_original = image_cv.shape[0:2]

        if self.image_size[0] != hei_original or self.image_size[1] != wid_original:
            # image_cv = cv2.GaussianBlur(image_cv, (5,5), 2)
            image_cv = cv2.resize(image_cv, (self.image_size[1], self.image_size[0]),
                                   interpolation=cv2.INTER_NEAREST)
        image_cv = image_cv[self.top_cutoff:,:,:]

        tk.completed('resized')

        # apply color correction: AntiInstagram
        image_cv_corr = self.ai.applyTransform(image_cv)
        image_cv_corr = cv2.convertScaleAbs(image_cv_corr)

        tk.completed('corrected')

        # Set the image to be detected
        self.detector.setImage(image_cv_corr)

        # Detect lines and normals

        gray = cv2.cvtColor(image_cv_corr,cv2.COLOR_BGR2GRAY)
        edges = cv2.Canny(gray, 100, 200, apertureSize = 3)
        lines = cv2.HoughLinesP(edges, 1, np.pi/180, 50, np.empty(1), 3, 1)

        hsv_yellow1 = (25,50,50)
        hsv_yellow2 = (45,255,255)

        hsv_blue1 = (100,90,80)
        hsv_blue2 = (150,255,155)

        #change color space to HSV
        hsv = cv2.cvtColor(image_cv, cv2.COLOR_BGR2HSV)

        #find the color
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3, 3))

        yellow = cv2.inRange(hsv, hsv_yellow1, hsv_yellow2)
        yellow = cv2.dilate(yellow, kernel)
        self.bw_1=yellow

        blue = cv2.inRange(hsv, hsv_blue1, hsv_blue2)
        blue = cv2.dilate(blue, kernel) 
        self.bw_2=blue


        edge_color_yellow = cv2.bitwise_and(yellow, edges)
        edge_color_blue   = cv2.bitwise_and(blue, edges)

        lines = cv2.HoughLinesP(edges, 1, np.pi/180, 10, np.empty(1), 3, 1)
        lines_yellow = cv2.HoughLinesP(edge_color_yellow, 1, np.pi/180, 10, np.empty(1), 3, 1)
        lines_blue = cv2.HoughLinesP(edge_color_blue, 1, np.pi/180, 10, np.empty(1), 3, 1)


        if lines_yellow is not None:
            lines_yellow = np.array(lines_yellow[0])
            print "found yellow lines"
    
        else:
            lines_yellow = []
            print "no yellow lines"
    
        if lines_blue is not None:
            lines_blue = np.array(lines_blue[0])
            print "found blue lines"

        else:
            lines_blue= []
            print "no blue lines"


        arr_cutoff = np.array((0, 40, 0, 40))
        arr_ratio = np.array((1./120, 1./160, 1./120, 1./160))

        lines_1 = lines_yellow
        lines_2 = lines_blue

        normals = []
        centers = []
        if len(lines_2)>0:
            #find the normalized coordinates
            lines_normalized = ((lines_1 + arr_cutoff) * arr_ratio)

            #find the unit vector
            length = np.sum((lines_1[:, 0:2] -lines_1[:, 2:4])**2, axis=1, keepdims=True)**0.5
            dx = 1.* (lines_1[:,3:4]-lines_1[:,1:2])/length
            dy = 1.* (lines_1[:,0:1]-lines_1[:,2:3])/length

            #find the center point
            centers = np.hstack([(lines_1[:,0:1]+lines_1[:,2:3])/2, (lines_1[:,1:2]+lines_1[:,3:4])/2])

       
            #find the vectors' direction
            x3 = (centers[:,0:1] - 4.*dx).astype('int')
            x3[x3<0]=0
            x3[x3>=160]=160-1

            y3 = (centers[:,1:2] - 4.*dy).astype('int')
            y3[y3<0]=0
            y3[y3>=120]=120-1

            x4 = (centers[:,0:1] + 4.*dx).astype('int')
            x4[x4<0]=0
            x4[x4>=160]=160-1

            y4 = (centers[:,1:2] + 4.*dy).astype('int')
            y4[y4<0]=0
            y4[y4>=120]=120-1
    
            flag_signs = (np.logical_and(self.bw_2[y3,x3]>0, self.bw_2[y4,x4]>0)).astype('int')*2-1
            normals = np.hstack([dx, dy]) * flag_signs
    
    
            flag = ((lines_1[:,2]-lines_1[:,0])*normals[:,1] - (lines_1[:,3]-lines_1[:,1])*normals[:,0])>0
            for i in range(len(lines_1)):
                if flag[i]:
                    x1,y1,x2,y2 = lines_1[i, :]
                    lines_1[i, :] = [x2,y2,x1,y1]


        tk.completed('detected')
     
        # SegmentList constructor
        segmentList = SegmentList()
        segmentList.header.stamp = image_msg.header.stamp
        
        # Convert to normalized pixel coordinates, and add segments to segmentList
        arr_cutoff = np.array((0, self.top_cutoff, 0, self.top_cutoff))
        arr_ratio = np.array((1./self.image_size[1], 1./self.image_size[0], 1./self.image_size[1], 1./self.image_size[0]))
        if len(lines_2) > 0:
            lines_normalized_blue = ((lines_2 + arr_cutoff) * arr_ratio)
            segmentList.segments.extend(self.toSegmentMsg(lines_normalized, normals,centers, 0))

        self.intermittent_log('# segments: white %3d ' % (len(lines_2)))
        
        tk.completed('prepared')

        # Publish segmentList
        self.pub_lines.publish(segmentList)
        tk.completed('--pub_lines--')

        # VISUALIZATION only below
        
        if self.verbose:

            # Draw lines and normals
            image_with_lines = np.copy(image_cv_corr)
            if len(lines_1) > 0:
                for x1, y1, x2, y2 in lines_1:
                    # centre of the segment, clamped so that cx-1 and cx+1 stay inside the 160x120 masks
                    cx = int((x1 + x2) / 2)
                    cy = int((y1 + y2) / 2)
                    cx = min(max(cx, 1), 160 - 2)
                    cy = min(max(cy, 0), 120 - 1)
                    # draw only segments whose centre straddles the bw_1/bw_2 boundary
                    if (self.bw_2[cy, cx - 1] == 255 and self.bw_1[cy, cx + 1] == 255) or \
                       (self.bw_2[cy, cx + 1] == 255 and self.bw_1[cy, cx - 1] == 255):
                        cv2.line(image_with_lines, (x1, y1), (x2, y2), (255, 255, 255))
                        cv2.circle(image_with_lines, (x1, y1), 1, (0, 255, 0))  # start point (green in BGR)
                        cv2.circle(image_with_lines, (x2, y2), 1, (255, 0, 0))  # end point (blue in BGR)
            #drawLines(image_with_lines, (lines_2), (255, 255, 255))
            #drawLines(image_with_lines, yellow.lines, (255, 0, 0))
            #drawLines(image_with_lines, red.lines, (0, 255, 0))

            tk.completed('drawn')

            # Publish the frame with lines
            image_msg_out = self.bridge.cv2_to_imgmsg(image_with_lines, "bgr8")
            image_msg_out.header.stamp = image_msg.header.stamp
            self.pub_image.publish(image_msg_out)

            tk.completed('pub_image')


#         if self.verbose:
            #colorSegment = color_segment(self.bw_1, self.bw_2) 
            #edge_msg_out = self.bridge.cv2_to_imgmsg(self.detector.edges, "mono8")
            #colorSegment_msg_out = self.bridge.cv2_to_imgmsg(colorSegment, "bgr8")
            #self.pub_edge.publish(edge_msg_out)
            #self.pub_colorSegment.publish(colorSegment_msg_out)

            tk.completed('pub_edge/pub_segment')

        self.intermittent_log(tk.getall())


    def onShutdown(self):
        self.loginfo("Shutdown.")
            
    def toSegmentMsg(self, lines, normals, centers, color):
        segmentMsgList = []
        for x1, y1, x2, y2, norm_x, norm_y in np.hstack((lines, normals)):
            segment = Segment()
            segment.color = color
            segment.pixels_normalized[0].x = x1
            segment.pixels_normalized[0].y = y1
            segment.pixels_normalized[1].x = x2
            segment.pixels_normalized[1].y = y2
            segment.normal.x = norm_x
            segment.normal.y = norm_y
            segmentMsgList.append(segment)
        return segmentMsgList
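# A minimal sketch, assuming a 160x120 frame and top_cutoff = 40, of the normalization
# used above: cropped-image pixel endpoints (x1, y1, x2, y2) are shifted back into the
# full frame and scaled into [0, 1].
import numpy as np

top_cutoff = 40                     # rows cropped from the top before detection (assumed value)
width, height = 160, 120            # full frame size, matching the clamps above

lines = np.array([[10., 5., 150., 70.]])   # one segment in cropped-image pixels

arr_cutoff = np.array((0, top_cutoff, 0, top_cutoff))
arr_ratio = np.array((1. / width, 1. / height, 1. / width, 1. / height))

lines_normalized = (lines + arr_cutoff) * arr_ratio
print(lines_normalized)   # approx [[0.0625, 0.375, 0.9375, 0.9167]] -- x over width, y over full height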
Example #23
class LineDetectorNode2(EasyNode):
    def __init__(self):
        EasyNode.__init__(self, "line_detector2", "line_detector_node2")

        self.detector = None
        self.bridge = CvBridge()
        self.ai = AntiInstagram()
        self.active = True

        # Only be verbose every 10 cycles
        self.intermittent_interval = 100
        self.intermittent_counter = 0

    def on_parameters_changed(self, _first_time, updated):

        if "verbose" in updated:
            self.info(f"Verbose is now {self.config.verbose!r}")

        if "line_detector" in updated:
            db = get_easy_algo_db()
            self.detector = db.create_instance("line_detector", self.config.line_detector)

    def on_received_switch(self, context, switch_msg):
        self.active = switch_msg.data

    def on_received_transform(self, context, transform_msg):
        self.ai.shift = transform_msg.s[0:3]
        self.ai.scale = transform_msg.s[3:6]

        self.info("AntiInstagram transform received")

    def on_received_image(self, context, image_msg):
        if not self.active:
            return

        self.intermittent_counter += 1

        with context.phase("decoding"):
            # Decode from compressed image with OpenCV
            try:
                image_cv = dtu.bgr_from_jpg(image_msg.data)
            except ValueError as e:
                self.error(f"Could not decode image: {e}")
                return

        with context.phase("resizing"):
            # Resize and crop image
            hei_original, wid_original = image_cv.shape[0:2]

            if self.config.img_size[0] != hei_original or self.config.img_size[1] != wid_original:
                # image_cv = cv2.GaussianBlur(image_cv, (5,5), 2)
                image_cv = cv2.resize(
                    image_cv,
                    (self.config.img_size[1], self.config.img_size[0]),
                    interpolation=cv2.INTER_NEAREST,
                )
            image_cv = image_cv[self.config.top_cutoff :, :, :]

        with context.phase("correcting"):
            # apply color correction
            image_cv_corr = self.ai.applyTransform(image_cv)
        #             image_cv_corr = cv2.convertScaleAbs(image_cv_corr)

        with context.phase("detection"):
            # Set the image to be detected
            self.detector.setImage(image_cv_corr)

            # Detect lines and normals
            white = self.detector.detectLines("white")
            yellow = self.detector.detectLines("yellow")
            red = self.detector.detectLines("red")

        with context.phase("preparing-images"):
            # SegmentList constructor
            segmentList = SegmentList()
            segmentList.header.stamp = image_msg.header.stamp

            # Convert to normalized pixel coordinates, and add segments to segmentList
            top_cutoff = self.config.top_cutoff
            s0, s1 = self.config.img_size[0], self.config.img_size[1]

            arr_cutoff = np.array((0, top_cutoff, 0, top_cutoff))
            arr_ratio = np.array((1.0 / s1, 1.0 / s0, 1.0 / s1, 1.0 / s0))
            if len(white.lines) > 0:
                lines_normalized_white = (white.lines + arr_cutoff) * arr_ratio
                segmentList.segments.extend(
                    toSegmentMsg(lines_normalized_white, white.normals, Segment.WHITE)
                )
            if len(yellow.lines) > 0:
                lines_normalized_yellow = (yellow.lines + arr_cutoff) * arr_ratio
                segmentList.segments.extend(
                    toSegmentMsg(lines_normalized_yellow, yellow.normals, Segment.YELLOW)
                )
            if len(red.lines) > 0:
                lines_normalized_red = (red.lines + arr_cutoff) * arr_ratio
                segmentList.segments.extend(toSegmentMsg(lines_normalized_red, red.normals, Segment.RED))

            self.intermittent_log(
                "# segments: white %3d yellow %3d red %3d"
                % (len(white.lines), len(yellow.lines), len(red.lines))
            )

        # Publish segmentList
        with context.phase("publishing"):
            self.publishers.segment_list.publish(segmentList)

        # VISUALIZATION only below

        if self.config.verbose:
            with context.phase("draw-lines"):
                # Draw lines and normals
                image_with_lines = np.copy(image_cv_corr)
                drawLines(image_with_lines, white.lines, (0, 0, 0))
                drawLines(image_with_lines, yellow.lines, (255, 0, 0))
                drawLines(image_with_lines, red.lines, (0, 255, 0))

            with context.phase("published-images"):
                # Publish the frame with lines
                out = dru.d8n_image_msg_from_cv_image(image_with_lines, "bgr8", same_timestamp_as=image_msg)
                self.publishers.image_with_lines.publish(out)

            with context.phase("pub_edge/pub_segment"):
                out = dru.d8n_image_msg_from_cv_image(
                    self.detector.edges, "mono8", same_timestamp_as=image_msg
                )
                self.publishers.edge.publish(out)

                colorSegment = color_segment(white.area, red.area, yellow.area)
                out = dru.d8n_image_msg_from_cv_image(colorSegment, "bgr8", same_timestamp_as=image_msg)
                self.publishers.color_segment.publish(out)

        if self.intermittent_log_now():
            self.info("stats from easy_node\n" + dtu.indent(context.get_stats(), "> "))

    def intermittent_log_now(self):
        return self.intermittent_counter % self.intermittent_interval == 1

    def intermittent_log(self, s):
        if not self.intermittent_log_now():
            return
        self.info(f"{self.intermittent_counter:3d}:{s}")
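# Example #23 calls toSegmentMsg(), drawLines() and color_segment() as module-level
# helpers that are not shown. A minimal sketch of toSegmentMsg, mirroring the method
# of the same name in the neighbouring examples (assuming Segment comes from
# duckietown_msgs):
import numpy as np
from duckietown_msgs.msg import Segment


def toSegmentMsg(lines, normals, color):
    # Each row of `lines` holds (x1, y1, x2, y2) in normalized image coordinates;
    # the matching row of `normals` holds that segment's (norm_x, norm_y) normal.
    segmentMsgList = []
    for x1, y1, x2, y2, norm_x, norm_y in np.hstack((lines, normals)):
        segment = Segment()
        segment.color = color
        segment.pixels_normalized[0].x = x1
        segment.pixels_normalized[0].y = y1
        segment.pixels_normalized[1].x = x2
        segment.pixels_normalized[1].y = y2
        segment.normal.x = norm_x
        segment.normal.y = norm_y
        segmentMsgList.append(segment)
    return segmentMsgList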
Example #24
class LineDetectorNode(object):
    def __init__(self):
        self.node_name = "LineDetectorNode"

        # Thread lock
        self.thread_lock = threading.Lock()

        # Constructor of line detector
        self.bridge = CvBridge()

        self.active = True

        self.stats = Stats()

        # Only be verbose every 10 cycles
        self.intermittent_interval = 100
        self.intermittent_counter = 0

        # color correction
        self.ai = AntiInstagram()

        # these will be added if it becomes verbose
        self.pub_edge = None
        self.pub_colorSegment = None

        self.detector = None
        self.verbose = None
        self.updateParams(None)

        # Publishers
        self.pub_lines = rospy.Publisher("~segment_list",
                                         SegmentList,
                                         queue_size=1)
        self.pub_image = rospy.Publisher("~image_with_lines",
                                         Image,
                                         queue_size=1)

        # Subscribers
        self.sub_image = rospy.Subscriber("~image",
                                          CompressedImage,
                                          self.cbImage,
                                          queue_size=1)
        self.sub_transform = rospy.Subscriber("~transform",
                                              AntiInstagramTransform,
                                              self.cbTransform,
                                              queue_size=1)
        self.sub_switch = rospy.Subscriber("~switch",
                                           BoolStamped,
                                           self.cbSwitch,
                                           queue_size=1)

        rospy.loginfo("[%s] Initialized (verbose = %s)." %
                      (self.node_name, self.verbose))

        rospy.Timer(rospy.Duration.from_sec(2.0), self.updateParams)

    def updateParams(self, _event):
        old_verbose = self.verbose
        self.verbose = rospy.get_param('~verbose', True)
        # self.loginfo('verbose = %r' % self.verbose)
        if self.verbose != old_verbose:
            self.loginfo('Verbose is now %r' % self.verbose)

        self.image_size = rospy.get_param('~img_size')
        self.top_cutoff = rospy.get_param('~top_cutoff')

        if self.detector is None:
            c = rospy.get_param('~detector')
            assert isinstance(c, list) and len(c) == 2, c

            self.loginfo('new detector config: %s' % str(c))
            self.detector = instantiate(c[0], c[1])

        if self.verbose and self.pub_edge is None:
            self.pub_edge = rospy.Publisher("~edge", Image, queue_size=1)
            self.pub_colorSegment = rospy.Publisher("~colorSegment",
                                                    Image,
                                                    queue_size=1)

    def cbSwitch(self, switch_msg):
        self.active = switch_msg.data

    def cbImage(self, image_msg):
        self.stats.received()

        if not self.active:
            return
        # Start a daemon thread to process the image
        thread = threading.Thread(target=self.processImage, args=(image_msg, ))
        thread.daemon = True
        thread.start()
        # Returns right away; processing happens on the daemon thread

    def cbTransform(self, transform_msg):
        self.ai.shift = transform_msg.s[0:3]
        self.ai.scale = transform_msg.s[3:6]

        self.loginfo("AntiInstagram transform received")

    def loginfo(self, s):
        rospy.loginfo('[%s] %s' % (self.node_name, s))

    def intermittent_log_now(self):
        return self.intermittent_counter % self.intermittent_interval == 1

    def intermittent_log(self, s):
        if not self.intermittent_log_now():
            return
        self.loginfo('%3d:%s' % (self.intermittent_counter, s))

    def processImage(self, image_msg):
        if not self.thread_lock.acquire(False):
            self.stats.skipped()
            # Return immediately if the thread is locked
            return

        try:
            self.processImage_(image_msg)
        finally:
            # Release the thread lock
            self.thread_lock.release()

    def processImage_(self, image_msg):

        self.stats.processed()

        if self.intermittent_log_now():
            self.intermittent_log(self.stats.info())
            self.stats.reset()

        tk = TimeKeeper(image_msg)

        self.intermittent_counter += 1

        # Decode from compressed image with OpenCV
        try:
            image_cv = image_cv_from_jpg(image_msg.data)
        except ValueError as e:
            self.loginfo('Could not decode image: %s' % e)
            return

        tk.completed('decoded')

        # Resize and crop image
        hei_original, wid_original = image_cv.shape[0:2]

        if self.image_size[0] != hei_original or self.image_size[
                1] != wid_original:
            # image_cv = cv2.GaussianBlur(image_cv, (5,5), 2)
            image_cv = cv2.resize(image_cv,
                                  (self.image_size[1], self.image_size[0]),
                                  interpolation=cv2.INTER_NEAREST)
        image_cv = image_cv[self.top_cutoff:, :, :]

        tk.completed('resized')

        # apply color correction: AntiInstagram
        image_cv_corr = self.ai.applyTransform(image_cv)
        image_cv_corr = cv2.convertScaleAbs(image_cv_corr)

        tk.completed('corrected')

        # Set the image to be detected
        self.detector.setImage(image_cv_corr)

        # Detect lines and normals

        white = self.detector.detectLines('white')
        yellow = self.detector.detectLines('yellow')
        red = self.detector.detectLines('red')

        tk.completed('detected')

        # SegmentList constructor
        segmentList = SegmentList()
        segmentList.header.stamp = image_msg.header.stamp

        # Convert to normalized pixel coordinates, and add segments to segmentList
        arr_cutoff = np.array((0, self.top_cutoff, 0, self.top_cutoff))
        arr_ratio = np.array(
            (1. / self.image_size[1], 1. / self.image_size[0],
             1. / self.image_size[1], 1. / self.image_size[0]))
        if len(white.lines) > 0:
            lines_normalized_white = ((white.lines + arr_cutoff) * arr_ratio)
            segmentList.segments.extend(
                self.toSegmentMsg(lines_normalized_white, white.normals,
                                  Segment.WHITE))
        if len(yellow.lines) > 0:
            lines_normalized_yellow = ((yellow.lines + arr_cutoff) * arr_ratio)
            segmentList.segments.extend(
                self.toSegmentMsg(lines_normalized_yellow, yellow.normals,
                                  Segment.YELLOW))
        if len(red.lines) > 0:
            lines_normalized_red = ((red.lines + arr_cutoff) * arr_ratio)
            segmentList.segments.extend(
                self.toSegmentMsg(lines_normalized_red, red.normals,
                                  Segment.RED))

        self.intermittent_log(
            '# segments: white %3d yellow %3d red %3d' %
            (len(white.lines), len(yellow.lines), len(red.lines)))

        #self.loginfo("self.verbose %d" % self.verbose)

        tk.completed('prepared')

        # Publish segmentList
        self.pub_lines.publish(segmentList)
        tk.completed('--pub_lines--')

        # VISUALIZATION only below

        if self.verbose:

            # Draw lines and normals
            image_with_lines = np.copy(image_cv_corr)
            drawLines(image_with_lines, white.lines, (0, 0, 0))
            drawLines(image_with_lines, yellow.lines, (255, 0, 0))
            drawLines(image_with_lines, red.lines, (0, 255, 0))

            tk.completed('drawn')

            # Publish the frame with lines
            image_msg_out = self.bridge.cv2_to_imgmsg(image_with_lines, "bgr8")
            image_msg_out.header.stamp = image_msg.header.stamp
            self.pub_image.publish(image_msg_out)

            tk.completed('pub_image')

            #         if self.verbose:
            colorSegment = color_segment(white.area, red.area, yellow.area)
            edge_msg_out = self.bridge.cv2_to_imgmsg(self.detector.edges,
                                                     "mono8")
            colorSegment_msg_out = self.bridge.cv2_to_imgmsg(
                colorSegment, "bgr8")
            self.pub_edge.publish(edge_msg_out)
            self.pub_colorSegment.publish(colorSegment_msg_out)

            tk.completed('pub_edge/pub_segment')

        self.intermittent_log(tk.getall())

    def onShutdown(self):
        self.loginfo("Shutdown.")

    def toSegmentMsg(self, lines, normals, color):

        segmentMsgList = []
        for x1, y1, x2, y2, norm_x, norm_y in np.hstack((lines, normals)):
            segment = Segment()
            segment.color = color
            segment.pixels_normalized[0].x = x1
            segment.pixels_normalized[0].y = y1
            segment.pixels_normalized[1].x = x2
            segment.pixels_normalized[1].y = y2
            segment.normal.x = norm_x
            segment.normal.y = norm_y

            segmentMsgList.append(segment)
        return segmentMsgList
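# drawLines() is used for the visualization above but defined elsewhere. A minimal
# sketch, assuming `lines` is an (N, 4) array of (x1, y1, x2, y2) pixel endpoints
# and `paint` is a BGR colour tuple:
import numpy as np
import cv2


def drawLines(bgr, lines, paint):
    # Draws each segment onto `bgr` in place, marking its start and end points.
    if len(lines) == 0:
        return
    for x1, y1, x2, y2 in np.asarray(lines, dtype=int):
        cv2.line(bgr, (int(x1), int(y1)), (int(x2), int(y2)), paint, 2)
        cv2.circle(bgr, (int(x1), int(y1)), 2, (0, 255, 0))   # start point (green in BGR)
        cv2.circle(bgr, (int(x2), int(y2)), 2, (255, 0, 0))   # end point (blue in BGR)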
Example #25
class LineDetectorNode2(object):
    def __init__(self):
        self.node_name = "Line Detector2"

        # Thread lock
        self.thread_lock = threading.Lock()

        # Constructor of line detector
        self.bridge = CvBridge()
        self.detector = LineDetector2()

        # Parameters
        self.active = True

        self.intermittent_interval = 100
        self.intermittent_counter = 0

        self.updateParams(None)

        # color correction
        self.ai = AntiInstagram()

        # Publishers
        self.pub_lines = rospy.Publisher("~segment_list",
                                         SegmentList,
                                         queue_size=1)
        self.pub_image = rospy.Publisher("~image_with_lines",
                                         Image,
                                         queue_size=1)

        # Verbose option
        if self.verbose:
            self.pub_edge = rospy.Publisher("~edge", Image, queue_size=1)
            self.pub_colorSegment = rospy.Publisher("~segment",
                                                    Image,
                                                    queue_size=1)

        # Subscribers
        self.sub_image = rospy.Subscriber("~image",
                                          CompressedImage,
                                          self.cbImage,
                                          queue_size=1)
        self.sub_transform = rospy.Subscriber("~transform",
                                              AntiInstagramTransform,
                                              self.cbTransform,
                                              queue_size=1)
        self.sub_switch = rospy.Subscriber("~switch",
                                           BoolStamped,
                                           self.cbSwitch,
                                           queue_size=1)
        rospy.loginfo("[%s] Initialized." % (self.node_name))

        self.timer = rospy.Timer(rospy.Duration.from_sec(1.0),
                                 self.updateParams)

    def updateParams(self, event):
        self.verbose = rospy.get_param('~verbose', True)

        self.image_size = rospy.get_param('~img_size')
        self.top_cutoff = rospy.get_param('~top_cutoff')

        self.detector.hsv_white1 = np.array(rospy.get_param('~hsv_white1'))
        self.detector.hsv_white2 = np.array(rospy.get_param('~hsv_white2'))
        self.detector.hsv_yellow1 = np.array(rospy.get_param('~hsv_yellow1'))
        self.detector.hsv_yellow2 = np.array(rospy.get_param('~hsv_yellow2'))
        self.detector.hsv_red1 = np.array(rospy.get_param('~hsv_red1'))
        self.detector.hsv_red2 = np.array(rospy.get_param('~hsv_red2'))
        self.detector.hsv_red3 = np.array(rospy.get_param('~hsv_red3'))
        self.detector.hsv_red4 = np.array(rospy.get_param('~hsv_red4'))

        self.detector.dilation_kernel_size = rospy.get_param(
            '~dilation_kernel_size')
        self.detector.canny_thresholds = rospy.get_param('~canny_thresholds')
        self.detector.sobel_threshold = rospy.get_param('~sobel_threshold')

    def cbSwitch(self, switch_msg):
        self.active = switch_msg.data

    def cbImage(self, image_msg):
        if not self.active:
            return
        # Start a daemon thread to process the image
        thread = threading.Thread(target=self.processImage, args=(image_msg, ))
        thread.daemon = True
        thread.start()
        # Returns right away; processing happens on the daemon thread

    def cbTransform(self, transform_msg):
        self.ai.shift = transform_msg.s[0:3]
        self.ai.scale = transform_msg.s[3:6]

    def intermittent_log(self, s):
        if self.intermittent_counter % self.intermittent_interval != 1:
            return
        n = self.node_name
        rospy.loginfo('[%s]%3d:%s' % (n, self.intermittent_counter, s))

    def processImage(self, image_msg):
        if not self.thread_lock.acquire(False):
            # Return immediately if the thread is locked
            return

        tk = TimeKeeper(image_msg)

        self.intermittent_counter += 1

        # Decode from compressed image with OpenCV
        image_cv = image_cv_from_jpg(image_msg.data)

        tk.completed('decoded')

        # Resize and crop image
        hei_original, wid_original = image_cv.shape[0:2]

        if self.image_size[0] != hei_original or self.image_size[
                1] != wid_original:
            # image_cv = cv2.GaussianBlur(image_cv, (5,5), 2)
            image_cv = cv2.resize(image_cv,
                                  (self.image_size[1], self.image_size[0]),
                                  interpolation=cv2.INTER_NEAREST)
        image_cv = image_cv[self.top_cutoff:, :, :]

        tk.completed('resized')

        # apply color correction: AntiInstagram
        image_cv_corr = self.ai.applyTransform(image_cv)
        image_cv_corr = cv2.convertScaleAbs(image_cv_corr)

        tk.completed('corrected')

        # Set the image to be detected
        self.detector.setImage(image_cv_corr)

        # Detect lines and normals
        lines_white, normals_white, centers_white, area_white = self.detector.detectLines2(
            'white')
        lines_yellow, normals_yellow, centers_yellow, area_yellow = self.detector.detectLines2(
            'yellow')
        lines_red, normals_red, centers_red, area_red = self.detector.detectLines2(
            'red')

        tk.completed('detected')

        # SegmentList constructor
        segmentList = SegmentList()
        segmentList.header.stamp = image_msg.header.stamp

        # Convert to normalized pixel coordinates, and add segments to segmentList
        arr_cutoff = np.array((0, self.top_cutoff, 0, self.top_cutoff))
        arr_ratio = np.array(
            (1. / self.image_size[1], 1. / self.image_size[0],
             1. / self.image_size[1], 1. / self.image_size[0]))
        if len(lines_white) > 0:
            lines_normalized_white = ((lines_white + arr_cutoff) * arr_ratio)
            segmentList.segments.extend(
                self.toSegmentMsg(lines_normalized_white, normals_white,
                                  Segment.WHITE))
        if len(lines_yellow) > 0:
            lines_normalized_yellow = ((lines_yellow + arr_cutoff) * arr_ratio)
            segmentList.segments.extend(
                self.toSegmentMsg(lines_normalized_yellow, normals_yellow,
                                  Segment.YELLOW))
        if len(lines_red) > 0:
            lines_normalized_red = ((lines_red + arr_cutoff) * arr_ratio)
            segmentList.segments.extend(
                self.toSegmentMsg(lines_normalized_red, normals_red,
                                  Segment.RED))

        self.intermittent_log(
            '# segments: white %3d yellow %3d red %3d' %
            (len(lines_white), len(lines_yellow), len(lines_red)))

        tk.completed('prepared')

        # Publish segmentList
        self.pub_lines.publish(segmentList)
        tk.completed('pub_lines')

        # VISUALIZATION only below

        # Publish the frame with lines

        # Draw lines and normals
        image_with_lines = np.copy(image_cv_corr)
        drawLines(image_with_lines, lines_white, (0, 0, 0))
        drawLines(image_with_lines, lines_yellow, (255, 0, 0))
        drawLines(image_with_lines, lines_red, (0, 255, 0))
        #drawNormals2(image_with_lines, centers_white, normals_white, (0,0,0))
        #drawNormals2(image_with_lines, centers_yellow, normals_yellow, (255,0,0))
        #drawNormals2(image_with_lines, centers_red, normals_red, (0,255,0))

        tk.completed('drawn')

        image_msg_out = self.bridge.cv2_to_imgmsg(image_with_lines, "bgr8")
        image_msg_out.header.stamp = image_msg.header.stamp
        self.pub_image.publish(image_msg_out)

        tk.completed('pub_image')

        # Verbose
        if self.verbose:

            color_segment = self.detector.color_segment(
                area_white, area_red, area_yellow)

            edge_msg_out = self.bridge.cv2_to_imgmsg(self.detector.edges,
                                                     "mono8")
            colorSegment_msg_out = self.bridge.cv2_to_imgmsg(
                color_segment, "bgr8")
            self.pub_edge.publish(edge_msg_out)
            self.pub_colorSegment.publish(colorSegment_msg_out)

        tk.completed('pub_edge')

        self.intermittent_log(tk.getall())

        # Release the thread lock
        self.thread_lock.release()

    def onShutdown(self):
        rospy.loginfo("[LineDetectorNode2] Shutdown.")

    def toSegmentMsg(self, lines, normals, color):

        segmentMsgList = []
        for x1, y1, x2, y2, norm_x, norm_y in np.hstack((lines, normals)):
            segment = Segment()
            segment.color = color
            segment.pixels_normalized[0].x = x1
            segment.pixels_normalized[0].y = y1
            segment.pixels_normalized[1].x = x2
            segment.pixels_normalized[1].y = y2
            segment.normal.x = norm_x
            segment.normal.y = norm_y

            segmentMsgList.append(segment)
        return segmentMsgList
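# A minimal launch sketch (the actual entry point is not shown above; the node name is
# an assumption): a node such as LineDetectorNode2 is started by registering it with
# rospy and handing control to the callback loop.
import rospy

if __name__ == '__main__':
    rospy.init_node('line_detector_node2', anonymous=False)
    node = LineDetectorNode2()                 # the class defined in Example #25
    rospy.on_shutdown(node.onShutdown)         # logs a shutdown message on exit
    rospy.spin()                               # callbacks do all the work from here on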