Example #1
0
    def __init__(self):
        """Configure the line-detector node: state, calibration, ROS pub/sub.

        NOTE(review): the original body mixed tabs and spaces for
        indentation (a TabError under Python 3); indentation is
        normalized to spaces here. No other behavior is changed.
        """
        self.node_name = "LineDetectorNode"

        # Lock so that at most one image is processed at a time; frames
        # arriving while it is held are dropped rather than queued.
        self.thread_lock = threading.Lock()

        # Converts between ROS image messages and OpenCV images.
        self.bridge = CvBridge()

        # Node can be toggled on/off via the ~switch topic.
        self.active = True

        self.stats = Stats()

        # Only be verbose every `intermittent_interval` cycles.
        self.intermittent_interval = 100
        self.intermittent_counter = 0
        # Path of the camera-calibration YAML; filled by updateParams().
        self.calibfile = ''
        # color correction
        self.ai = AntiInstagram()

        # these will be added if it becomes verbose
        self.pub_edge = None
        self.pub_colorSegment = None

        # We use two different detectors (parameters) for lane following and
        # intersection navigation (both located in yaml file)
        self.detector = None
        self.detector_intersection = None
        self.detector_used = self.detector

        # Camera info parsed from the calibration YAML
        # (see parse_calibration_yaml).
        self.pcm_ = None

        self.verbose = None
        self.updateParams(None)  # also reads ~calibfile into self.calibfile

        print('calib file is')
        print(self.calibfile)
        #self._load_camera_info(self.calibfile)
        self.parse_calibration_yaml(self.calibfile)
        self.fsm_state = "NORMAL_JOYSTICK_CONTROL"

        # Publishers
        self.pub_lines = rospy.Publisher("~segment_list", SegmentList, queue_size=1)
        self.pub_image = rospy.Publisher("~image_with_lines", Image, queue_size=1)
        # Publisher for camera info - needed for the ground_projection
        self.cam_info_pub = rospy.Publisher('/default/camera_node/real_camera_info', CameraInfo, queue_size=1)

        # Subscribers
        self.sub_image = rospy.Subscriber("~corrected_image/compressed", CompressedImage, self.cbImage, queue_size=1)
        self.sub_transform = rospy.Subscriber("~transform", AntiInstagramTransform, self.cbTransform, queue_size=1)
        self.sub_switch = rospy.Subscriber("~switch", BoolStamped, self.cbSwitch, queue_size=1)
        self.sub_fsm = rospy.Subscriber("~fsm_mode", FSMState, self.cbFSM, queue_size=1)

        rospy.loginfo("[%s] Initialized (verbose = %s)." %(self.node_name, self.verbose))

        # Re-read dynamic parameters every 2 seconds.
        rospy.Timer(rospy.Duration.from_sec(2.0), self.updateParams)
Example #2
0
class LineDetectorNode(object):
    """ROS node that detects white/yellow/red lane markings in camera images.

    Incoming compressed images are rectified, resized, cropped and run
    through a configurable line detector; the resulting segments are
    published as a ``SegmentList`` (normalized pixel coordinates) for
    downstream ground projection.
    """

    def __init__(self):
        """Configure node state, load calibration, set up ROS pub/sub."""
        self.node_name = "LineDetectorNode"

        # Lock so that at most one image is processed at a time; frames
        # arriving while it is held are dropped (see processImage).
        self.thread_lock = threading.Lock()

        # Converts between ROS image messages and OpenCV images.
        self.bridge = CvBridge()

        # Node can be toggled on/off via the ~switch topic.
        self.active = True

        self.stats = Stats()

        # Only be verbose every `intermittent_interval` cycles.
        self.intermittent_interval = 100
        self.intermittent_counter = 0
        # Path of the camera-calibration YAML; filled by updateParams().
        self.calibfile = ''
        # color correction
        self.ai = AntiInstagram()

        # these will be added if it becomes verbose
        self.pub_edge = None
        self.pub_colorSegment = None

        # We use two different detectors (parameters) for lane following and
        # intersection navigation (both located in yaml file)
        self.detector = None
        self.detector_intersection = None
        self.detector_used = self.detector

        # Camera info parsed from the calibration YAML
        # (see parse_calibration_yaml).
        self.pcm_ = None

        self.verbose = None
        self.updateParams(None)  # also reads ~calibfile into self.calibfile

        print('calib file is')
        print(self.calibfile)
        #self._load_camera_info(self.calibfile)
        self.parse_calibration_yaml(self.calibfile)
        self.fsm_state = "NORMAL_JOYSTICK_CONTROL"

        # Publishers
        self.pub_lines = rospy.Publisher("~segment_list",
                                         SegmentList,
                                         queue_size=1)
        self.pub_image = rospy.Publisher("~image_with_lines",
                                         Image,
                                         queue_size=1)
        # Publisher for camera info - needed for the ground_projection
        self.cam_info_pub = rospy.Publisher(
            '/default/camera_node/real_camera_info', CameraInfo, queue_size=1)

        # Subscribers
        self.sub_image = rospy.Subscriber("~corrected_image/compressed",
                                          CompressedImage,
                                          self.cbImage,
                                          queue_size=1)
        self.sub_transform = rospy.Subscriber("~transform",
                                              AntiInstagramTransform,
                                              self.cbTransform,
                                              queue_size=1)
        self.sub_switch = rospy.Subscriber("~switch",
                                           BoolStamped,
                                           self.cbSwitch,
                                           queue_size=1)
        self.sub_fsm = rospy.Subscriber("~fsm_mode",
                                        FSMState,
                                        self.cbFSM,
                                        queue_size=1)

        rospy.loginfo("[%s] Initialized (verbose = %s)." %
                      (self.node_name, self.verbose))

        # Re-read dynamic parameters every 2 seconds.
        rospy.Timer(rospy.Duration.from_sec(2.0), self.updateParams)

    def cbFSM(self, msg):
        """FSM-state callback: pick the detector matching the new state."""
        self.fsm_state = msg.state

        # Intersection states use the intersection-tuned detector;
        # every other state uses the lane-following detector.
        if self.fsm_state in [
                "INTERSECTION_PLANNING", "INTERSECTION_COORDINATION",
                "INTERSECTION_CONTROL"
        ]:
            self.detector_used = self.detector_intersection
        else:
            self.detector_used = self.detector

    def updateParams(self, _event):
        """Fetch ROS parameters; instantiate detectors on first call.

        Called once from __init__ and then periodically from a
        rospy.Timer (the `_event` argument is the timer event, unused).
        """
        #print('updating params')
        old_verbose = self.verbose

        self.verbose = rospy.get_param('~verbose', True)
        self.calibfile = rospy.get_param('~calibfile')
        # self.loginfo('verbose = %r' % self.verbose)
        if self.verbose != old_verbose:
            self.loginfo('Verbose is now %r' % self.verbose)

        self.image_size = rospy.get_param('~img_size')
        self.top_cutoff = rospy.get_param('~top_cutoff')

        # Detectors are only instantiated once, on the first call.
        if self.detector is None:
            c = rospy.get_param('~detector')
            # Expected format: [class/constructor name, parameter dict].
            assert isinstance(c, list) and len(c) == 2, c

            #         if str(self.detector_config) != str(c):
            self.loginfo('new detector config: %s' % str(c))

            self.detector = instantiate(c[0], c[1])
            #             self.detector_config = c
            self.detector_used = self.detector

        if self.detector_intersection is None:
            c = rospy.get_param('~detector_intersection')
            assert isinstance(c, list) and len(c) == 2, c

            #         if str(self.detector_config) != str(c):
            self.loginfo('new detector_intersection config: %s' % str(c))

            self.detector_intersection = instantiate(c[0], c[1])
            #             self.detector_config = c

        # Debug publishers are created lazily the first time verbose is on.
        if self.verbose and self.pub_edge is None:
            self.pub_edge = rospy.Publisher("~edge", Image, queue_size=1)
            self.pub_colorSegment = rospy.Publisher("~colorSegment",
                                                    Image,
                                                    queue_size=1)

    def cbSwitch(self, switch_msg):
        """~switch callback: enable/disable image processing."""
        self.active = switch_msg.data

    def cbImage(self, image_msg):
        """Image callback: hand the frame to a daemon worker thread."""
        print('line_detector_node: image received!!')
        self.stats.received()

        if not self.active:
            return
        # Start a daemon thread to process the image
        thread = threading.Thread(target=self.processImage, args=(image_msg, ))
        # `daemon = True` replaces the deprecated setDaemon(True).
        thread.daemon = True
        thread.start()
        # Returns rightaway

    def cbTransform(self, transform_msg):
        """~transform callback: update the AntiInstagram shift/scale."""
        # transform_msg.s packs [shift(3), scale(3)].
        self.ai.shift = transform_msg.s[0:3]
        self.ai.scale = transform_msg.s[3:6]

        self.loginfo("AntiInstagram transform received")

    def loginfo(self, s):
        """Log `s` via rospy, prefixed with the node name."""
        rospy.loginfo('[%s] %s' % (self.node_name, s))

    def intermittent_log_now(self):
        """Return True on the cycles where intermittent logging fires."""
        return self.intermittent_counter % self.intermittent_interval == 1

    def intermittent_log(self, s):
        """Log `s` only on intermittent cycles (see intermittent_log_now)."""
        if not self.intermittent_log_now():
            return
        self.loginfo('%3d:%s' % (self.intermittent_counter, s))

    def processImage(self, image_msg):
        """Process one frame unless another frame is already in flight."""
        # Non-blocking acquire: if busy, count the frame as skipped.
        if not self.thread_lock.acquire(False):
            self.stats.skipped()
            # Return immediately if the thread is locked
            return

        try:
            self.processImage_(image_msg)
        finally:
            # Release the thread lock
            self.thread_lock.release()

    def _load_camera_info(self, filename):
        """Load a calibration YAML into self.pcm_ with numpy-array fields.

        Alternative loader; currently unused (__init__ calls
        parse_calibration_yaml instead).
        """
        calib_data = yaml_load_file(filename)
        #     logger.info(yaml_dump(calib_data))
        self.pcm_ = CameraInfo()

        self.pcm_.width = calib_data['image_width']
        self.pcm_.height = calib_data['image_height']
        self.pcm_.K = np.array(calib_data['camera_matrix']['data']).reshape(
            (3, 3))
        self.pcm_.D = np.array(
            calib_data['distortion_coefficients']['data']).reshape((1, 5))
        self.pcm_.R = np.array(
            calib_data['rectification_matrix']['data']).reshape((3, 3))
        self.pcm_.P = np.array(
            calib_data['projection_matrix']['data']).reshape((3, 4))
        self.pcm_.distortion_model = calib_data['distortion_model']

    def rectify(self, cv_image_raw):
        '''Undistort image using the K/D/R/P matrices in self.pcm_.'''

        cv_image_rectified = np.zeros(np.shape(cv_image_raw))
        mapx = np.ndarray(shape=(self.pcm_.height, self.pcm_.width, 1),
                          dtype='float32')
        mapy = np.ndarray(shape=(self.pcm_.height, self.pcm_.width, 1),
                          dtype='float32')
        mapx, mapy = cv2.initUndistortRectifyMap(
            np.array(self.pcm_.K).reshape((3, 3)),
            np.array(self.pcm_.D).reshape((1, 5)),
            np.array(self.pcm_.R).reshape((3, 3)),
            np.array(self.pcm_.P).reshape((3, 4)),
            (self.pcm_.width, self.pcm_.height), cv2.CV_32FC1, mapx, mapy)
        return cv2.remap(cv_image_raw, mapx, mapy, cv2.INTER_CUBIC,
                         cv_image_rectified)

    def _publish_info(self, caminfo):
        """
        Publishes a default CameraInfo - TODO: Fix after distortion applied in simulator
        """
        #print(caminfo)
        self.cam_info_pub.publish(caminfo)

    def parse_calibration_yaml(self, calib_file):
        """Load a calibration YAML into self.pcm_ (fields kept as lists)."""

        params = yaml_load_file(calib_file)
        #with file(calib_file, 'r') as f:
        #    params = yaml.load(f)

        self.pcm_ = CameraInfo()
        self.pcm_.height = params['image_height']
        self.pcm_.width = params['image_width']
        self.pcm_.distortion_model = params['distortion_model']
        self.pcm_.K = params['camera_matrix']['data']
        self.pcm_.D = params['distortion_coefficients']['data']
        self.pcm_.R = params['rectification_matrix']['data']
        self.pcm_.P = params['projection_matrix']['data']

    def processImage_(self, image_msg):
        """Full pipeline for one frame: rectify, resize, detect, publish."""

        self._publish_info(self.pcm_)
        self.stats.processed()

        if self.intermittent_log_now():
            self.intermittent_log(self.stats.info())
            self.stats.reset()

        tk = TimeKeeper(image_msg)

        self.intermittent_counter += 1

        # Decode from compressed image with OpenCV
        try:
            image_cv = bgr_from_jpg(image_msg.data)
        except ValueError as e:
            self.loginfo('Could not decode image: %s' % e)
            return

        tk.completed('decoded')

        # Resize and crop image
        hei_original, wid_original = image_cv.shape[0:2]

        # Undistort image
        image_cv = self.rectify(image_cv)

        if self.image_size[0] != hei_original or self.image_size[
                1] != wid_original:
            # image_cv = cv2.GaussianBlur(image_cv, (5,5), 2)
            image_cv = cv2.resize(image_cv,
                                  (self.image_size[1], self.image_size[0]),
                                  interpolation=cv2.INTER_NEAREST)
        image_cv = image_cv[self.top_cutoff:, :, :]

        tk.completed('resized')

        # milansc: color correction is now done within the image_tranformer_node (antiInstagram pkg)

        # apply color correction: AntiInstagram
        # NOTE(review): the corrected image is computed but never used below;
        # the detector receives the uncorrected `image_cv` — confirm intent.
        image_cv_corr = self.ai.applyTransform(image_cv)
        image_cv_corr = cv2.convertScaleAbs(image_cv_corr)
        tk.completed('corrected')

        # Set the image to be detected
        self.detector_used.setImage(image_cv)

        # Detect lines and normals

        white = self.detector_used.detectLines('white')
        yellow = self.detector_used.detectLines('yellow')
        red = self.detector_used.detectLines('red')

        tk.completed('detected')

        # SegmentList constructor
        segmentList = SegmentList()
        segmentList.header.stamp = image_msg.header.stamp

        # Convert to normalized pixel coordinates, and add segments to segmentList
        arr_cutoff = np.array((0, self.top_cutoff, 0, self.top_cutoff))
        arr_ratio = np.array(
            (1. / self.image_size[1], 1. / self.image_size[0],
             1. / self.image_size[1], 1. / self.image_size[0]))
        if len(white.lines) > 0:
            lines_normalized_white = ((white.lines + arr_cutoff) * arr_ratio)
            segmentList.segments.extend(
                self.toSegmentMsg(lines_normalized_white, white.normals,
                                  Segment.WHITE))
        if len(yellow.lines) > 0:
            lines_normalized_yellow = ((yellow.lines + arr_cutoff) * arr_ratio)
            segmentList.segments.extend(
                self.toSegmentMsg(lines_normalized_yellow, yellow.normals,
                                  Segment.YELLOW))
        if len(red.lines) > 0:
            lines_normalized_red = ((red.lines + arr_cutoff) * arr_ratio)
            segmentList.segments.extend(
                self.toSegmentMsg(lines_normalized_red, red.normals,
                                  Segment.RED))

        self.intermittent_log(
            '# segments: white %3d yellow %3d red %3d' %
            (len(white.lines), len(yellow.lines), len(red.lines)))

        tk.completed('prepared')

        # Publish segmentList
        self.pub_lines.publish(segmentList)
        tk.completed('--pub_lines--')

        # VISUALIZATION only below

        if self.verbose:

            # print('line_detect_node: verbose is on!')

            # Draw lines and normals
            image_with_lines = np.copy(image_cv)
            drawLines(image_with_lines, white.lines, (0, 0, 0))
            drawLines(image_with_lines, yellow.lines, (255, 0, 0))
            drawLines(image_with_lines, red.lines, (0, 255, 0))

            tk.completed('drawn')

            # Publish the frame with lines
            image_msg_out = self.bridge.cv2_to_imgmsg(image_with_lines, "bgr8")
            image_msg_out.header.stamp = image_msg.header.stamp
            self.pub_image.publish(image_msg_out)

            tk.completed('pub_image')

            #         if self.verbose:
            colorSegment = color_segment(white.area, red.area, yellow.area)
            edge_msg_out = self.bridge.cv2_to_imgmsg(self.detector_used.edges,
                                                     "mono8")
            colorSegment_msg_out = self.bridge.cv2_to_imgmsg(
                colorSegment, "bgr8")
            self.pub_edge.publish(edge_msg_out)
            self.pub_colorSegment.publish(colorSegment_msg_out)

            tk.completed('pub_edge/pub_segment')

        self.intermittent_log(tk.getall())

    def onShutdown(self):
        """rospy shutdown hook."""
        self.loginfo("Shutdown.")

    def toSegmentMsg(self, lines, normals, color):
        """Build Segment messages from (N,4) lines and (N,2) normals.

        Each row of the stacked array is (x1, y1, x2, y2, norm_x, norm_y)
        in normalized pixel coordinates; `color` is the Segment color enum.
        """
        segmentMsgList = []
        for x1, y1, x2, y2, norm_x, norm_y in np.hstack((lines, normals)):
            segment = Segment()
            segment.color = color
            segment.pixels_normalized[0].x = x1
            segment.pixels_normalized[0].y = y1
            segment.pixels_normalized[1].x = x2
            segment.pixels_normalized[1].y = y2
            segment.normal.x = norm_x
            segment.normal.y = norm_y

            segmentMsgList.append(segment)
        return segmentMsgList