Example #1
class TLDetector(object):
    def __init__(self):
        rospy.init_node('tl_detector')

        self.pose = None
        self.waypoints = None
        self.waypoints_2d = None
        self.waypoint_tree = None
        self.camera_image = None
        self.lights = []
        self.has_image = False

        sub1 = rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)
        sub2 = rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)

        '''
        /vehicle/traffic_lights provides you with the location of the traffic light in 3D map space and
        helps you acquire an accurate ground truth data source for the traffic light
        classifier by sending the current color state of all traffic lights in the
        simulator. When testing on the vehicle, the color state will not be available. You'll need to
        rely on the position of the light and the camera image to predict it.
        '''
        sub3 = rospy.Subscriber('/vehicle/traffic_lights', TrafficLightArray, self.traffic_cb)
        sub6 = rospy.Subscriber('/image_color', Image, self.image_cb)

        config_string = rospy.get_param("/traffic_light_config")
        self.config = yaml.load(config_string)

        self.upcoming_red_light_pub = rospy.Publisher('/traffic_waypoint', Int32, queue_size=1)

        self.bridge = CvBridge()
        self.light_classifier = TLClassifier()
        self.listener = tf.TransformListener()

        self.state = TrafficLight.UNKNOWN
        self.last_state = TrafficLight.UNKNOWN
        self.last_wp = -1
        self.state_count = 0
        self.image_counter = 0

        rospy.spin()

    def pose_cb(self, msg):
        self.pose = msg

    def waypoints_cb(self, waypoints):
        self.waypoints = waypoints

        # setup k-d tree in order to find the closest path waypoint to the given position [get_closest_waypoint(position)]
        # the code is copied from waypoints_cb(pose) in ros/src/waypoint_updater/waypoint_updater.py written by https://github.com/ysonggit
        if not self.waypoints_2d:
            self.waypoints_2d = [[waypoint.pose.pose.position.x, waypoint.pose.pose.position.y] for waypoint in waypoints.waypoints]
            self.waypoint_tree = cKDTree(self.waypoints_2d)

    def traffic_cb(self, msg):
        self.lights = msg.lights

    def image_cb(self, msg):
        """Identifies red lights in the incoming camera image and publishes the index
            of the waypoint closest to the red light's stop line to /traffic_waypoint
        Args:
            msg (Image): image from car-mounted camera
        """
        if self.image_counter < IMAGE_CB_THRESHOLD:
            self.image_counter += 1
            return
        else:
            self.image_counter = 0

        self.has_image = True
        self.camera_image = msg
        light_wp, state = self.process_traffic_lights()
        rospy.loginfo("Closest light waypoint= {0}, light state= {1}".format(light_wp, state))

        '''
        Publish upcoming red lights at camera frequency.
        Each predicted state has to occur `STATE_COUNT_THRESHOLD` number
        of times till we start using it. Otherwise the previous stable state is
        used.
        '''
        if self.state != state:
            self.state_count = 0
            self.state = state
        elif self.state_count >= STATE_COUNT_THRESHOLD:
            self.last_state = self.state
            light_wp = light_wp if state == TrafficLight.RED else -1
            self.last_wp = light_wp
            self.upcoming_red_light_pub.publish(Int32(light_wp))
        else:
            self.upcoming_red_light_pub.publish(Int32(self.last_wp))
        self.state_count += 1
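        # Worked example of the debounce above (illustrative only, assuming
        # STATE_COUNT_THRESHOLD = 3): if the classifier reports RED on four consecutive
        # processed frames, frame 1 only resets the counter and records the new state
        # (nothing is published that frame), frames 2-3 republish the previous stable
        # index (self.last_wp), and from frame 4 onward the red light's stop-line
        # waypoint index is published.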

    def get_closest_waypoint(self, position):
        """Identifies the closest path waypoint to the given position
            https://en.wikipedia.org/wiki/Closest_pair_of_points_problem
        Args:
            position [x, y]: position to match a waypoint to
        Returns:
            int: index of the closest waypoint in self.waypoints
        """
        # cKDTree.query() returns (distance, index), but only index is needed
        return self.waypoint_tree.query(position)[1]

    def get_light_state(self, light):
        """Determines the current color of the traffic light
        Args:
            light (TrafficLight): light to classify
        Returns:
            int: ID of traffic light color (specified in styx_msgs/TrafficLight)
        """
        if not self.has_image:
            # self.prev_light_loc = None
            # return False
            return TrafficLight.UNKNOWN

        cv_image = self.bridge.imgmsg_to_cv2(self.camera_image, "bgr8")
        print('Light State: ', light.state)
        # Get classification
        return self.light_classifier.get_classification(cv_image)

    def process_traffic_lights(self):
        """Finds closest visible traffic light, if one exists, and determines its
            location and color
        Returns:
            int: index of the waypoint closest to the upcoming stop line for a traffic light (-1 if none exists)
            int: ID of traffic light color (specified in styx_msgs/TrafficLight)
        """
        closest_light = None
        closest_light_waypoint_idx = None

        # List of positions that correspond to the line to stop in front of for a given intersection
        stop_line_positions = self.config['stop_line_positions']
        if self.pose:
            car_waypoint_idx = self.get_closest_waypoint([self.pose.pose.position.x, self.pose.pose.position.y])

            # find the closest visible traffic light (if one exists)
            min_stop_line_dist = len(self.waypoints.waypoints)
            for idx, light in enumerate(self.lights):
                current_stop_line_position = stop_line_positions[idx]
                current_stop_line_waypoint_idx = self.get_closest_waypoint([current_stop_line_position[0], current_stop_line_position[1]])

                # number of waypoints between selected stop_line and the car
                current_stop_line_dist = current_stop_line_waypoint_idx - car_waypoint_idx

                # check if the stop line is at or ahead of the car (current_stop_line_dist >= 0) and is the closest so far
                if 0.0 <= current_stop_line_dist < min_stop_line_dist:
                    min_stop_line_dist = current_stop_line_dist
                    closest_light = light
                    closest_light_waypoint_idx = current_stop_line_waypoint_idx

        if closest_light is not None and closest_light_waypoint_idx is not None:
            state = self.get_light_state(closest_light)
            return closest_light_waypoint_idx, state
        self.waypoints = None  # TODO check why self.waypoints are reset
        return -1, TrafficLight.UNKNOWN
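
The listing above assumes a module-level preamble that is not shown. A minimal sketch of what it could look like (import paths follow the Udacity CarND-Capstone starter layout; the constant values are illustrative assumptions, not taken from the original):

import rospy
import yaml
import tf
from cv_bridge import CvBridge
from scipy.spatial import cKDTree
from std_msgs.msg import Int32
from sensor_msgs.msg import Image
from geometry_msgs.msg import PoseStamped
from styx_msgs.msg import TrafficLightArray, TrafficLight, Lane
from light_classification.tl_classifier import TLClassifier

STATE_COUNT_THRESHOLD = 3  # consecutive identical classifications required before trusting a state
IMAGE_CB_THRESHOLD = 2     # process one camera frame out of every IMAGE_CB_THRESHOLD + 1

if __name__ == '__main__':
    try:
        TLDetector()
    except rospy.ROSInterruptException:
        rospy.logerr('Could not start traffic node.')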
Example #2
class TLDetector(object):
    def __init__(self):
        rospy.init_node('tl_detector')
        self.pose = None
        self.waypoints = None
        self.camera_image = None
        self.lights = []

        sub1 = rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)
        sub2 = rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)

        '''
        /vehicle/traffic_lights provides you with the location of the traffic light in 3D map space and
        helps you acquire an accurate ground truth data source for the traffic light
        classifier by sending the current color state of all traffic lights in the
        simulator. When testing on the vehicle, the color state will not be available. You'll need to
        rely on the position of the light and the camera image to predict it.
        '''
        sub3 = rospy.Subscriber('/vehicle/traffic_lights', TrafficLightArray, self.traffic_cb)
        sub6 = rospy.Subscriber('/image_color', Image, self.image_cb)

        config_string = rospy.get_param("/traffic_light_config")
        self.config = yaml.load(config_string)

        self.upcoming_red_light_pub = rospy.Publisher('/traffic_waypoint', Int32, queue_size=1)

        self.bridge = CvBridge()

        graph = self.config['model']['graph']
        labels = self.config['model']['labels']
        self.light_classifier = TLClassifier(graph, labels)
        self.listener = tf.TransformListener()

        self.state = TrafficLight.UNKNOWN
        self.last_state = TrafficLight.UNKNOWN
        self.last_wp = -1
        self.state_count = 0

        rospy.spin()


    def pose_cb(self, msg):
        self.pose = msg

    def waypoints_cb(self, msg):
        self.waypoints = msg.waypoints

    def traffic_cb(self, msg):
        self.lights = msg.lights

    def image_cb(self, msg):
        """Identifies red lights in the incoming camera image and publishes the index
            of the waypoint closest to the red light's stop line to /traffic_waypoint
        Args:
            msg (Image): image from car-mounted camera
        """
        self.has_image = True

        self.camera_image = msg
        light_wp, state = self.process_traffic_lights()

        '''
        Publish upcoming red lights at camera frequency.
        Each predicted state has to occur `STATE_COUNT_THRESHOLD` number
        of times till we start using it. Otherwise the previous stable state is
        used.
        '''
        if self.state != state:
            self.state_count = 0
            self.state = state
        elif self.state_count >= STATE_COUNT_THRESHOLD:
            self.last_state = self.state
            light_wp = light_wp if state != TrafficLight.GREEN else -1
            self.last_wp = light_wp
            self.upcoming_red_light_pub.publish(Int32(light_wp))
        else:
            self.upcoming_red_light_pub.publish(Int32(self.last_wp))
        self.state_count += 1

    def get_closest_waypoint(self, x,y):
        """Identifies the closest path waypoint to the given position
            https://en.wikipedia.org/wiki/Closest_pair_of_points_problem
        Args:
            x (float), y (float): position to match a waypoint to
        Returns:
            int: index of the closest waypoint in self.waypoints
        """
        #TODO implement
        if self.waypoints is None:
            return

        # Find index of closest waypoint
        closestwp = float("inf")
        targetIndex = 0

        for index, waypoint in enumerate(self.waypoints):
            dist = self.distanceCalculation(x,waypoint.pose.pose.position.x,y,waypoint.pose.pose.position.y)

            if dist < closestwp:
                closestwp = dist
                targetIndex = index
        return targetIndex

    # Calculate distance
    def distanceCalculation(self,a1,a2,b1,b2):
        return math.sqrt((a1-a2)**2 + (b1-b2)**2)
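
    # Added sketch, not part of the original example: the brute-force scan above is
    # O(N) per lookup; the same query can be answered in O(log N) with scipy's cKDTree,
    # as Example #1 does. The helper name and the lazy tree construction here are
    # illustrative assumptions only.
    def get_closest_waypoint_kdtree(self, x, y):
        from scipy.spatial import cKDTree
        if getattr(self, '_waypoint_tree', None) is None:
            # Build the 2-D tree once from the stored waypoints
            self._waypoint_tree = cKDTree(
                [[wp.pose.pose.position.x, wp.pose.pose.position.y]
                 for wp in self.waypoints])
        # query() returns (distance, index); only the index is needed
        return self._waypoint_tree.query([x, y])[1]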

    def get_light_state(self, light):
        """Determines the current color of the traffic light
        Args:
            light (TrafficLight): light to classify
        Returns:
            int: ID of traffic light color (specified in styx_msgs/TrafficLight)
        """
        if not self.has_image:
            self.prev_light_loc = None
            # Report UNKNOWN rather than False (False compares equal to TrafficLight.RED)
            return TrafficLight.UNKNOWN

        cv_image = self.bridge.imgmsg_to_cv2(self.camera_image, "rgb8")

        #Get classification
        lightState = self.light_classifier.get_classification(cv_image)

        return lightState

    def process_traffic_lights(self):
        """Finds closest visible traffic light, if one exists, and determines its
            location and color
        Returns:
            int: index of the waypoint closest to the upcoming stop line for a traffic light (-1 if none exists)
            int: ID of traffic light color (specified in styx_msgs/TrafficLight)
        """

        if self.waypoints is None:
            return -1, TrafficLight.UNKNOWN


        light = None
        light_wp = None

        # List of positions that correspond to the line to stop in front of for a given intersection
        stop_line_positions = self.config['stop_line_positions']
        if self.pose is None:
            return -1, TrafficLight.UNKNOWN
        car_position = self.get_closest_waypoint(self.pose.pose.position.x, self.pose.pose.position.y)

        # Find the closest stop line ahead of the car.
        # `Trajectory` is assumed to be a module-level constant: the maximum number of
        # waypoints to look ahead for a relevant stop line.
        closestwp = float('inf')
        for lightIndex, stopLine in enumerate(stop_line_positions):
            lineIndex = self.get_closest_waypoint(stopLine[0], stopLine[1])

            if (lineIndex > car_position) and (lineIndex - car_position < Trajectory) and (lineIndex - car_position < closestwp):
                closestwp = lineIndex - car_position
                light = self.lights[lightIndex]
                light_wp = lineIndex


        if light:
            state = self.get_light_state(light)
            rospy.loginfo("Herby Light Status: %s", lightState[state])
            return light_wp, state

        return -1, TrafficLight.UNKNOWN
Example #3
class TLDetector(object):
    def __init__(self):
        rospy.init_node('tl_detector')

        self.last_busy = 0
        self.pose = None
        self.waypoints = None
        self.camera_image = None
        self.lights = []

        config_string = rospy.get_param("/traffic_light_config")
        self.config = yaml.load(config_string)

        self.upcoming_red_light_pub = rospy.Publisher('/traffic_waypoint',
                                                      Int32,
                                                      queue_size=1)

        self.bridge = CvBridge()
        self.light_classifier = TLClassifier()
        self.listener = tf.TransformListener()

        self.state = TrafficLight.UNKNOWN
        self.last_state = TrafficLight.UNKNOWN
        self.last_wp = -1
        self.state_count = 0

        # Generate a list of stop line points from given configuration
        stop_line_positions = self.config['stop_line_positions']
        self.stop_line_points = [
            self.make_point(p[0], p[1], 0) for p in stop_line_positions
        ]

        # Warm-up inference on a dummy frame so the first real camera image is not
        # delayed by classifier initialization.
        self.light_classifier.get_classification(np.zeros((800, 600, 3)))

        sub1 = rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)
        sub2 = rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)
        '''
        /vehicle/traffic_lights provides you with the location of the traffic light in 3D map space and
        helps you acquire an accurate ground truth data source for the traffic light
        classifier by sending the current color state of all traffic lights in the
        simulator. When testing on the vehicle, the color state will not be available. You'll need to
        rely on the position of the light and the camera image to predict it.
        '''
        sub3 = rospy.Subscriber('/vehicle/traffic_lights', TrafficLightArray,
                                self.traffic_cb)
        sub6 = rospy.Subscriber('/image_color', Image, self.image_cb)

        rospy.spin()

    def make_point(self, x, y, z):
        point = Point()
        point.x = x
        point.y = y
        point.z = z
        return point

    def pose_cb(self, msg):
        self.pose = msg

    def waypoints_cb(self, waypoints):
        self.waypoints = waypoints.waypoints

    def traffic_cb(self, msg):
        self.lights = msg.lights

    def image_cb(self, msg):
        """Identifies red lights in the incoming camera image and publishes the index
            of the waypoint closest to the red light's stop line to /traffic_waypoint

        Args:
            msg (Image): image from car-mounted camera

        """
        if len(self.lights) < 1:
            print("Got image, ignoring because no lights are known.")
            return False
        if self.waypoints is None:
            print("Got image, ignoring because no waypoints are known.")
            return False
        # Skip frames received within 70 ms of the previous one so we do not saturate
        # the pipeline; images take ~50-100 ms to process. Use a float threshold
        # (70 / 1000 truncates to 0 under Python 2 integer division).
        if time.time() - self.last_busy < 0.070:
            print("Busy, skipping frame.")
            return False
        else:
            print("Processing image")
            self.last_busy = time.time()
        self.has_image = True
        self.camera_image = msg
        light_wp, state = self.process_traffic_lights()
        '''
        Publish upcoming red lights at camera frequency.
        Each predicted state has to occur `STATE_COUNT_THRESHOLD` number
        of times till we start using it. Otherwise the previous stable state is
        used.
        '''
        if self.state != state:
            self.state_count = 0
            self.state = state
        elif self.state_count >= STATE_COUNT_THRESHOLD:
            self.last_state = self.state
            light_wp = light_wp if state == TrafficLight.RED else -1
            self.last_wp = light_wp
            self.upcoming_red_light_pub.publish(Int32(light_wp))
        else:
            self.upcoming_red_light_pub.publish(Int32(self.last_wp))
        self.state_count += 1

    def get_closest_waypoint(self, position):
        """Identifies the closest path waypoint to the given position
            https://en.wikipedia.org/wiki/Closest_pair_of_points_problem
        Args:
            position (Point): position to match a waypoint to

        Returns:
            int: index of the closest waypoint in self.waypoints

        """
        if self.waypoints is None:
            return 0
        waypoints = [
            waypoint.pose.pose.position for waypoint in self.waypoints
        ]
        index, _ = self.get_closest_point(position, waypoints)
        return index

    def get_closest_point(self, needle, list_of_points):
        """Identifies the closest point in a list to the given position
            https://en.wikipedia.org/wiki/Closest_pair_of_points_problem
        Args:
            needle (Point): position to match
            list_of_points: candidate points to search

        Returns:
            int: index of the closest point in list_of_points
            pose: closest point

        """
        # Brute-force scan is fine here since N is small
        minimum_distance = float('inf')
        closest_point = 0
        for i, point in enumerate(list_of_points):
            dist = self.distance_between_points(point, needle)
            if dist < minimum_distance:
                minimum_distance = dist
                closest_point = i

        return closest_point, list_of_points[closest_point]

    def get_closest_traffic_signal(self, point):
        """Identifies the closest path waypoint to the given position
            https://en.wikipedia.org/wiki/Closest_pair_of_points_problem
        Args:
            pose (Pose): position to match a waypoint to

        Returns:
            int: index of the closest waypoint in self.waypoints

        """
        light_poses = [light.pose.pose.position for light in self.lights]
        index, _ = self.get_closest_point(point, light_poses)
        return index

    def distance_between_points(self, point1, point2):
        """Returns the Euclidean distance between two points"""
        return math.sqrt(
            math.pow(point1.x - point2.x, 2) +
            math.pow(point1.y - point2.y, 2) +
            math.pow(point1.z - point2.z, 2))

    def get_light_state(self, light):
        """Determines the current color of the traffic light

        Args:
            light (TrafficLight): light to classify

        Returns:
            int: ID of traffic light color (specified in styx_msgs/TrafficLight)

        """
        if not self.has_image:
            self.prev_light_loc = None
            # Report UNKNOWN rather than False (False compares equal to TrafficLight.RED)
            return TrafficLight.UNKNOWN

        cv_image = self.bridge.imgmsg_to_cv2(self.camera_image, "rgb8")

        # Use the state provided by the simulator when it is known; otherwise fall back
        # to the TensorFlow light classifier.
        if light.state != TrafficLight.UNKNOWN and light.state != 3:
            print("Using provided state: " + str(light.state))
            return light.state
        else:
            return self.light_classifier.get_classification(cv_image)

    def process_traffic_lights(self):
        """Finds closest visible traffic light, if one exists, and determines its
            location and color

        Returns:
            int: index of the waypoint closest to the upcoming stop line for a traffic light (-1 if none exists)
            int: ID of traffic light color (specified in styx_msgs/TrafficLight)

        """
        light = None

        # List of positions that correspond to the line to stop in front of for a given intersection
        if (self.pose):
            car_position = self.get_closest_waypoint(self.pose.pose.position)
            closest_light_index = self.get_closest_traffic_signal(
                self.waypoints[car_position].pose.pose.position)
            light = self.lights[closest_light_index]

        if light:
            state = self.get_light_state(light)
            _, stopline = self.get_closest_point(light.pose.pose.position,
                                                 self.stop_line_points)
            stopline_waypoint_index = self.get_closest_waypoint(stopline)
            return stopline_waypoint_index, state
        return -1, TrafficLight.UNKNOWN
Example #4
class TLDetector(object):
    def __init__(self):
        rospy.init_node('tl_detector')

        self.pose = None
        self.waypoints = None
        self.waypoint_tree = None
        self.waypoints_2d = None
        self.camera_image = None
        self.lights = []
        self.closest_light = None
        self.camera_info = None

        sub1 = rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)
        sub2 = rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)
        sub3 = rospy.Subscriber('/vehicle/traffic_lights', TrafficLightArray,
                                self.traffic_cb)
        sub6 = rospy.Subscriber('/image_color', Image, self.image_cb)
        sub7 = rospy.Subscriber('/camera_info', CameraInfo, self.camera_cb)

        # get camera info
        #calib_yaml = rospy.get_param("/grasshopper_calibration_yaml")
        #self.camera_info = yaml_to_CameraInfo(calib_yaml)

        config_string = rospy.get_param("/traffic_light_config")
        self.config = yaml.load(config_string)

        self.upcoming_red_light_pub = rospy.Publisher('/traffic_waypoint',
                                                      Int32,
                                                      queue_size=1)
        self.TLDetector_pub = rospy.Publisher('/traffic_light_detector',
                                              Image,
                                              queue_size=1)
        self.most_confident_detection = rospy.Publisher(
            '/traffic_light_most_confident', Image, queue_size=1)

        self.bridge = CvBridge()
        self.light_classifier = TLClassifier()
        self.listener = tf.TransformListener()

        self.state = TrafficLight.UNKNOWN
        self.last_state = TrafficLight.UNKNOWN
        self.last_wp = -1
        self.state_count = 0

        self.image_counter = 0

        self.detection_graph = None
        self.category_index = None
        self.tensor_dict = {}
        self.image_tensor = None
        self.sess = None

        self.loadModel()

        self.detections = None
        self.hasDetections = False

        rate = rospy.Rate(5)  # intended 5 Hz throttle; unused because rospy.spin() is called below

        rospy.spin()

    # Load the pretrained detection model
    def loadModel(self):
        rospy.loginfo("Tensorflow version: %s", tensorflow.__version__)
        MODEL_NAME = 'ssd_mobilenet_v1_coco_2017_11_17'
        PATH_TO_FROZEN_GRAPH = MODEL_NAME + '/frozen_inference_graph.pb'
        PATH_TO_LABELS = os.path.join('data', 'mscoco_label_map.pbtxt')

        self.detection_graph = tensorflow.Graph()
        with self.detection_graph.as_default():
            od_graph_def = tensorflow.GraphDef()
            with tensorflow.gfile.GFile(PATH_TO_FROZEN_GRAPH, 'rb') as fid:
                serialized_graph = fid.read()
                od_graph_def.ParseFromString(serialized_graph)
                tensorflow.import_graph_def(od_graph_def, name='')

        #category_index = label_map_util.create_category_index_from_labelmap(PATH_TO_LABELS, use_display_name=True)
        self.category_index = {1: "traffic lights"}
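        # Note (added comment): in the COCO label map the 'traffic light' class id is 10,
        # not 1, and the detection loop in getBBox() does not filter by class at all, so
        # any sufficiently confident detection is treated as a traffic light here.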
        rospy.loginfo("Detection graph loaded")

        rospy.loginfo("Initializing Sesson")
        with self.detection_graph.as_default():
            self.sess = tensorflow.Session()
            # Get handles to input and output tensors
            ops = tensorflow.get_default_graph().get_operations()
            all_tensor_names = {
                output.name
                for op in ops for output in op.outputs
            }
            self.tensor_dict = {}
            for key in [
                    'num_detections', 'detection_boxes', 'detection_scores',
                    'detection_classes', 'detection_masks'
            ]:
                tensor_name = key + ':0'
                if tensor_name in all_tensor_names:
                    self.tensor_dict[key] = tensorflow.get_default_graph(
                    ).get_tensor_by_name(tensor_name)
            if 'detection_masks' in self.tensor_dict:
                # This mask-reframing block comes from the TF Object Detection API
                # tutorial and only applies to mask models (not the SSD model loaded
                # above). As written, `image` refers to the batched input frame, which
                # is only available at inference time in getBBox().
                # The following processing is only for single image
                detection_boxes = tensorflow.squeeze(
                    self.tensor_dict['detection_boxes'], [0])
                detection_masks = tensorflow.squeeze(
                    self.tensor_dict['detection_masks'], [0])
                # Reframe is required to translate mask from box coordinates to image coordinates and fit the image size.
                real_num_detection = tensorflow.cast(
                    self.tensor_dict['num_detections'][0], tensorflow.int32)
                detection_boxes = tensorflow.slice(detection_boxes, [0, 0],
                                                   [real_num_detection, -1])
                detection_masks = tensorflow.slice(
                    detection_masks, [0, 0, 0], [real_num_detection, -1, -1])
                detection_masks_reframed = utils_ops.reframe_box_masks_to_image_masks(
                    detection_masks, detection_boxes, image.shape[1],
                    image.shape[2])
                detection_masks_reframed = tensorflow.cast(
                    tensorflow.greater(detection_masks_reframed, 0.5),
                    tensorflow.uint8)
                # Follow the convention by adding back the batch dimension
                self.tensor_dict['detection_masks'] = tensorflow.expand_dims(
                    detection_masks_reframed, 0)
            self.image_tensor = tensorflow.get_default_graph(
            ).get_tensor_by_name('image_tensor:0')
        rospy.loginfo("Sesson Initialized")

    def getBBox(self):
        #rospy.loginfo("Getting bboxes")
        cv_image = self.bridge.imgmsg_to_cv2(self.camera_image,
                                             desired_encoding="passthrough")

        image_np = np.asarray(cv_image)
        image = np.expand_dims(image_np, axis=0)

        # Run inference
        output_dict = self.sess.run(self.tensor_dict,
                                    feed_dict={self.image_tensor: image})

        # all outputs are float32 numpy arrays, so convert types as appropriate
        output_dict['num_detections'] = int(output_dict['num_detections'][0])
        output_dict['detection_classes'] = output_dict['detection_classes'][
            0].astype(np.uint8)
        output_dict['detection_boxes'] = output_dict['detection_boxes'][0]
        output_dict['detection_scores'] = output_dict['detection_scores'][0]
        if 'detection_masks' in output_dict:
            output_dict['detection_masks'] = output_dict['detection_masks'][0]

        detections = cv2.cvtColor(cv_image, cv2.COLOR_BGR2RGB)

        # Visualize detected bounding boxes.
        num_detections = int(output_dict['num_detections'])
        rows = detections.shape[0]
        cols = detections.shape[1]
        for i in range(num_detections):
            bbox = [float(v) for v in output_dict['detection_boxes'][i]]
            score = output_dict['detection_scores'][i]
            if score > 0.3:
                x = bbox[1] * cols
                y = bbox[0] * rows
                right = bbox[3] * cols
                bottom = bbox[2] * rows
                cv2.rectangle(detections, (int(x), int(y)),
                              (int(right), int(bottom)), (125, 255, 51),
                              thickness=2)

        image_message = self.bridge.cv2_to_imgmsg(detections,
                                                  encoding="passthrough")

        self.TLDetector_pub.publish(image_message)
        return output_dict

    def camera_cb(self, msg):
        #print("Got camera info")
        self.camera_info = msg

    def pose_cb(self, msg):
        self.pose = msg

        if TESTING_WITHOUT_IMG:
            #Remove -- Temporary code - sub for image processing
            light_wp, state = self.process_traffic_lights()
            '''
            Publish upcoming red lights at camera frequency.
            Each predicted state has to occur `STATE_COUNT_THRESHOLD` number
            of times till we start using it. Otherwise the previous stable state is
            used.
            '''
            if self.state != state:
                self.state_count = 0
                self.state = state
            elif self.state_count >= STATE_COUNT_THRESHOLD:
                self.last_state = self.state
                light_wp = light_wp if state == TrafficLight.RED else -1
                self.last_wp = light_wp
                self.upcoming_red_light_pub.publish(Int32(light_wp))
            else:
                self.upcoming_red_light_pub.publish(Int32(self.last_wp))
            self.state_count += 1

        # Remove till here

    def waypoints_cb(self, waypoints):
        self.waypoints = waypoints
        if not self.waypoints_2d:
            self.waypoints_2d = [[
                waypoint.pose.pose.position.x, waypoint.pose.pose.position.y
            ] for waypoint in waypoints.waypoints]
            self.waypoint_tree = KDTree(self.waypoints_2d)

    def traffic_cb(self, msg):
        self.lights = msg.lights
        #rospy.loginfo(self.lights)

    def image_cb(self, msg):
        """Identifies red lights in the incoming camera image and publishes the index
            of the waypoint closest to the red light's stop line to /traffic_waypoint
        Args:
            msg (Image): image from car-mounted camera
        """

        # Only process every 5th image (reduce latency)
        self.image_counter += 1
        if self.image_counter % 5 != 0:
            return
        # End speed-up

        self.has_image = True
        self.hasDetections = False
        self.camera_image = msg
        light_wp, state = self.process_traffic_lights()
        '''
        Publish upcoming red lights at camera frequency.
        Each predicted state has to occur `STATE_COUNT_THRESHOLD` number
        of times till we start using it. Otherwise the previous stable state is
        used.
        '''
        if self.state != state:
            self.state_count = 0
            self.state = state
        elif self.state_count >= STATE_COUNT_THRESHOLD:
            self.last_state = self.state
            light_wp = light_wp if state == TrafficLight.RED else -1
            self.last_wp = light_wp
            self.upcoming_red_light_pub.publish(Int32(light_wp))
        else:
            self.upcoming_red_light_pub.publish(Int32(self.last_wp))
        self.state_count += 1

    def get_closest_waypoint(self, x, y):
        """Identifies the closest path waypoint to the given position
            https://en.wikipedia.org/wiki/Closest_pair_of_points_problem
        Args:
            x (float), y (float): position to match a waypoint to
        Returns:
            int: index of the closest waypoint in self.waypoints
        """
        closest_idx = self.waypoint_tree.query([x, y], 1)[1]
        return closest_idx

    def get_sim_light_state(self, light_idx):
        """Determines the current color of the traffic light
        Args:
            light (TrafficLight): light to classify
        Returns:
            int: ID of traffic light color (specified in styx_msgs/TrafficLight)
        """
        # Dummy code to be removed later
        #rospy.loginfo('Light state: %s', self.lights[light_idx].state)
        if (self.lights):
            #rospy.loginfo(self.lights[light_idx].state)
            return self.lights[light_idx].state
        else:
            rospy.loginfo('Lights uninit')
        return TrafficLight.GREEN

    def get_tl_coords_in_image(self, coords_in_world):
        """Get transform from (X,Y,Z) world coords to (x,y) camera coords. 
        See https://github.com/udacity/CarND-Capstone/issues/24
        Args:
            coords_in_world : TrafficLight coordinates
        """

        self.listener.waitForTransform("/world", "/base_link", rospy.Time(),
                                       rospy.Duration(1.0))
        try:
            now = rospy.Time.now()
            self.listener.waitForTransform("/world", "/base_link", now,
                                           rospy.Duration(1.0))
            (trans,
             rot) = self.listener.lookupTransform("/world", "/base_link", now)
            #print("Got map transform")
        except (tf.Exception, tf.LookupException, tf.ConnectivityException):
            rospy.logerr("Couldn't find camera to map transform.")
            #print("Can't get map transform")
            return None

        # do 3D rotation and translation of light coords from world to car frame
        x_world = coords_in_world.x
        y_world = coords_in_world.y
        z_world = coords_in_world.z
        e = tf.transformations.euler_from_quaternion(rot)
        cos_yaw = math.cos(e[2])
        sin_yaw = math.sin(e[2])
        x_car = x_world * cos_yaw - y_world * sin_yaw + trans[0]
        y_car = x_world * sin_yaw + y_world * cos_yaw + trans[1]
        z_car = z_world + trans[2]

        # use camera projection matrix to translate world coords to camera pixel coords
        # http://docs.ros.org/melodic/api/sensor_msgs/html/msg/CameraInfo.html
        uvw = np.dot(self.camera_info.P, [x_car, y_car, z_car, 1])
        camera_x = uvw[0] / uvw[2]
        camera_y = uvw[1] / uvw[2]

        #focal_length = 2300
        #half_image_width = 400
        #half_image_height = 300
        #x_offset = -30
        #y_offset = 340
        #half_image_width = 400
        #half_image_height = 300

        return (camera_x, camera_y)

    def get_light_state(self, tl_idx):
        """Determines the current color of the traffic light
        Args:
            tl_idx (int): index of light to classify
        Returns:
            int: ID of traffic light color (specified in styx_msgs/TrafficLight)
        """

        if not self.has_image:
            self.prev_light_loc = None
            # Report UNKNOWN rather than False (False compares equal to TrafficLight.RED)
            return TrafficLight.UNKNOWN

        cv_image = self.bridge.imgmsg_to_cv2(self.camera_image, "bgr8")
        crop_cv_image = cv_image.copy()

        num_detections = int(self.detections['num_detections'])
        rows = cv_image.shape[0]
        cols = cv_image.shape[1]

        index = np.argmax(self.detections['detection_scores'])
        #rospy.loginfo('most confident detection = ', index)
        #rospy.loginfo('max score = ', self.detections['detection_scores'][index])

        for i in range(num_detections):
            bbox = [float(v) for v in self.detections['detection_boxes'][i]]
            score = self.detections['detection_scores'][i]
            if score > 0.3:
                x = int(bbox[1] * cols)
                y = int(bbox[0] * rows)
                right = int(bbox[3] * cols)
                bottom = int(bbox[2] * rows)

                crop_cv_image = cv_image[y:bottom, x:right].copy()
                image_message = self.bridge.cv2_to_imgmsg(
                    crop_cv_image, encoding="passthrough")
                self.most_confident_detection.publish(image_message)
                return self.light_classifier.get_classification(crop_cv_image)

        return TrafficLight.UNKNOWN

    def process_traffic_lights(self):
        """Finds closest visible traffic light, if one exists, and determines its color
        and the best waypoint for stopping.
        Returns:
            int: index of waypoint closest to the upcoming stop line for a traffic light (-1 if none exists)
            int: ID of traffic light color (specified in styx_msgs/TrafficLight)
        """
        line = None
        max_visible_dist = 100  # maximum look-ahead, measured in waypoint indices
        line_wp_idx = -1
        tl_idx = -1

        # List of positions that correspond to the line to stop in front of for a given intersection
        stop_line_positions = self.config['stop_line_positions']
        if self.pose is None or self.waypoints is None or self.waypoint_tree is None:
            return -1, TrafficLight.UNKNOWN
        car_position = self.get_closest_waypoint(self.pose.pose.position.x,
                                                 self.pose.pose.position.y)

        #TODO find the closest visible traffic light (if one exists)
        diff = len(self.waypoints.waypoints)
        for i, temp_closest_stop_position in enumerate(stop_line_positions):
            line = stop_line_positions[i]
            wp_idx = self.get_closest_waypoint(line[0], line[1])
            d = wp_idx - car_position
            if d >= 0 and d < diff and d < max_visible_dist:
                diff = d
                line_wp_idx = wp_idx
                tl_idx = i
                line = temp_closest_stop_position

        # if light and self.closest_light != light:
        #     rospy.loginfo('TL coming up: %d', diff)
        #     rospy.loginfo('Light details(%d): %s', tl_idx, light)
        #     self.closest_light = light
        #     state = self.get_sim_light_state(tl_idx)
        #     return line_wp_idx, state
        # elif not light and self.closest_light:
        #     rospy.loginfo('Free.. speed up')
        #     self.closest_light = None
        #     return -1, TrafficLight.UNKNOWN
        if tl_idx >= 0:
            if TESTING_WITHOUT_IMG:
                state = self.get_sim_light_state(tl_idx)
                return line_wp_idx, state

            else:
                # Fix this for image processing.
                self.detections = self.getBBox()
                self.hasDetections = True
                state = self.get_light_state(tl_idx)
                #print("Light state: {0}".format(state))
                return line_wp_idx, state

        return -1, TrafficLight.UNKNOWN
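
Example #4 additionally assumes several module-level names beyond the preamble sketched after Example #1. A minimal sketch inferred from usage (the flag value and the commented import path are assumptions, not taken from the original):

import os
import math
import numpy as np
import cv2
import tensorflow
from scipy.spatial import KDTree
from sensor_msgs.msg import CameraInfo
# Only needed if a mask model were used; from the TensorFlow Object Detection API:
# from object_detection.utils import ops as utils_ops

TESTING_WITHOUT_IMG = False  # True: take light states from the simulator instead of the camera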
Example #5
class TLDetector(object):
    def __init__(self):
        '''
        /vehicle/traffic_lights provides you with the location of the traffic light in 3D map space and
        helps you acquire an accurate ground truth data source for the traffic light
        classifier by sending the current color state of all traffic lights in the
        simulator. When testing on the vehicle, the color state will not be available. You'll need to
        rely on the position of the light and the camera image to predict it.
        '''
        rospy.init_node('tl_detector')

        self.pose = None
        self.waypoints = None
        self.camera_image = None
        self.tree = None
        self.lights = []
        self.img = None
        self.has_image = False
        self.state = TrafficLight.UNKNOWN
        self.last_state = TrafficLight.UNKNOWN
        self.last_time = 0

        sub1 = rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)
        sub2 = rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)

        
        sub3 = rospy.Subscriber('/vehicle/traffic_lights', TrafficLightArray, self.traffic_cb)
        sub6 = rospy.Subscriber('/image_color', Image, self.image_cb)

        config_string = rospy.get_param("/traffic_light_config")
        self.config = yaml.load(config_string)

        self.upcoming_red_light_pub = rospy.Publisher('/traffic_waypoint', Int32, queue_size=1)

        #self.bridge = CvBridge()
        self.light_classifier = TLClassifier(self.config['is_site'])
        #self.listener = tf.TransformListener()
        
        self.last_wp = -1
        self.state_count = 0
        
        #rospy.spin()
        self.loop()
       
    def loop(self):
        rate = rospy.Rate(5) # 5Hz
        while not rospy.is_shutdown():
            if self.has_image and self.pose:
                light_wp, state = self.process_traffic_lights()
                if light_wp:
                    if self.state != state:
                        self.state_count = 0
                        self.state = state
                    elif self.state_count >= STATE_COUNT_THRESHOLD:
                        self.last_state = self.state
                        light_wp = light_wp if state == TrafficLight.RED or state == TrafficLight.YELLOW else -1
                        self.last_wp = light_wp
                        self.upcoming_red_light_pub.publish(Int32(light_wp))
                    else:
                        self.upcoming_red_light_pub.publish(Int32(self.last_wp))
                    self.state_count += 1
            rate.sleep()

    def pose_cb(self, msg):
        self.pose = msg

    def waypoints_cb(self, waypoints):
        self.waypoints = waypoints.waypoints
        self.tree = KDTree([[p.pose.pose.position.x, p.pose.pose.position.y] for p in waypoints.waypoints])

    def traffic_cb(self, msg):
        self.lights = msg.lights

    def image_cb(self, msg):
        """Identifies red lights in the incoming camera image and publishes the index
            of the waypoint closest to the red light's stop line to /traffic_waypoint

        Args:
            msg (Image): image from car-mounted camera

        """
        self.has_image = True
        self.camera_image = msg

    def get_closest_waypoint(self, pose):
        """Identifies the closest path waypoint to the given position
            https://en.wikipedia.org/wiki/Closest_pair_of_points_problem
        Args:
            pose (Pose): position to match a waypoint to

        Returns:
            int: index of the closest waypoint in self.waypoints
        """
        #TODO implement
        if self.tree is not None:
            x, y = pose.position.x, pose.position.y
            return self.tree.query([x, y])[1]
        return 0

    def get_light_state(self, light):
        """Determines the current color of the traffic light

        Args:
            light (TrafficLight): light to classify

        Returns:
            int: ID of traffic light color (specified in styx_msgs/TrafficLight)

        """
        
        #Get classification
        img = np.frombuffer(self.camera_image.data, np.uint8).reshape((self.camera_image.height, self.camera_image.width, 3))
        img = cv2.resize(img, None, fx = 0.5, fy = 0.5, interpolation = cv2.INTER_AREA)
        #img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
        #cv2.imwrite('/mnt/c/temp/img.png', img)
        self.img = img.reshape((1, self.camera_image.height // 2, self.camera_image.width // 2, 3))
        return self.light_classifier.get_classification(self.img)

    def distance(self, waypoints, wp1, wp2):
        dist = 0
        dl = lambda a, b: math.sqrt((a.x-b.x)**2 + (a.y-b.y)**2  + (a.z-b.z)**2)
        for i in range(wp1 + 1, wp2 + 1):
            dist += dl(waypoints[wp1].pose.pose.position, waypoints[i].pose.pose.position)
            wp1 = i
        return dist
    
    def process_traffic_lights(self):
        """Finds closest visible traffic light, if one exists, and determines its
            location and color

        Returns:
            int: index of the waypoint closest to the upcoming stop line for a traffic light (-1 if none exists)
            int: ID of traffic light color (specified in styx_msgs/TrafficLight)

        """
        light = None

        # List of positions that correspond to the line to stop in front of for a given intersection
        stop_line_positions = self.config['stop_line_positions']
                    
        car_position = self.get_closest_waypoint(self.pose.pose)
        
        #TODO find the closest visible traffic light (if one exists)
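        # Note (added comment): `train` is assumed to be a module-level flag defined
        # elsewhere; when it is true the simulator-provided light states are used as
        # ground truth instead of the camera-based classifier.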
        if not train:
            for stop in stop_line_positions:
                stop_pose = Pose()
                stop_pose.position.x, stop_pose.position.y = stop
                light_wp = self.get_closest_waypoint(stop_pose)
                if light_wp > car_position and self.distance(self.waypoints, car_position, light_wp) < 70:
                    light = light_wp
                    break
        else:
            for l in self.lights:
                light_wp = self.get_closest_waypoint(l.pose.pose)
                #rospy.loginfo('Light:{}, Car:{}, max:{}'.format(light_wp, car_position, self.waypoints))
                if light_wp > car_position and self.distance(self.waypoints, car_position, light_wp) < 100:
                    light = light_wp
                    break
            
        if light is not None:
            if not train:
                state = self.get_light_state(light)
            else:
                state = l.state
            return light, state
        return -1, TrafficLight.UNKNOWN
Example #6
class TLDetector(object):
    def __init__(self):
        rospy.init_node('tl_detector')

        self.pose = None
        self.waypoints = None
        self.camera_image = None
        self.lights = []
        self.waypoints_2d = None
        self.waypoint_tree = None

        sub1 = rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)
        sub2 = rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)
        '''
        /vehicle/traffic_lights provides you with the location of the traffic light in 3D map space and
        helps you acquire an accurate ground truth data source for the traffic light
        classifier by sending the current color state of all traffic lights in the
        simulator. When testing on the vehicle, the color state will not be available. You'll need to
        rely on the position of the light and the camera image to predict it.
        '''
        sub3 = rospy.Subscriber('/vehicle/traffic_lights', TrafficLightArray,
                                self.traffic_cb)
        sub6 = rospy.Subscriber('/image_color', Image, self.image_cb)

        config_string = rospy.get_param("/traffic_light_config")
        self.config = yaml.load(config_string)

        self.upcoming_red_light_pub = rospy.Publisher('/traffic_waypoint',
                                                      Int32,
                                                      queue_size=1)

        self.bridge = CvBridge()
        self.light_classifier = TLClassifier()
        self.listener = tf.TransformListener()

        self.state = TrafficLight.UNKNOWN
        self.last_state = TrafficLight.UNKNOWN
        self.last_wp = -1
        self.state_count = 0

        rospy.spin()

    def pose_cb(self, msg):
        self.pose = msg

    def waypoints_cb(self, waypoints):
        self.waypoints = waypoints
        # next lines from waypoint_updater for similar KDtree
        if not self.waypoints_2d:
            self.waypoints_2d = [[
                waypoint.pose.pose.position.x, waypoint.pose.pose.position.y
            ] for waypoint in waypoints.waypoints]
            self.waypoint_tree = KDTree(self.waypoints_2d)

    def traffic_cb(self, msg):
        self.lights = msg.lights

    def image_cb(self, msg):
        """Identifies red lights in the incoming camera image and publishes the index
            of the waypoint closest to the red light's stop line to /traffic_waypoint

        Args:
            msg (Image): image from car-mounted camera

        """
        self.has_image = True
        self.camera_image = msg
        light_wp, state = self.process_traffic_lights()
        '''
        Publish upcoming red lights at camera frequency.
        Each predicted state has to occur `STATE_COUNT_THRESHOLD` number
        of times till we start using it. Otherwise the previous stable state is
        used.
        '''
        if self.state != state:
            self.state_count = 0
            self.state = state
        elif self.state_count >= STATE_COUNT_THRESHOLD:
            self.last_state = self.state
            light_wp = light_wp if state == TrafficLight.RED else -1
            self.last_wp = light_wp
            self.upcoming_red_light_pub.publish(Int32(light_wp))
        else:
            self.upcoming_red_light_pub.publish(Int32(self.last_wp))
        self.state_count += 1

    def get_closest_waypoint(self, x, y):
        """Identifies the closest path waypoint to the given position
            https://en.wikipedia.org/wiki/Closest_pair_of_points_problem
        Args:
            x (float), y (float): position to match a waypoint to

        Returns:
            int: index of the closest waypoint in self.waypoints

        """
        closest_idx = self.waypoint_tree.query([x, y], 1)[1]
        return closest_idx

    def get_light_state(self, light):
        """Determines the current color of the traffic light

        Args:
            light (TrafficLight): light to classify

        Returns:
            int: ID of traffic light color (specified in styx_msgs/TrafficLight)

        """
        if not self.has_image:
            self.prev_light_loc = None
            # Report UNKNOWN rather than False (False compares equal to TrafficLight.RED)
            return TrafficLight.UNKNOWN

        cv_image = self.bridge.imgmsg_to_cv2(self.camera_image, "bgr8")

        #Get classification
        return self.light_classifier.get_classification(cv_image)

    def process_traffic_lights(self):
        """Finds closest visible traffic light, if one exists, and determines its
            location and color

        Returns:
            int: index of the waypoint closest to the upcoming stop line for a traffic light (-1 if none exists)
            int: ID of traffic light color (specified in styx_msgs/TrafficLight)

        """
        light = None
        closest_light = None
        line_wp_idx = None

        # List of positions that correspond to the line to stop in front of for a given intersection
        stop_line_positions = self.config['stop_line_positions']
        if self.pose is None or self.waypoints is None or self.waypoint_tree is None:
            return -1, TrafficLight.UNKNOWN
        car_wp_idx = self.get_closest_waypoint(self.pose.pose.position.x,
                                               self.pose.pose.position.y)

        #Find the closest visible traffic light (if one exists)

        diff = len(self.waypoints.waypoints)
        for i, light in enumerate(self.lights):
            # get stopline waypoint index
            line = stop_line_positions[i]
            temp_wp_idx = self.get_closest_waypoint(line[0], line[1])
            #Find the closest stopline index
            d = temp_wp_idx - car_wp_idx
            if d >= 0 and d < diff:
                diff = d
                closest_light = light
                line_wp_idx = temp_wp_idx

        if closest_light:
            state = closest_light.state
            return line_wp_idx, state

        return -1, TrafficLight.UNKNOWN
Example #7
class TLDetector(object):
    def __init__(self):
        rospy.init_node('tl_detector')

        self.pose = None
        self.waypoints = None
        self.camera_image = None
        self.lights = []
        '''
        /vehicle/traffic_lights provides you with the location of the traffic light in 3D map space and
        helps you acquire an accurate ground truth data source for the traffic light
        classifier by sending the current color state of all traffic lights in the
        simulator. When testing on the vehicle, the color state will not be available. You'll need to
        rely on the position of the light and the camera image to predict it.
        '''

        config_string = rospy.get_param("/traffic_light_config")
        self.config = yaml.load(config_string)

        self.upcoming_red_light_pub = rospy.Publisher('/traffic_waypoint',
                                                      Int32,
                                                      queue_size=1)

        self.bridge = CvBridge()
        self.light_classifier = TLClassifier(
            './tf_files/retrained_graph.pb', './tf_files/retrained_labels.txt')
        self.listener = tf.TransformListener()

        self.state = TrafficLight.UNKNOWN
        self.last_state = TrafficLight.UNKNOWN
        self.last_wp = -1
        self.state_count = 0
        self.has_waypoints = False
        self.has_pose = False

        sub1 = rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)
        sub2 = rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)
        sub3 = rospy.Subscriber('/vehicle/traffic_lights', TrafficLightArray,
                                self.traffic_cb)
        sub6 = rospy.Subscriber('/image_color', Image, self.image_cb)

        self.save_images_min_dist = rospy.get_param('~save_images_min_dist')
        rospy.loginfo('save img min dist: {}'.format(
            self.save_images_min_dist))

        self.save_images = rospy.get_param('~save_images')
        self.use_classifier = rospy.get_param('~use_classifier')
        self.max_tl_dist = rospy.get_param('~max_tl_distance')

        # Always use classifier
        self.use_classifier = True
        if self.use_classifier:
            rospy.loginfo('using traffic light classifier')

        self.has_image = False
        self.start()

    def pose_cb(self, msg):
        self.has_pose = True
        self.pose = msg

    def waypoints_cb(self, waypoints):
        if self.has_waypoints:
            return

        self.waypoints = waypoints.waypoints

        data = np.zeros((len(self.waypoints), 2), dtype=np.float32)
        for idx, wp in enumerate(self.waypoints):
            xy = (wp.pose.pose.position.x, wp.pose.pose.position.y)
            data[idx, :] = xy
        self.kdtree = scipy.spatial.KDTree(data)
        self.has_waypoints = True

    def traffic_cb(self, msg):
        self.lights = msg.lights

    def image_cb(self, msg):
        """Identifies red lights in the incoming camera image and publishes the index
            of the waypoint closest to the red light's stop line to /traffic_waypoint

        Args:
            msg (Image): image from car-mounted camera

        """
        self.has_image = True
        self.camera_image = msg

    def publish(self):
        if not (self.has_pose and self.has_waypoints):
            return
        light_wp, state = self.process_traffic_lights()
        '''
        Publish upcoming red lights at camera frequency.
        Each predicted state has to occur `STATE_COUNT_THRESHOLD` number
        of times till we start using it. Otherwise the previous stable state is
        used.
        '''
        # rospy.loginfo("tl_detector - publishing state: %s light_wp %s", state,
        #   light_wp)
        if self.state != state:
            self.state_count = 0
            self.state = state
        elif self.state_count >= STATE_COUNT_THRESHOLD:
            self.last_state = self.state
            light_wp = light_wp if state == TrafficLight.RED else -1
            self.last_wp = light_wp
            self.upcoming_red_light_pub.publish(Int32(light_wp))
        else:
            self.upcoming_red_light_pub.publish(Int32(self.last_wp))
        self.state_count += 1

    def calculate_distance(self, x1, x2, y1, y2):
        return math.sqrt((x1 - x2)**2 + (y1 - y2)**2)

    def get_closest_waypoint_xy(self, pt):
        d, kdwp = self.kdtree.query(pt)
        return kdwp

    def get_closest_waypoint_idx(self, pose):
        """Identifies the closest path waypoint to the given position
            https://en.wikipedia.org/wiki/Closest_pair_of_points_problem
        Args:
            pose (Pose): position to match a waypoint to

        Returns:
            int: index of the closest waypoint in self.waypoints

        """
        d, kdwp = self.kdtree.query((pose.position.x, pose.position.y))
        return kdwp

    def get_light_state(self, light):
        """Determines the current color of the traffic light

        Args:
            light (TrafficLight): light to classify

        Returns:
            int: ID of traffic light color (specified in styx_msgs/TrafficLight)

        """
        if (not self.has_image):
            self.prev_light_loc = None
            return TrafficLight.UNKNOWN

        rgb = self.bridge.imgmsg_to_cv2(self.camera_image)

        if self.use_classifier:
            light_state, hsv, thresh_red = self.light_classifier.get_classification(
                rgb)
            # Save images for visual debugging
            # plt.imsave("./data/out-{}-{}.{}.hsv.jpg".format(
            #     light_state, light.state, self.camera_image.header.seq), hsv)
            # plt.imsave("./data/out-{}-{}.{}.thresh_red.jpg".format(
            #     light_state, light.state, self.camera_image.header.seq),
            #            thresh_red)
            rospy.loginfo('inference: {} / ground truth {}'.format(
                light_state, light.state))
        else:
            rospy.loginfo("use classifier is ", self.use_classifier)
            light_state = light.state

        if self.save_images:
            dist_to_light = self.calculate_distance(self.pose.pose.position.x,
                                                    light.pose.pose.position.x,
                                                    self.pose.pose.position.y,
                                                    light.pose.pose.position.y)

            if dist_to_light <= self.save_images_min_dist:
                bgr = cv2.cvtColor(rgb, cv2.COLOR_RGB2BGR)
                cv2.imwrite(
                    "./data/test/test-{}-{}.{}.jpg".format(
                        light_state, light.state,
                        self.camera_image.header.seq), bgr)

        return light_state

    def is_ahead_of(self, pose, x, y):
        """Determines whether a wappoint is ahead of position
        Args:
            pose: base position
            x (float): waypoint's global x-coordinate
            y (float): waypoint's global y-coordinate
        Returns:
            bool: Whether the waypoint is ahead of this position
        """
        x1 = pose.position.x
        y1 = pose.position.y
        orientation = pose.orientation
        euler = tf.transformations.euler_from_quaternion(
            [orientation.x, orientation.y, orientation.z, orientation.w])
        yaw = euler[2]
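        # Project the vector from the pose to (x, y) onto the heading direction
        # (cos(yaw), sin(yaw)); a positive dot product means the point lies ahead.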
        return ((x - x1) * math.cos(yaw) + (y - y1) * math.sin(yaw)) > 0

    def process_traffic_lights(self):
        """Finds closest visible traffic light, if one exists, and determines its
            location and color

        Returns:
            int: index of waypoint closest to the upcoming stop line for a traffic light (-1 if none exists)
            int: ID of traffic light color (specified in styx_msgs/TrafficLight)

        """
        light = None
        light_wp = -1

        if not self.waypoints:
            return -1, TrafficLight.UNKNOWN
        if not self.lights:
            return -1, TrafficLight.UNKNOWN

        stop_line_positions = self.config['stop_line_positions']
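        # The traffic_light_config parameter (loaded in __init__ via yaml.load)
        # provides, among other fields, 'stop_line_positions': a list of [x, y] map
        # coordinates, one stop line per intersection, e.g. [[1148.56, 1184.65], ...]
        # (values here are illustrative).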
        car_position_idx = 0
        if (self.pose):
            car_position_idx = self.get_closest_waypoint_idx(self.pose.pose)
        car_position = self.waypoints[car_position_idx]
        #rospy.loginfo("tl_detector - pose %s %s", self.pose.pose.position, car_position.pose.pose.position)

        # Find the closest visible traffic light based on the current position
        dist_min = float('inf')
        closest_stop_line_idx = -1
        for i, stop_line_pose in enumerate(stop_line_positions):
            if self.is_ahead_of(car_position.pose.pose, stop_line_pose[0],
                                stop_line_pose[1]):
                dist = self.calculate_distance(
                    car_position.pose.pose.position.x, stop_line_pose[0],
                    car_position.pose.pose.position.y, stop_line_pose[1])
                if dist < dist_min:
                    dist_min = dist
                    closest_stop_line_idx = i

        # if dist_min < self.max_tl_dist and closest_stop_line_idx >= 0:
        if closest_stop_line_idx >= 0:
            # Find wp index in waypoints which is closest to the traffic light
            light = self.lights[closest_stop_line_idx]
            dist_min = float('inf')
            wp = self.get_closest_waypoint_xy(
                stop_line_positions[closest_stop_line_idx])

            if wp > car_position_idx:
                light_wp = wp

            state = self.get_light_state(light)
            return light_wp, state
        return -1, TrafficLight.UNKNOWN

    def start(self):
        rospy.loginfo("Traffic Light Detector - Starting")
        rate = rospy.Rate(LOOP_RATE)
        while not rospy.is_shutdown():
            if self.has_image:
                self.publish()
            rate.sleep()
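
# A minimal, hypothetical sketch of the HSV red-thresholding idea the classifier in
# the example above appears to use (its get_classification returns an hsv image and
# a thresh_red mask for debugging). The threshold ranges, the pixel-count cutoff and
# the function name are assumptions for illustration, not the project's TLClassifier.
import cv2

def classify_red_by_hsv(rgb, min_red_pixels=50):
    hsv = cv2.cvtColor(rgb, cv2.COLOR_RGB2HSV)
    # Red wraps around the hue axis, so combine a low-hue and a high-hue range.
    lower = cv2.inRange(hsv, (0, 100, 100), (10, 255, 255))
    upper = cv2.inRange(hsv, (160, 100, 100), (179, 255, 255))
    thresh_red = cv2.bitwise_or(lower, upper)
    is_red = cv2.countNonZero(thresh_red) > min_red_pixels
    return is_red, hsv, thresh_red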
예제 #8
0
class TLDetector(object):
    def __init__(self):
        rospy.init_node('tl_detector')

        self.pose = None
        self.waypoints = None
        self.waypoint_tree = None
        self.waypoints_2d = None
        self.camera_image = None
        self.lights = []
        rospy.loginfo('Creating CvBridge')
        self.bridge = CvBridge()

        sub1 = rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)
        sub2 = rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)
        '''
        /vehicle/traffic_lights provides you with the location of the traffic light in 3D map space and
        helps you acquire an accurate ground truth data source for the traffic light
        classifier by sending the current color state of all traffic lights in the
        simulator. When testing on the vehicle, the color state will not be available. You'll need to
        rely on the position of the light and the camera image to predict it.
        '''
        sub3 = rospy.Subscriber('/vehicle/traffic_lights', TrafficLightArray,
                                self.traffic_cb)
        # might want to use image_raw instead for more data
        sub6 = rospy.Subscriber('/image_color', Image, self.image_cb)

        config_string = rospy.get_param("/traffic_light_config")
        self.config = yaml.load(config_string)

        self.upcoming_red_light_pub = rospy.Publisher('/traffic_waypoint',
                                                      Int32,
                                                      queue_size=1)

        rospy.loginfo(self.config)

        if self.config['is_site']:
            rospy.loginfo('Loading Site Traffic Light Graph')
            graph = self.load_graph(
                './saved_models/carla_traffic_light_classifier.pb')
        else:
            rospy.loginfo('Loading Simulator Traffic Light Graph')
            graph = self.load_graph(
                './saved_models/traffic_light_classifier.pb')

        self.light_classifier = TLClassifier(graph)
        self.listener = tf.TransformListener()

        self.state = TrafficLight.UNKNOWN
        self.last_state = TrafficLight.UNKNOWN
        self.last_wp = -1
        self.last_car_wp = 0
        self.state_count = 0
        self.img_idx = 0

        # model = keras.models.load_model('/capstone/ros/src/tl_detector/traffic_light_classifier.h5')

        rospy.spin()

    def load_graph(self, frozen_graph_filename):
        # Source: https://blog.metaflow.fr/tensorflow-how-to-freeze-a-model-and-serve-it-with-a-python-api-d4f3596b3adc
        # We load the protobuf file from the disk and parse it to retrieve the
        # unserialized graph_def
        with tensorflow.gfile.GFile(frozen_graph_filename, "rb") as f:
            graph_def = tensorflow.GraphDef()
            graph_def.ParseFromString(f.read())

        # Then, we import the graph_def into a new Graph and returns it
        with tensorflow.Graph().as_default() as graph:
            # The name var will prefix every op/nodes in your graph
            # Since we load everything in a new graph, this is not needed
            tensorflow.import_graph_def(graph_def, name="prefix")
        return graph

    def pose_cb(self, msg):
        self.pose = msg

    def waypoints_cb(self, waypoints):
        self.waypoints = waypoints
        if not self.waypoints_2d:
            self.waypoints_2d = [[
                waypoint.pose.pose.position.x, waypoint.pose.pose.position.y
            ] for waypoint in waypoints.waypoints]
            self.waypoint_tree = KDTree(self.waypoints_2d)

    def traffic_cb(self, msg):
        self.lights = msg.lights

    def image_cb(self, msg):
        """Identifies red lights in the incoming camera image and publishes the index
            of the waypoint closest to the red light's stop line to /traffic_waypoint

        Args:
            msg (Image): image from car-mounted camera

        """
        self.has_image = True
        self.camera_image = msg
        light_wp, state = self.process_traffic_lights()

        # rospy.logwarn("Closest light wp: {0}\nAnd light state: {1}".format(light_wp, state))
        '''
        Publish upcoming red lights at camera frequency.
        Each predicted state has to occur `STATE_COUNT_THRESHOLD` number
        of times till we start using it. Otherwise the previous stable state is
        used.
        '''
        if self.state != state:
            self.state_count = 0
            self.state = state
        elif self.state_count >= STATE_COUNT_THRESHOLD:
            self.last_state = self.state
            # Stop on red or yellow light just to be safe
            light_wp = light_wp if state == TrafficLight.RED or state == TrafficLight.YELLOW else -1
            self.last_wp = light_wp
            self.upcoming_red_light_pub.publish(Int32(light_wp))
        else:
            self.upcoming_red_light_pub.publish(Int32(self.last_wp))
        self.state_count += 1

    def get_closest_waypoint(self, x, y):
        """Identifies the closest path waypoint to the given position
            https://en.wikipedia.org/wiki/Closest_pair_of_points_problem
        Args:
            x (Pose position x): x position to match a waypoint to
            y (Pose position y): y position to match a waypoint to

        Returns:
            int: index of the closest waypoint in self.waypoints

        """
        #TODO implement
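        # KDTree.query returns a (distance, index) tuple; with k=1, [1] is the index
        # of the single nearest 2D waypoint.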
        closest_idx = self.waypoint_tree.query([x, y], 1)[1]
        return closest_idx

    def get_light_state(self, light, line_wp_idx, car_wp_idx):
        """Determines the current color of the traffic light

        Args:
            light (TrafficLight): light to classify

        Returns:
            int: ID of traffic light color (specified in styx_msgs/TrafficLight)

        """

        state = TrafficLight.UNKNOWN

        if not self.has_image:
            self.prev_light_loc = None
            return TrafficLight.UNKNOWN

        # rospy.logwarn("Next line wp: {}, Car Wp: {}".format(line_wp_idx, car_wp_idx))
        if line_wp_idx - car_wp_idx < 50 and self.bridge:
            cv_image = self.bridge.imgmsg_to_cv2(self.camera_image, "bgr8")
            state = self.light_classifier.get_classification(cv_image)

        # ## Code below used to capture training image dataset.
        # if self.last_car_wp == 0:
        #     self.last_car_wp = car_wp_idx
        # elif self.last_car_wp != car_wp_idx:
        # #     rospy.logwarn("car_wp_idx: {}".format(car_wp_idx))
        #     self.last_car_wp = car_wp_idx
        #     if line_wp_idx - car_wp_idx < 100:
        #         cv_image = self.bridge.imgmsg_to_cv2(self.camera_image, "bgr8")
        #         cv2.imwrite('dataset/{}-{}-{}.png'.format(self.img_idx, line_wp_idx, light.state), cv_image)
        #         rospy.loginfo('Writing image to dataset/{}-{}-{}.png'.format(self.img_idx, line_wp_idx, light.state))
        #         self.img_idx += 1

        # #Get classification
        # for testing, just return the light state
        rospy.loginfo("State: {}".format(state))
        return state
        # return light.state
        # return self.light_classifier.get_classification(cv_image)

    def process_traffic_lights(self):
        """Finds closest visible traffic light, if one exists, and determines its
            location and color

        Returns:
            int: index of waypoint closest to the upcoming stop line for a traffic light (-1 if none exists)
            int: ID of traffic light color (specified in styx_msgs/TrafficLight)

        """
        closest_light = None
        line_wp_idx = None

        # List of positions that correspond to the line to stop in front of for a given intersection
        stop_line_positions = self.config['stop_line_positions']
        if (self.pose):
            car_wp_idx = self.get_closest_waypoint(self.pose.pose.position.x,
                                                   self.pose.pose.position.y)

            #TODO find the closest visible traffic light (if one exists)
            diff = len(self.waypoints.waypoints)
            for i, light in enumerate(self.lights):
                # Get stop line waypoint index
                line = stop_line_positions[i]
                temp_wp_idx = self.get_closest_waypoint(line[0], line[1])

                # Find closest stop line waypoint index
                d = temp_wp_idx - car_wp_idx
                if d >= 0 and d < diff:
                    diff = d
                    closest_light = light
                    line_wp_idx = temp_wp_idx

        if closest_light:
            state = self.get_light_state(closest_light, line_wp_idx,
                                         car_wp_idx)
            return line_wp_idx, state

        return -1, TrafficLight.UNKNOWN
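
# A minimal sketch of how a frozen TensorFlow 1.x graph like the one load_graph()
# returns in the example above is typically queried. The tensor names
# ("prefix/image_tensor:0", "prefix/detection_classes:0") and the helper name are
# assumptions for illustration, not the actual TLClassifier interface; in practice
# the session would be created once and reused rather than per call.
import numpy as np
import tensorflow

def classify_with_frozen_graph(graph, bgr_image):
    with tensorflow.Session(graph=graph) as sess:
        image_tensor = graph.get_tensor_by_name('prefix/image_tensor:0')
        classes_tensor = graph.get_tensor_by_name('prefix/detection_classes:0')
        classes = sess.run(classes_tensor,
                           feed_dict={image_tensor: np.expand_dims(bgr_image, 0)})
        return int(classes[0][0])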
예제 #9
0
class TLDetector(object):
    def __init__(self):
        rospy.init_node('tl_detector')

        self.pose = None
        self.waypoints = None
        self.camera_image = None
        self.lights = []

        sub1 = rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)
        sub2 = rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)

        '''
        /vehicle/traffic_lights provides you with the location of the traffic light in 3D map space and
        helps you acquire an accurate ground truth data source for the traffic light
        classifier by sending the current color state of all traffic lights in the
        simulator. When testing on the vehicle, the color state will not be available. You'll need to
        rely on the position of the light and the camera image to predict it.
        '''
        sub3 = rospy.Subscriber('/vehicle/traffic_lights', TrafficLightArray, self.traffic_cb)
        sub6 = rospy.Subscriber('/image_color', Image, self.image_cb)

        config_string = rospy.get_param("/traffic_light_config")
        self.config = yaml.load(config_string)

        self.upcoming_red_light_pub = rospy.Publisher('/traffic_waypoint', Int32, queue_size=1)

        self.bridge = CvBridge()
        self.light_classifier = TLClassifier()
        self.listener = tf.TransformListener()

        self.state = TrafficLight.UNKNOWN
        self.last_state = TrafficLight.UNKNOWN
        self.last_wp = -1
        self.state_count = 0

        rospy.spin()

    def pose_cb(self, msg):
        self.pose = msg
        rospy.logdebug("current pose: %s", self.pose)

    def waypoints_cb(self, waypoints):
        self.waypoints = waypoints.waypoints
        rospy.logdebug("waypoints: %s", self.waypoints)

    def traffic_cb(self, msg):
        self.lights = msg.lights
        rospy.logdebug("lights: %s", self.lights)

    def image_cb(self, msg):
        """Identifies red lights in the incoming camera image and publishes the index
            of the waypoint closest to the red light's stop line to /traffic_waypoint

        Args:
            msg (Image): image from car-mounted camera

        """
        self.has_image = True
        self.camera_image = msg
        light_wp, state = self.process_traffic_lights()

        '''
        Publish upcoming red lights at camera frequency.
        Each predicted state has to occur `STATE_COUNT_THRESHOLD` number
        of times till we start using it. Otherwise the previous stable state is
        used.
        '''
        if self.state != state:
            self.state_count = 0
            self.state = state
        elif self.state_count >= STATE_COUNT_THRESHOLD:
            self.last_state = self.state
            light_wp = light_wp if state == TrafficLight.RED else -1
            self.last_wp = light_wp
            self.upcoming_red_light_pub.publish(Int32(light_wp))
        else:
            self.upcoming_red_light_pub.publish(Int32(self.last_wp))
        self.state_count += 1

    def calculate_distance(self, x1, x2, y1, y2):
        return math.sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2)

    def get_closest_waypoint_idx(self, pose):
        """Identifies the closest path waypoint to the given position
            https://en.wikipedia.org/wiki/Closest_pair_of_points_problem
        Args:
            pose (Pose): position to match a waypoint to

        Returns:
            int: index of the closest waypoint in self.waypoints

        """
        #TODO implement
        index = -1
        dist_min = float("inf")
        for i, wp in enumerate(self.waypoints):
            dist = self.calculate_distance(pose.position.x, wp.pose.pose.position.x, pose.position.y, wp.pose.pose.position.y)
            if dist < dist_min:
                dist_min = dist
                index = i
        return index

    def get_light_state(self, light):
        """Determines the current color of the traffic light

        Args:
            light (TrafficLight): light to classify

        Returns:
            int: ID of traffic light color (specified in styx_msgs/TrafficLight)

        """
        if(not self.has_image):
            self.prev_light_loc = None
            return TrafficLight.UNKNOWN

        cv_image = self.bridge.imgmsg_to_cv2(self.camera_image, "bgr8")

        #Get classification
        return self.light_classifier.get_classification(cv_image)

    def is_ahead_of(self, pose, x, y):
        """Determines whether a wappoint is ahead of position
        Args:
            pose: base position
            x (float): waypoint's global x-coordinate
            y (float): waypoint's global y-coordinate
        Returns:
            bool: Whether the waypoint is ahead of this position
        """
        x1 = pose.position.x
        y1 = pose.position.y
        orientation = pose.orientation
        euler = tf.transformations.euler_from_quaternion([orientation.x, orientation.y, orientation.z, orientation.w])
        yaw = euler[2]
        return ((x - x1) * math.cos(yaw) + (y - y1) * math.sin(yaw)) > 0

    def process_traffic_lights(self):
        """Finds closest visible traffic light, if one exists, and determines its
            location and color

        Returns:
            int: index of waypoint closest to the upcoming stop line for a traffic light (-1 if none exists)
            int: ID of traffic light color (specified in styx_msgs/TrafficLight)

        """
        light = None
        light_wp = -1

        if not self.waypoints:
            return -1, TrafficLight.UNKNOWN
        if not self.lights:
            return -1, TrafficLight.UNKNOWN

        # List of positions that correspond to the line to stop in front of for a given intersection
        stop_line_positions = self.config['stop_line_positions']
        car_position_idx = 0
        if(self.pose):
            car_position_idx = self.get_closest_waypoint_idx(self.pose.pose)
        car_position = self.waypoints[car_position_idx]

        # Find the closest visible traffic light based on the current position
        dist_min = float('inf')
        closest_stop_line_idx = -1
        for i, stop_line_pose in enumerate(stop_line_positions):
            if self.is_ahead_of(car_position.pose.pose, stop_line_pose[0], stop_line_pose[1]):
                dist = self.calculate_distance(car_position.pose.pose.position.x, stop_line_pose[0], car_position.pose.pose.position.y, stop_line_pose[1])
                if dist < dist_min:
                    dist_min = dist
                    closest_stop_line_idx = i


        if closest_stop_line_idx >= 0:
            # Find wp index in waypoints which is closest to the traffic light
            light = self.lights[closest_stop_line_idx]
            dist_min = float('inf')
            for i, wp in enumerate(self.waypoints):
                if self.is_ahead_of(wp.pose.pose, stop_line_positions[closest_stop_line_idx][0], stop_line_positions[closest_stop_line_idx][1]):
                    dist = self.calculate_distance(wp.pose.pose.position.x, stop_line_positions[closest_stop_line_idx][0], wp.pose.pose.position.y, stop_line_positions[closest_stop_line_idx][1])
                    if dist < dist_min:
                        dist_min = dist
                        light_wp = i

            state = self.get_light_state(light)
            return light_wp, state
        return -1, TrafficLight.UNKNOWN
예제 #10
0
class TLDetector(object):
    def __init__(self):
        rospy.init_node('tl_detector')

        self.pose = None
        self.waypoints_2d = None
        self.waypoint_tree = None
        self.base_waypoints = None
        self.camera_image = None
        self.lights = []
        self.process_count = 0

        self.bridge = CvBridge()

        self.light_classifier = TLClassifier()
        print("Traffic light classifier created")
        
        self.listener = tf.TransformListener()

        self.state = TrafficLight.UNKNOWN
        self.last_state = TrafficLight.UNKNOWN
        self.last_wp = -1
        self.state_count = 0
        self.last_processed_wp = -1
        self.last_processed_state = TrafficLight.UNKNOWN
        self.currentFrame = 1

        sub1 = rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)
        sub2 = rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)
        sub3 = rospy.Subscriber('/vehicle/traffic_lights', TrafficLightArray, self.traffic_cb)
        sub4 = rospy.Subscriber('/image_color', Image, self.image_cb)
        
        config_string = rospy.get_param("/traffic_light_config")
        self.config = yaml.load(config_string)

        self.upcoming_red_light_pub = rospy.Publisher('/traffic_waypoint', Int32, queue_size=1)

        rospy.spin()

    def pose_cb(self, msg):
        self.pose = msg

    def waypoints_cb(self, waypoints):
        self.base_waypoints = waypoints
        if not self.waypoints_2d:
            self.waypoints_2d = [[waypoint.pose.pose.position.x, waypoint.pose.pose.position.y] for waypoint in
                                 waypoints.waypoints]
            self.waypoint_tree = KDTree(self.waypoints_2d)

    def traffic_cb(self, msg):
        self.lights = msg.lights

    def image_cb(self, msg):
        """
        Identifies red lights in the incoming camera image and publishes the index of the waypoint closest to the red
        light's stop line to /traffic_waypoint
        :param msg: image from car-mounted camera
        """
        self.has_image = True
        self.camera_image = msg
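        # Throttle the expensive classification: run process_traffic_lights() only on
        # every PROCESS_FRAME_RATE-th frame and reuse the last result in between.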
        if (self.currentFrame % PROCESS_FRAME_RATE == 1):
            light_wp, state = self.process_traffic_lights()
            self.last_processed_wp = light_wp
            self.last_processed_state = state
        else:
            light_wp = self.last_processed_wp
            state = self.last_processed_state
        self.currentFrame += 1
        rospy.loginfo("TL Detector ----> image processed: waypoint= " + str(light_wp) + ", state=" + str(state))
        '''
        Publish upcoming red lights at camera frequency.
        Each predicted state has to occur `STATE_COUNT_THRESHOLD` number
        of times till we start using it. Otherwise the previous stable state is
        used.
        '''
        if self.state != state:
            self.state_count = 0
            self.state = state
        elif self.state_count >= STATE_COUNT_THRESHOLD:
            self.last_state = self.state
            light_wp = light_wp if state == TrafficLight.RED or state == TrafficLight.YELLOW else -1
            self.last_wp = light_wp
            self.upcoming_red_light_pub.publish(Int32(light_wp))
        else:
            self.upcoming_red_light_pub.publish(Int32(self.last_wp))
        self.state_count += 1

    def process_traffic_lights(self):
        """
        Finds closest visible traffic light, if one exists, and determines its location and color
        :return:
        int: index of waypoint closest to the upcoming stop line for a traffic light (-1 if none exists)
        int: ID of traffic light color (specified in styx_msgs/TrafficLight)
        """
        closest_light = None
        line_wp_idx = -1
        state = TrafficLight.UNKNOWN

        # List of positions that correspond to the line to stop in front of for a given intersection
        stop_line_positions = self.config['stop_line_positions']
        if self.pose:
            car_wp_idx = self.get_closest_waypoint(self.pose.pose.position.x, self.pose.pose.position.y)

            diff = len(self.base_waypoints.waypoints)
            for i, light in enumerate(self.lights):
                # Get stop line waypoint index
                line = stop_line_positions[i]
                temp_wp_idx = self.get_closest_waypoint(line[0], line[1])
                # Find closest stop line waypoint index
                d = temp_wp_idx - car_wp_idx
                if 0 <= d < diff:
                    diff = d
                    closest_light = light
                    line_wp_idx = temp_wp_idx
        if self.pose:
            current_wp=self.get_closest_waypoint(self.pose.pose.position.x, self.pose.pose.position.y)
        else:
            current_wp=-1
        # if we have found a closest light to monitor, then determine the stop line position of this light
        if closest_light and line_wp_idx >= 0:
            state = self.get_light_state(closest_light)
            if (self.process_count % LOGGING_THROTTLE_FACTOR) == 0:
                rospy.logwarn("DETECT: line_wp_idx={}, current_wp={}, state={}".format(line_wp_idx, current_wp, self.to_string(state)))
            rospy.logdebug('Closest light idx: {} \t state: {}'.format(line_wp_idx, state))

        return line_wp_idx, state

    def get_closest_waypoint(self, x, y):
        """
        Identifies the closest path waypoint to the given position
        https://en.wikipedia.org/wiki/Closest_pair_of_points_problem
        :param x: x coord position to match a waypoint to
        :param y: y coord position to match a waypoint to
        :return: index of the closest waypoint in self.waypoints
        """
        closest_idx = self.waypoint_tree.query([x, y], 1)[1]
        return closest_idx

    def get_light_state(self, light):
        """
        Determines the current color of the traffic light
        :param light: light to classify
        :return: ID of traffic light color (specified in styx_msgs/TrafficLight)
        """
    
        if TEST_MODE_ENABLED:
            classification = light.state
        else:
            cv_image = self.bridge.imgmsg_to_cv2(self.camera_image, 'bgr8')

            # Get classification
            classification = self.light_classifier.get_classification(cv_image)

            # Save image (throttled)
            if SAVE_IMAGES and (self.process_count % LOGGING_THROTTLE_FACTOR == 0):
                save_file = "../../../imgs/{}-{:.0f}.jpeg".format(self.to_string(classification), (time.time() * 100))
                cv2.imwrite(save_file, cv_image)
        print(classification)
        return classification

    def to_string(self, state):
        out = "unknown"
        if state == TrafficLight.GREEN:
            out = "green"
        elif state == TrafficLight.YELLOW:
            out = "yellow"
        elif state == TrafficLight.RED:
            out = "red"
        elif state == TrafficLight.UNKNOWN:
            out = "unknown"
        return out
예제 #11
0
class TLDetector(object):
    def __init__(self):
        rospy.init_node('tl_detector')

        self.pose = None
        self.prev_pose = None
        self.waypoints = None
        self.camera_image = None
        self.lights = []
        self.stop_lines = {}

        config_string = rospy.get_param("/traffic_light_config")
        self.config = yaml.load(config_string)

        self.is_site = rospy.get_param("/launch") == "site"

        self.upcoming_red_light_pub = rospy.Publisher('/traffic_waypoint',
                                                      Int32,
                                                      queue_size=1)

        self.bridge = CvBridge()
        self.listener = tf.TransformListener()
        self.light_classifier = TLClassifier()

        self.state = TrafficLight.UNKNOWN
        self.last_state = TrafficLight.UNKNOWN
        self.last_wp = -1
        self.state_count = 0

        sub1 = rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)
        sub2 = rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)
        '''
        /vehicle/traffic_lights provides you with the location of the traffic light in 3D map space and
        helps you acquire an accurate ground truth data source for the traffic light
        classifier by sending the current color state of all traffic lights in the
        simulator. When testing on the vehicle, the color state will not be available. You'll need to
        rely on the position of the light and the camera image to predict it.
        '''
        sub3 = rospy.Subscriber('/vehicle/traffic_lights', TrafficLightArray,
                                self.traffic_cb)
        sub6 = rospy.Subscriber('/image_color', Image, self.image_cb)

        rospy.spin()

    def pose_cb(self, msg):
        self.prev_pose = self.pose
        self.pose = msg

    def car_moved(self):
        if self.prev_pose is None and self.pose is not None:
            return True
        if self.prev_pose.pose.position.x != self.pose.pose.position.x:
            return True
        if self.prev_pose.pose.position.y != self.pose.pose.position.y:
            return True
        return False

    def waypoints_cb(self, waypoints):
        self.waypoints = waypoints

        # List of positions that correspond to the line to stop in front of for a given intersection
        config_string = rospy.get_param("/traffic_light_config")
        self.config = yaml.load(config_string)
        stop_line_positions = self.config['stop_line_positions']

        self.stop_lines = {}
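        # Map each stop line's nearest waypoint index to its pose so that
        # process_traffic_lights() can detect an upcoming stop line by waypoint index.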
        for stop_line in stop_line_positions:
            stop_line_pose = Pose()
            stop_line_pose.position.x = stop_line[0]
            stop_line_pose.position.y = stop_line[1]
            line_position = self.get_closest_waypoint(stop_line_pose)
            self.stop_lines[line_position] = stop_line_pose

    def traffic_cb(self, msg):
        self.lights = msg.lights

    def image_cb(self, msg):
        """Identifies red lights in the incoming camera image and publishes the index
            of the waypoint closest to the red light's stop line to /traffic_waypoint

        Args:
            msg (Image): image from car-mounted camera

        """
        self.has_image = True
        self.camera_image = msg
        if EXTRACT_SITE_IMAGES:
            cv_image = self.bridge.imgmsg_to_cv2(self.camera_image, "bgr8")
            filename = os.path.abspath(
                "light_classification/site_training_data/4-{}.jpg".format(
                    rospy.Time.now()))
            cv2.imwrite(filename, cv_image)

        light_wp, state = self.process_traffic_lights()
        '''
        Publish upcoming red lights at camera frequency.
        Each predicted state has to occur `STATE_COUNT_THRESHOLD` number
        of times till we start using it. Otherwise the previous stable state is
        used.
        '''
        if self.state != state:
            self.state_count = 0
            self.state = state
        elif self.state_count >= STATE_COUNT_THRESHOLD:
            self.last_state = self.state
            light_wp = light_wp if state == TrafficLight.RED else -1
            self.last_wp = light_wp
            self.upcoming_red_light_pub.publish(Int32(light_wp))
        else:
            self.upcoming_red_light_pub.publish(Int32(self.last_wp))
        self.state_count += 1

    def distance_to_waypoint(self, pose1, pose2):
        veh_x = pose1.x
        veh_y = pose1.y
        wp_x = pose2.x
        wp_y = pose2.y
        dx = veh_x - wp_x
        dy = veh_y - wp_y
        return math.sqrt(dx * dx + dy * dy)

    def get_closest_waypoint(self, pose):
        """Identifies the closest path waypoint to the given position
            https://en.wikipedia.org/wiki/Closest_pair_of_points_problem
        Args:
            pose (Pose): position to match a waypoint to

        Returns:
            int: index of the closest waypoint in self.waypoints

        """
        nearest = None
        min_dist = float("inf")
        for index, waypoint in enumerate(self.waypoints.waypoints):
            dist = self.distance_to_waypoint(pose.position,
                                             waypoint.pose.pose.position)
            if dist < min_dist:
                min_dist = dist
                nearest = index

        return nearest

    def project_to_image_plane(self, point_in_world):
        """Project point from 3D world coordinates to 2D camera image location

        Args:
            point_in_world (Point): 3D location of a point in the world

        Returns:
            x (int): x coordinate of target point in image
            y (int): y coordinate of target point in image

        """

        fx = self.config['camera_info']['focal_length_x']
        fy = self.config['camera_info']['focal_length_y']
        image_width = self.config['camera_info']['image_width']
        image_height = self.config['camera_info']['image_height']
        cx = image_width / 2
        cy = image_height / 2

        # get transform between pose of camera and world frame
        trans = None
        try:
            now = rospy.Time.now()
            self.listener.waitForTransform("/world", "/base_link", now,
                                           rospy.Duration(1.0))
            tl_point = PointStamped()
            tl_point.header.frame_id = "/world"
            tl_point.header.stamp = now
            tl_point.point.x = point_in_world.x
            tl_point.point.y = point_in_world.y
            tl_point.point.z = point_in_world.z

            tl_point = self.listener.transformPoint("/base_link", tl_point)

        except (tf.Exception, tf.LookupException,
                tf.ConnectivityException) as e:
            rospy.logerr(
                "Failed to find camera to map transform: {}".format(e))
            return None

        objectPoints = np.array([
            tl_point.point.y / tl_point.point.x,
            tl_point.point.z / tl_point.point.x, 1.0
        ])
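        # objectPoints is the light's position in the vehicle frame normalized by the
        # forward distance x (pinhole model: lateral y/x, vertical z/x), ready to be
        # multiplied by the camera matrix below.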

        ################################################################################
        # Manually tune focal length and camera coordinate for simulator
        #   for more details about this issue, please reference
        #   https://discussions.udacity.com/t/focal-length-wrong/358568/22
        if fx < 10:
            fx = -2580
            fy = -2730
            cx = 360
            cy = 700
        ################################################################################

        # TODO This can be a class member
        cameraMatrix = np.array([[fx, 0, 0], [0, fy, 0], [0, 0, 1]])

        imagePoints = cameraMatrix.dot(objectPoints)
        x = int(round(imagePoints[0]) + cx)
        y = int(round(imagePoints[1]) + cy)
        if x < 10 or x > image_width - 10 or y < 10 or y > image_height - 10:
            return None
        # rospy.loginfo("objectpoints: x: {}, y: {}, z: {}".format(tl_point.point.x, tl_point.point.y, tl_point.point.z))
        return (x, y)

    def get_light_state(self, light):
        """Determines the current color of the traffic light

        Args:
            light (TrafficLight): light to classify

        Returns:
            int: ID of traffic light color (specified in styx_msgs/TrafficLight)

        """

        if not light:
            return TrafficLight.UNKNOWN

        if not self.has_image:
            self.prev_light_loc = None
            return TrafficLight.UNKNOWN

        crop_size = self.light_classifier.crop_size
        cv_image = self.bridge.imgmsg_to_cv2(self.camera_image, "bgr8")

        if self.is_site:
            # remove hood and upper unused sections
            cropped = cv_image[280:750, :, :]
            cropped = cv2.resize(cropped, (crop_size, crop_size))
        else:
            xy = self.project_to_image_plane(light.pose.pose.position)

            if xy is None:
                cropped = cv2.resize(cv_image, (crop_size, crop_size))
            else:
                x, y = xy
                rospy.loginfo("light image loc: {}, {}".format(x, y))
                image_width = self.config['camera_info']['image_width']
                image_height = self.config['camera_info']['image_height']

                half_crop = int(crop_size / 2)
                if x > image_width - half_crop:
                    x_min = image_width - crop_size
                    x_max = image_width
                elif x < half_crop:
                    x_min = 0
                    x_max = crop_size
                else:
                    x_min = max(0, x - half_crop)
                    x_max = min(x_min + crop_size, image_width)

                if y > image_height - half_crop:
                    y_min = image_height - crop_size
                    y_max = image_height
                elif y < half_crop:
                    y_min = 0
                    y_max = crop_size
                else:
                    y_min = max(0, y - half_crop)
                    y_max = min(y_min + crop_size, image_height)

                cropped = cv_image[y_min:y_max, x_min:x_max]

        pred = self.light_classifier.get_classification(cropped)

        rospy.loginfo("Truth: {}, Pred: {}".format(light.state, pred))

        if light.state != TrafficLight.UNKNOWN and self.car_moved(
        ) and COLLECT_TRAINING_DATA:
            # Ground truth is known, use it to create training data
            filename = os.path.abspath(
                "light_classification/training_data/{}-{}.jpg".format(
                    light.state, rospy.Time.now()))
            rospy.loginfo("Car moved, saving new image: {}".format(filename))
            cv2.imwrite(filename, cropped)

        return pred

    def process_traffic_lights(self):
        """Finds closest visible traffic light, if one exists, and determines its
            location and color

        Returns:
            int: index of waypoint closest to the upcoming stop line for a traffic light (-1 if none exists)
            int: ID of traffic light color (specified in styx_msgs/TrafficLight)

        """
        if not self.waypoints or not self.pose:
            return -1, TrafficLight.UNKNOWN

        light = None

        # car_fwd indicates whether car's moving the same direction as the waypoint index increase
        car_fwd = False
        closest_wp_index = None
        waypoints = self.waypoints.waypoints
        wp_length = len(waypoints)
        if self.pose:
            car_orientation = self.pose.pose.orientation
            quaternion = (car_orientation.x, car_orientation.y,
                          car_orientation.z, car_orientation.w)
            _, _, yaw = tf.transformations.euler_from_quaternion(quaternion)

            car_pose = self.pose.pose
            closest_wp_index = self.get_closest_waypoint(car_pose)
            wp1 = waypoints[closest_wp_index]
            wp2 = waypoints[(closest_wp_index + 1) % wp_length]
            fwd_angle = math.atan2(
                wp2.pose.pose.position.y - wp1.pose.pose.position.y,
                wp2.pose.pose.position.x - wp1.pose.pose.position.x)

            # Normalize yaw - fwd_angle to [-pi, pi] before the forward test
            heading_diff = math.atan2(math.sin(yaw - fwd_angle), math.cos(yaw - fwd_angle))
            if -math.pi / 2.0 < heading_diff < math.pi / 2.0:
                car_fwd = True

        light, light_wp = None, None

        for i in range(wp_length):
            inc = 1 if car_fwd else -1
            index = (closest_wp_index + i * inc) % wp_length
            if index in self.stop_lines:
                light_wp = index
                stop_line_pose = self.stop_lines[index]
                min_dist = float("inf")
                # Find associated light
                for light_candidate in self.lights:
                    dist = self.distance_to_waypoint(
                        light_candidate.pose.pose.position,
                        stop_line_pose.position)
                    if dist < min_dist:
                        light = light_candidate
                        min_dist = dist
                break

        if light:
            state = TrafficLight.UNKNOWN
            if abs(light_wp - closest_wp_index) < 50:
                state = self.get_light_state(light)
            return light_wp, state
        self.waypoints = None
        return -1, TrafficLight.UNKNOWN
예제 #12
0
class TLDetector(object):
    def __init__(self):
        rospy.init_node('tl_detector')

        self.pose = None
        self.base_waypoints = None
        self.camera_image = None
        self.lights = []
        
        self.has_image = False

        self.state = TrafficLight.UNKNOWN
        self.last_state = TrafficLight.UNKNOWN
        self.last_wp = -1
        self.state_count = 0
        
        config_string = rospy.get_param("/traffic_light_config")
        self.config = yaml.load(config_string)
        
        model = load_model("./light_classification/model.h5")
        self.light_classifier = TLClassifier(model)

        sub1 = rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)
        sub2 = rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)

        '''
        /vehicle/traffic_lights provides you with the location of the traffic light in 3D map space and 
        helps you acquire an accurate ground truth data source for the traffic light
        classifier by sending the current color state of all traffic lights in the
        simulator. When testing on the vehicle, the color state will not be available. You'll need to
        rely on the position of the light and the camera image to predict it.
        '''
        sub3 = rospy.Subscriber('/vehicle/traffic_lights', TrafficLightArray, self.traffic_cb)
        sub6 = rospy.Subscriber('/image_color', Image, self.image_cb)

        self.upcoming_red_light_pub = rospy.Publisher('/traffic_waypoint', Int32, queue_size=1)

        self.bridge = CvBridge()
        self.listener = tf.TransformListener()

        rospy.spin()

    def pose_cb(self, msg):
        self.pose = msg.pose

    def waypoints_cb(self, data): # is only called once
        self.base_waypoints = data.waypoints

    def traffic_cb(self, msg):  # call-back for cheating, as long as we don't have a classifier
        self.lights = msg.lights

    def image_cb(self, msg):
        """Identifies red lights in the incoming camera image and publishes the index
            of the waypoint closest to the red light's stop line to /traffic_waypoint

        Args:
            msg (Image): image from car-mounted camera

        """
        self.has_image = True
        self.camera_image = msg
        light_wp, state = self.process_traffic_lights()

        '''
        Publish upcoming red lights at camera frequency.
        Each predicted state has to occur `STATE_COUNT_THRESHOLD` number
        of times till we start using it. Otherwise the previous stable state is
        used.
        '''
        if self.state != state:
            self.state_count = 0
            self.state = state
        elif self.state_count >= STATE_COUNT_THRESHOLD:
            self.last_state = self.state
            light_wp = light_wp if state == TrafficLight.RED else -1
            self.last_wp = light_wp
            self.upcoming_red_light_pub.publish(Int32(light_wp))
        else:
            self.upcoming_red_light_pub.publish(Int32(self.last_wp))
        self.state_count += 1

    def get_closest_waypoint(self, pose):
        """Identifies the closest path waypoint to the given position
            https://en.wikipedia.org/wiki/Closest_pair_of_points_problem
        Args:
            pose (Pose): position to match a waypoint to

        Returns:
            int: index of the closest waypoint in self.waypoints

        """
        dist = float('inf') # a very large number
        nearest_idx = None
        for idx,waypoint in enumerate(self.base_waypoints):
            temp_dist = self.distance(waypoint, pose)
            #rospy.logwarn(temp_dist)
            if dist > temp_dist:
                dist = temp_dist
                nearest_idx = idx
        return nearest_idx

    def distance(self, waypoint, pose):
        dl = lambda a, b: math.sqrt((a.x-b.x)**2 + (a.y-b.y)**2)
        return dl(waypoint.pose.pose.position, pose.position)

    def project_to_image_plane(self, point_in_world):
        """Project point from 3D world coordinates to 2D camera image location

        Args:
            point_in_world (Point): 3D location of a point in the world

        Returns:
            x (int): x coordinate of target point in image
            y (int): y coordinate of target point in image

        """

        fx = self.config['camera_info']['focal_length_x']
        fy = self.config['camera_info']['focal_length_y']
        image_width = self.config['camera_info']['image_width']
        image_height = self.config['camera_info']['image_height']

        # get transform between pose of camera and world frame
        trans = None
        try:
            now = rospy.Time.now()
            self.listener.waitForTransform("/base_link",
                  "/world", now, rospy.Duration(1.0))
            (trans, rot) = self.listener.lookupTransform("/base_link",
                  "/world", now)

        except (tf.Exception, tf.LookupException, tf.ConnectivityException):
            rospy.logerr("Failed to find camera to map transform")

        #TODO Use tranform and rotation to calculate 2D position of light in image

        x = 0
        y = 0

        return (x, y)

    def get_light_state(self, light):
        """Determines the current color of the traffic light

        Args:
            light (TrafficLight): light to classify

        Returns:
            int: ID of traffic light color (specified in styx_msgs/TrafficLight)

        """
        if(not self.has_image):
            self.prev_light_loc = None
            return TrafficLight.UNKNOWN

        cv_image = self.bridge.imgmsg_to_cv2(self.camera_image, "bgr8")

        #x, y = self.project_to_image_plane(light.pose.pose.position)
        
        #Get classification
        return self.light_classifier.get_classification(cv_image)

    def process_traffic_lights(self):
        """Finds closest visible traffic light, if one exists, and determines its
            location and color

        Returns:
            int: index of waypoint closest to the upcoming stop line for a traffic light (-1 if none exists)
            int: ID of traffic light color (specified in styx_msgs/TrafficLight)

        """
        
        # check if all necessary information is available
        if (self.pose == None or self.base_waypoints == None):
            return -1, TrafficLight.UNKNOWN
        
        light_positions = self.config['stop_line_positions']

        # find the waypoint index that is closest to the car
        car_wp_idx = self.get_closest_waypoint(self.pose)

        # find the closest visible traffic light (if one exists)
        light_wp_idx = None
        light = None # TrafficLight object

        tl_waypoint_indices = self.get_traffic_light_wp_index(light_positions)
        for i, tl_wp_idx in enumerate(tl_waypoint_indices):
            idx_diff = tl_wp_idx - car_wp_idx
            # traffic light is ahead of the car within number of LOOKAHEAD_WPS
            if idx_diff >= 0 and idx_diff <= LOOKAHEAD_WPS:
                # minus LIGHTGAP so that the car stops near the stop line
                light_wp_idx = tl_wp_idx - LIGHTGAP
                light = self.lights[i]
                #rospy.logwarn(light_wp_idx)
                #rospy.logwarn(tl_waypoint_indices)

        if light:
            state = self.get_light_state(light)
            #rospy.logwarn(state)
            return light_wp_idx, state  # light.state
        else:
            return -1, TrafficLight.UNKNOWN

    def get_traffic_light_wp_index(self, light_positions):
        indices = []
        for pos in light_positions:
            pose = Pose()
            pose.position.x = pos[0]
            pose.position.y = pos[1]
            indices.append(self.get_closest_waypoint(pose))
        return indices
예제 #13
0
class TLDetector(object):
    def __init__(self):
        rospy.init_node('tl_detector')

        self.pose = None
        self.waypoints = None
        self.KDTree = None
        self.camera_image = None
        self.lights = []
        self.stop_lines = None

        rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb, queue_size=1)
        rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb, queue_size=1)

        '''
        /vehicle/traffic_lights provides you with the location of the traffic light in 3D map space and
        helps you acquire an accurate ground truth data source for the traffic light
        classifier by sending the current color state of all traffic lights in the
        simulator. When testing on the vehicle, the color state will not be available. You'll need to
        rely on the position of the light and the camera image to predict it.
        '''
        #[alexm]NOTE: we should rely on this topic's data except state of the light 
        #[alexm]NOTE: according to this: https://carnd.slack.com/messages/C6NVDVAQ3/convo/C6NVDVAQ3-1504625321.000063/
        rospy.Subscriber('/vehicle/traffic_lights', TrafficLightArray, self.traffic_cb, queue_size=1)
        rospy.Subscriber('/image_color', Image, self.image_cb, queue_size=1)
        rospy.Subscriber('/next_wp', Int32, self.next_wp_cb, queue_size=1)
        config_string = rospy.get_param("/traffic_light_config")
        
        self.config = yaml.load(config_string)

        self.upcoming_red_light_pub = rospy.Publisher('/traffic_waypoint', Int32, queue_size=1)

        self.bridge = CvBridge()
        self.light_classifier = TLClassifier()
        self.light_classifier.init()
        
        self.listener = tf.TransformListener()

        self.state = TrafficLight.UNKNOWN
        self.last_state = TrafficLight.UNKNOWN
        self.last_wp = -1
        self.state_count = 0

        self.next_wp = None
        rospy.spin()

    def next_wp_cb(self, val):
        self.next_wp = val.data

    def pose_cb(self, msg):
        self.pose = msg
            
    def waypoints_cb(self, lane):
        if self.waypoints != lane.waypoints:
            data = []
            for wp in lane.waypoints:
                data.append((wp.pose.pose.position.x, wp.pose.pose.position.y))
            self.KDTree = spatial.KDTree(data)
            self.waypoints = lane.waypoints
            self.stop_lines = None

    def find_stop_line_position(self, light):
        """Finds stop line position from config corresponding to given light 
        Args:
            msg (Image): image from car-mounted camera

        """

        stop_line_positions = self.config['stop_line_positions']
        min_distance = 100000
        result = None
        light_pos = light.pose.pose.position
        for pos in stop_line_positions:
            distance = self.euclidean_distance_2d(pos, light_pos)
            if (distance< min_distance):
                min_distance = distance
                result = pos
        return result

    def traffic_cb(self, msg):
        if not self.stop_lines and self.KDTree:
            stop_lines = []
            for light in msg.lights:
                # find corresponding stop line position from config
                stop_line_pos = self.find_stop_line_position(light)
                # find the corresponding waypoint index
                closest_index = self.KDTree.query(np.array([stop_line_pos]))[1][0]
                closest_wp = self.waypoints[closest_index]
                if not self.is_ahead(closest_wp.pose.pose, stop_line_pos):
                    closest_index = max(closest_index - 1, 0)
                # add index to list
                stop_lines.append(closest_index)
            # update lights and stop line waypoint indices
            self.lights = msg.lights
            self.stop_lines = stop_lines

    def image_cb(self, msg):
        """Identifies red lights in the incoming camera image and publishes the index
            of the waypoint closest to the red light's stop line to /traffic_waypoint

        Args:
            msg (Image): image from car-mounted camera

        """
        self.has_image = True
        self.camera_image = msg
        light_wp, state = self.process_traffic_lights()

        '''
        Publish upcoming red lights at camera frequency.
        Each predicted state has to occur `STATE_COUNT_THRESHOLD` number
        of times till we start using it. Otherwise the previous stable state is
        used.
        '''
        if self.state != state:
            self.state_count = 0
            self.state = state
        elif self.state_count >= STATE_COUNT_THRESHOLD:
            self.last_state = self.state
            light_wp = light_wp if state in [TrafficLight.RED, TrafficLight.YELLOW] else -1
            self.last_wp = light_wp
            self.upcoming_red_light_pub.publish(Int32(light_wp))
        else:
            self.upcoming_red_light_pub.publish(Int32(self.last_wp))
        self.state_count += 1

    def project_to_image_plane(self, stamped_world_point):
        """Project point from 3D world coordinates to 2D camera image location

        Args:
            point_in_world (Point): 3D location of a point in the world

        Returns:
            x (int): x coordinate of target point in image
            y (int): y coordinate of target point in image

        """

        fx = self.config['camera_info']['focal_length_x']
        fy = self.config['camera_info']['focal_length_y']
        image_width = self.config['camera_info']['image_width']
        image_height = self.config['camera_info']['image_height']

        # get transform between pose of camera and world frame
        stamped_world_point.header.stamp = rospy.Time.now()
        base_point = None

        try:
            now = rospy.Time.now()
            self.listener.waitForTransform("/base_link",
                  "/world", now, rospy.Duration(1.0))
            base_point = self.listener.transformPoint("/base_link", stamped_world_point)

        except (tf.Exception, tf.LookupException, tf.ConnectivityException):
            rospy.logerr("Failed to find camera to map transform")

        if not base_point:
            return (0, 0)

        base_point = base_point.point

        print (base_point)

        cx = image_width/2
        cy = image_height/2
        # Ugly workaround wrong camera settings
        if fx < 10.:
            fx = 2344.
            fy = 2552.
            cy = image_height 
            base_point.z -= 1

        cam_matrix = np.array([[fx,  0, cx],
                               [ 0, fy, cy],
                               [ 0,  0,  1]])
        obj_points = np.array([[- base_point.y, - base_point.z, base_point.x]]).astype(np.float32)
        result, _ = cv2.projectPoints(obj_points, (0,0,0), (0,0,0), cam_matrix, None)

        #print(result)
        cx = image_width/2
        cy = image_height

        x = int(result[0,0,0]) 
        y = int(result[0,0,1])
        print(x, y)

        return (x, y)

    def get_light_state(self, light):
        """Determines the current color of the traffic light

        Args:
            light (TrafficLight): light to classify

        Returns:
            int: ID of traffic light color (specified in styx_msgs/TrafficLight)

        """
        if(not self.has_image):
            self.prev_light_loc = None
            return TrafficLight.UNKNOWN

        cv_image = self.bridge.imgmsg_to_cv2(self.camera_image, "bgr8")

        #TODO use light location to zoom in on traffic light in image
        # Projection wont work cuz of absent base_link->world transform on site
        
        #tl_point = PointStamped()
        #tl_point.header = light.pose.header
        #tl_point.point = Point()
        #tl_point.point.x = light.pose.pose.position.x
        #tl_point.point.y = light.pose.pose.position.y
        #tl_point.point.z = light.pose.pose.position.z
        #x, y = self.project_to_image_plane(tl_point)

        #Get classification
        state = self.light_classifier.get_classification(cv_image)
        if state == TrafficLight.UNKNOWN and self.last_state:
            state = self.last_state
        return state

    def process_traffic_lights(self):
        """Finds closest visible traffic light, if one exists, and determines its
            location and color

        Returns:
            int: index of waypoint closest to the upcoming stop line for a traffic light (-1 if none exists)
            int: ID of traffic light color (specified in styx_msgs/TrafficLight)
        """
        light = None
        light_wp = -1

        if(self.waypoints and self.next_wp and self.stop_lines):
            next_wp = self.waypoints[min(self.next_wp, len(self.waypoints)-1)]
            target_velocity = next_wp.twist.twist.linear.x
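            # Kinematic stopping distance v^2 / (2 * a): only stop lines the car can
            # still brake for are considered; e.g. at 11 m/s with MAX_DECEL = 1 m/s^2
            # (defined elsewhere in this example) this is roughly 60 m.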
            search_distance = target_velocity * target_velocity / 2 / MAX_DECEL
            min_distance = search_distance
            for i in range(len(self.stop_lines)):
                stop_line_wp_index = self.stop_lines[i]
                if stop_line_wp_index >= self.next_wp:
                    stop_line_wp = self.waypoints[stop_line_wp_index]
                    distance = self.euclidean_distance_2d(next_wp.pose.pose.position, stop_line_wp.pose.pose.position)
                    if (distance < min_distance):
                        light_wp = stop_line_wp_index
                        light = self.lights[i]
#        print('n_wp:{}; l_wp:{}'.format(self.next_wp, light_wp))
        if light_wp > -1:
            state = self.get_light_state(light)
            return light_wp, state
        return -1, TrafficLight.UNKNOWN

    def is_ahead(self, origin_pose, test_position):
        """Determines if test position is ahead of origin_pose
        Args:
            origin_pose - geometry_msgs.msg.Pose instance
            test_position - geometry_msgs.msg.Point instance, or tuple (x,y)

        Returns:
            bool: True if and only if test_position is ahead of origin_pose
        """
        test_x = self.get_x(test_position)
        test_y = self.get_y(test_position)

        orig_posit = origin_pose.position
        orig_orient = origin_pose.orientation
        quaternion = (orig_orient.x, orig_orient.y, orig_orient.z, orig_orient.w)
        _, _, yaw = tf.transformations.euler_from_quaternion(quaternion)
        orig_x = ((test_x - orig_posit.x) * math.cos(yaw) \
                + (test_y - orig_posit.y) * math.sin(yaw))
        return orig_x > 0.

    def euclidean_distance_2d(self, position1, position2):
        """Finds distance between two position1 and position2
        Args:
            position1, position2 - geometry_msgs.msg.Point instance, or tuple (x,y)
        Returns:
            double: distance
        """
        a_x = self.get_x(position1)
        a_y = self.get_y(position1)
        b_x = self.get_x(position2)
        b_y = self.get_y(position2)
        return math.sqrt((a_x - b_x)**2 + (a_y - b_y)**2)

    def get_x(self, pos):
        return pos.x if isinstance(pos, Point) else pos[0] 
    def get_y(self, pos):
        return pos.y if isinstance(pos, Point) else pos[1] 
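The project_to_image_plane method above is a pinhole projection: a point already expressed in the camera frame is mapped to pixel coordinates via u = fx*x/z + cx, v = fy*y/z + cy, which is exactly what cv2.projectPoints computes when the rotation and translation vectors are zero. A minimal standalone sketch of that mapping, using made-up intrinsics rather than the simulator's actual camera configuration:

import numpy as np
import cv2

# assumed intrinsics, for illustration only
fx, fy = 2344.0, 2552.0
cx, cy = 400.0, 300.0
cam_matrix = np.array([[fx,  0, cx],
                       [ 0, fy, cy],
                       [ 0,  0,  1]], dtype=np.float64)

# a point 1 m left, 2 m up and 20 m ahead, already in the OpenCV camera frame
# (x right, y down, z forward); the node above maps the car frame into this
# convention with (-y, -z, x)
obj_points = np.array([[-1.0, -2.0, 20.0]], dtype=np.float32)

img_points, _ = cv2.projectPoints(obj_points, np.zeros(3), np.zeros(3),
                                  cam_matrix, None)
u, v = img_points[0, 0]
print(u, v)  # ~282.8, ~44.8 -> matches fx*(-1/20)+cx and fy*(-2/20)+cy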
예제 #14
0
class TLDetector(object):
    def __init__(self):
        rospy.init_node('tl_detector')

        self.pose = None
        self.waypoints = None
        self.camera_image = None
        self.lights = []
        self.state = TrafficLight.UNKNOWN
        self.last_state = TrafficLight.UNKNOWN
        self.last_wp = -1
        self.state_count = 0
        self.light_classifier = None

        self.waypoints_2d = None
        self.waypoints_tree = None 

        rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)
        rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)

        '''
        /vehicle/traffic_lights provides you with the location of the traffic light in 3D map space and
        helps you acquire an accurate ground truth data source for the traffic light
        classifier by sending the current color state of all traffic lights in the
        simulator. When testing on the vehicle, the color state will not be available. You'll need to
        rely on the position of the light and the camera image to predict it.
        '''
        rospy.Subscriber('/vehicle/traffic_lights', TrafficLightArray, self.traffic_cb)
        rospy.Subscriber('/image_color', Image, self.image_cb)

        config_string = rospy.get_param("/traffic_light_config")
        self.config = yaml.load(config_string)

        self.upcoming_red_light_pub = rospy.Publisher('/traffic_waypoint', Int32, queue_size=1)

        self.bridge = CvBridge()
        
        dirname = os.path.dirname(__file__)
        model_name = self.config['model_name']
        model_path = os.path.join(dirname, "light_classification/models/{}".format(model_name))
        label_path = os.path.join(dirname, 'light_classification/models/label_map.pbtxt')
        self.light_classifier = TLClassifier(model_path, label_path)
        self.listener = tf.TransformListener()

        self.loop()

    def loop(self):
        rate = rospy.Rate(50) # 50Hz
        while not rospy.is_shutdown():
            rate.sleep()

    def pose_cb(self, msg):
        self.pose = msg

    def waypoints_cb(self, waypoints):
        self.waypoints = waypoints
        if not self.waypoints_2d:
            self.waypoints_2d = [[e.pose.pose.position.x, e.pose.pose.position.y] for e in waypoints.waypoints]
            self.waypoints_tree = KDTree(self.waypoints_2d)

    def traffic_cb(self, msg):
        self.lights = msg.lights

    def image_cb(self, msg):
        """Identifies red lights in the incoming camera image and publishes the index
            of the waypoint closest to the red light's stop line to /traffic_waypoint

        Args:
            msg (Image): image from car-mounted camera

        """
        self.has_image = True
        self.camera_image = msg
        light_wp, state = self.process_traffic_lights()

        '''
        Publish upcoming red lights at camera frequency.
        Each predicted state has to occur `STATE_COUNT_THRESHOLD` number
        of times till we start using it. Otherwise the previous stable state is
        used.
        '''
        if self.state != state:
            self.state_count = 0
            self.state = state
        elif self.state_count >= STATE_COUNT_THRESHOLD:
            self.last_state = self.state
            light_wp = light_wp if state == TrafficLight.RED else -1
            self.last_wp = light_wp
            self.upcoming_red_light_pub.publish(Int32(light_wp))
        else:
            self.upcoming_red_light_pub.publish(Int32(self.last_wp))
        self.state_count += 1

    def get_closest_waypoint(self, x, y):
        """Identifies the closest path waypoint to the given position
            https://en.wikipedia.org/wiki/Closest_pair_of_points_problem
        Args:
            x (float): x coordinate of the position to match a waypoint to
            y (float): y coordinate of the position to match a waypoint to

        Returns:
            int: index of the closest waypoint in self.waypoints

        """
        closest_idx = self.waypoints_tree.query([x, y], 1)[1]
        return closest_idx

    def get_light_state(self, light):
        """Determines the current color of the traffic light

        Args:
            light (TrafficLight): light to classify

        Returns:
            int: ID of traffic light color (specified in styx_msgs/TrafficLight)

        """
        if(not self.has_image or not self.light_classifier):
            self.prev_light_loc = None
            return False

        cv_image = self.bridge.imgmsg_to_cv2(self.camera_image, "bgr8")

        #Get classification
        return self.light_classifier.get_classification(cv_image)

    def process_traffic_lights(self):
        """Finds closest visible traffic light, if one exists, and determines its
            location and color

        Returns:
            int: index of the waypoint closest to the upcoming stop line for a traffic light (-1 if none exists)
            int: ID of traffic light color (specified in styx_msgs/TrafficLight)

        """
        closest_light = None
        line_wp_idx = None

        # List of positions that correspond to the line to stop in front of for a given intersection
        stop_line_positions = self.config['stop_line_positions']
        if(self.pose and self.waypoints_tree):
            car_wp_idx = self.get_closest_waypoint(self.pose.pose.position.x, self.pose.pose.position.y)
            
            # find the closest visible traffic light (if one exists)
            diff = len(self.waypoints.waypoints)
            for i, light in enumerate(self.lights):
                # get stop line waypoint index
                line = stop_line_positions[i]
                temp_wp_idx = self.get_closest_waypoint(line[0], line[1])
                # find closest stop line waypoint index
                d = temp_wp_idx - car_wp_idx
                if d >= 0 and d < diff:
                    diff = d
                    closest_light = light
                    line_wp_idx = temp_wp_idx
        
        if closest_light:
            state = self.get_light_state(closest_light)
            return line_wp_idx, state
        return -1, TrafficLight.UNKNOWN
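get_closest_waypoint above leans on scipy's KDTree so that each nearest-waypoint lookup is O(log n) instead of a linear scan over the whole track. A small self-contained sketch of the same query pattern, with toy waypoint coordinates:

from scipy.spatial import KDTree

# toy 2D waypoints standing in for waypoints.waypoints
waypoints_2d = [[0.0, 0.0], [1.0, 0.0], [2.0, 0.5], [3.0, 1.5]]
tree = KDTree(waypoints_2d)

# query() returns (distance, index); the detector only keeps the index
dist, idx = tree.query([2.2, 0.4], 1)
print(idx)  # 2 -> the waypoint at [2.0, 0.5]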
예제 #15
0
class TLDetector(object):
    def __init__(self):
        rospy.init_node('tl_detector')

        self.pose = None
        self.waypoints = None
        self.camera_image = None
        self.lights = []

        sub1 = rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)
        self.sub2 = rospy.Subscriber('/base_waypoints', Lane,
                                     self.waypoints_cb)
        '''
        /vehicle/traffic_lights provides you with the location of the traffic light in 3D map space and
        helps you acquire an accurate ground truth data source for the traffic light
        classifier by sending the current color state of all traffic lights in the
        simulator. When testing on the vehicle, the color state will not be available. You'll need to
        rely on the position of the light and the camera image to predict it.
        '''
        config_string = rospy.get_param("/traffic_light_config")
        self.config = yaml.load(config_string)

        self.bridge = CvBridge()

        self.light_classifier = TLClassifier()
        self.listener = tf.TransformListener()

        self.state = TrafficLight.UNKNOWN
        self.last_state = TrafficLight.UNKNOWN
        self.last_wp = -1
        self.state_count = 0

        self.upcoming_red_light_pub = rospy.Publisher('/traffic_waypoint',
                                                      Int32,
                                                      queue_size=1)
        sub3 = rospy.Subscriber('/vehicle/traffic_lights', TrafficLightArray,
                                self.traffic_cb)
        sub6 = rospy.Subscriber('/image_color',
                                Image,
                                self.image_cb,
                                queue_size=1)

        rospy.spin()

    def pose_cb(self, msg):
        self.pose = msg.pose

    def waypoints_cb(self, waypoints):
        self.waypoints = waypoints.waypoints
        self.waypoints_array = np.asarray([(w.pose.pose.position.x,
                                            w.pose.pose.position.y)
                                           for w in waypoints.waypoints])
        #rospy.loginfo('waypoints {} = {}'.format(self.waypoints_array.shape, self.waypoints_array))
        if rospy.get_param('/unregister_base_waypoints', False):
            self.sub2.unregister()
            rospy.loginfo('base_waypoints subscriber unregistered')

    def traffic_cb(self, msg):
        self.lights = msg.lights

    def image_cb(self, msg):
        """Identifies red lights in the incoming camera image and publishes the index
            of the waypoint closest to the red light to /traffic_waypoint

        Args:
            msg (Image): image from car-mounted camera
        """
        global ProcessingTimeSum, ProcessingIterations
        if MEASURE_PERFORMANCE:
            startTime = time.time()

        self.has_image = True
        self.camera_image = msg

        light_wp, state = self.process_traffic_lights()
        '''
        Publish upcoming red lights at camera frequency.
        Each predicted state has to occur `~state_count_threshold` number
        of times till we start using it. Otherwise the previous stable state is
        used.
        '''
        if self.state != state:
            self.state_count = 0
            self.state = state
        elif self.state_count >= rospy.get_param('~state_count_threshold', 3):
            self.last_state = self.state
            light_wp = light_wp if state == TrafficLight.RED else -1
            self.last_wp = light_wp
            #rospy.loginfo('llight waypoints %i',light_wp)
            self.upcoming_red_light_pub.publish(Int32(light_wp))
        else:
            self.upcoming_red_light_pub.publish(Int32(self.last_wp))
        self.state_count += 1

    def get_closest_waypoint(self, position_x, position_y):
        """Identifies the closest path waypoint to the given position
            https://en.wikipedia.org/wiki/Closest_pair_of_points_problem
        Args:
            position_x, position_y: coordinates of the position to match a waypoint to

        Returns:
            int: index of the closest waypoint in self.waypoints
        """
        index = -1
        #Return if way points are empty
        if self.waypoints is None:
            return index

        position = np.asarray([position_x, position_y])
        dist_squared = np.sum((self.waypoints_array - position)**2, axis=1)
        index = np.argmin(dist_squared)
        #rospy.loginfo('tl.detector.get_closest_waypoint({}) found at = {}, distance = {}, time = {}'.format(
        #    position, index, np.sqrt(dist_squared[index]), time.time() - t))

        return index

    def euclidianDistance(self, x1, y1, x2, y2):
        return math.sqrt((x1 - x2)**2 + (y1 - y2)**2)

    def project_to_image_plane(self, point_in_world_x, point_in_world_y,
                               point_in_world_z):
        """Project point from 3D world coordinates to 2D camera image location

        Args:
            point_in_world (Point): 3D location of a point in the world

        Returns:
            x (int): x coordinate of target point in image
            y (int): y coordinate of target point in image

        """
        fx = self.config['camera_info']['focal_length_x']
        fy = self.config['camera_info']['focal_length_y']
        image_width = self.config['camera_info']['image_width']
        image_height = self.config['camera_info']['image_height']

        # get transform between pose of camera and world frame
        trans = None
        rot = None
        try:
            now = rospy.Time.now()
            self.listener.waitForTransform("/base_link", "/world", now,
                                           rospy.Duration(1.0))
            (trans,
             rot) = self.listener.lookupTransform("/base_link", "/world", now)

        except (tf.Exception, tf.LookupException, tf.ConnectivityException):
            rospy.logerr("Failed to find camera to map transform")

        if trans is None or rot is None:
            return False, -1, -1

        #Use transform and rotation to calculate 2D position of light in image

        point_in_world_z = 1  # Does not work with correct z value
        world_point = np.array(
            [point_in_world_x, point_in_world_y,
             point_in_world_z]).reshape(1, 3, 1)

        camera_mat = np.matrix([[fx, 0, image_width / 2],
                                [0, fy, image_height / 2], [0, 0, 1]])
        distCoeff = None

        rot_vec, _ = cv2.Rodrigues(self.QuaterniontoRotationMatrix(
            rot))  # quaternion (x, y, z, w) -> rotation matrix -> Rodrigues rotation vector
        ret, _ = cv2.projectPoints(world_point, rot_vec,
                                   np.array(trans).reshape(3, 1), camera_mat,
                                   distCoeff)

        #Unpack values and return
        ret = ret.reshape(2, )
        #For some reason u & v are swapped
        u = int(round(ret[1]))
        v = int(round(ret[0]))

        traffic_light_visible = False
        if u >= 0 and u < image_width and v >= 0 and v <= image_height:
            traffic_light_visible = True

        return traffic_light_visible, u, v

    def QuaterniontoRotationMatrix(self, q):
        '''Calculates the Rotation Matrix from Quaternion
        s is the real part
        x, y, z are the imaginary (vector) components'''
        #https://www.uni-koblenz.de/~cg/veranst/ws0001/sem/Bartz.pdf Chap. 1.2.6

        x, y, z, s = q

        #Precalculate repeatedly used values
        x2 = x**2
        xy = x * y
        xz = x * z

        y2 = y**2
        yz = y * z

        z2 = z**2

        sz = s * z
        sy = s * y
        sx = s * x

        #Calculate rotation matrix
        R11 = 1 - 2.0 * (y2 + z2)
        R12 = 2.0 * (xy - sz)
        R13 = 2.0 * (xz + sy)

        R21 = 2.0 * (xy + sz)
        R22 = 1 - 2.0 * (x2 + z2)
        R23 = 2.0 * (yz - sx)

        R31 = 2.0 * (xz - sy)
        R32 = 2.0 * (yz + sx)
        R33 = 1 - 2.0 * (x2 + y2)

        return np.matrix([[R11, R12, R13], [R21, R22, R23], [R31, R32, R33]])

    def get_light_state(self, light_pos_x, light_pos_y, light_pos_z):
        """Determines the current color of the traffic light

        Args:
            light (TrafficLight): light to classify

        Returns:
            int: ID of traffic light color (specified in styx_msgs/TrafficLight)

        """
        if (not self.has_image):
            self.prev_light_loc = None
            return False

        cv_image = self.bridge.imgmsg_to_cv2(self.camera_image, "bgr8")

        visible, x, y = self.project_to_image_plane(light_pos_x, light_pos_y,
                                                    light_pos_z)

        #Show car image
        if DISPLAY_CAMERA:
            image_tmp = np.copy(cv_image)
            #Draw a circle
            cv2.circle(image_tmp, (x, y), 20, (255, 0, 0), thickness=2)
            cv2.imshow('image', image_tmp)
            cv2.waitKey(1)

        state = TrafficLight.UNKNOWN
        if visible:
            #TODO use light location to zoom in on traffic light in image

            #Get classification
            state = self.light_classifier.get_classification(cv_image)

        return state

    def get_nearest_traffic_light(self, waypoint_start_index):
        traffic_light = None
        traffic_light_positions = self.config['light_positions']
        #traffic_light_positions = self.config['manual_light_positions']
        last_index = sys.maxsize

        #TODO: Only one complete circle, no minimum distance considered, yet
        for i in range(0, len(traffic_light_positions)):
            index = self.get_closest_waypoint(
                float(traffic_light_positions[i][0]),
                float(traffic_light_positions[i][1]))
            if index >= waypoint_start_index and index < last_index:
                last_index = index
                traffic_light = traffic_light_positions[i]

        return traffic_light, last_index

    def process_traffic_lights(self):
        """Finds closest visible traffic light, if one exists, and determines its
            location and color

        Returns:
            int: index of the waypoint closest to the upcoming traffic light (-1 if none exists)
            int: ID of traffic light color (specified in styx_msgs/TrafficLight)

        """
        if (self.pose):
            #find the closest visible traffic light (if one exists)
            car_position = self.get_closest_waypoint(self.pose.position.x,
                                                     self.pose.position.y)
            #rospy.loginfo("wp_updater tl_detector: car wp %i", car_position)

            if car_position > 0:
                light_pos, light_waypoint = self.get_nearest_traffic_light(
                    car_position)
                #rospy.loginfo("wp_updater tl_detector: light wp %i (%i)", light_waypoint, len(self.waypoints))

                if light_pos:
                    #rospy.loginfo("Next traffic light ahead from waypoint " + str(car_position) +
                    #              " is at position " + str(light_pos) + " at waypoint " + str(light_waypoint))
                    state = TrafficLight.UNKNOWN
                    if rospy.get_param('~use_classifier', False):
                        state = self.get_light_state(
                            light_pos[0], light_pos[1],
                            light_pos[2] if len(light_pos) >= 3 else 0.)
                    else:
                        for light in self.lights:
                            ''' If the position of the light from the yaml file and the one reported via
                                /vehicle/traffic_lights differ by less than 30 m, consider them the same '''
                            if self.euclidianDistance(
                                    light.pose.pose.position.x,
                                    light.pose.pose.position.y, light_pos[0],
                                    light_pos[1]) < 30:
                                #state = self.get_light_state(light.pose.pose.position.x, light.pose.pose.position.y, light.pose.pose.position.z)
                                state = light.state

                    return light_waypoint, state

        return -1, TrafficLight.UNKNOWN
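QuaterniontoRotationMatrix above hand-codes the standard quaternion-to-rotation-matrix formula. If tf is available (as it is in this node), the result can be cross-checked against tf.transformations.quaternion_matrix, which uses the same (x, y, z, w) ordering; a minimal sketch with a 90-degree yaw quaternion:

import numpy as np
import tf.transformations as tft

q = (0.0, 0.0, 0.7071068, 0.7071068)  # (x, y, z, w), 90 degree rotation about z

# quaternion_matrix returns a 4x4 homogeneous matrix; the upper-left
# 3x3 block is the rotation and should match QuaterniontoRotationMatrix(q)
R_ref = tft.quaternion_matrix(q)[:3, :3]
print(np.round(R_ref, 3))
# [[ 0. -1.  0.]
#  [ 1.  0.  0.]
#  [ 0.  0.  1.]]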
예제 #16
0
class TLDetector(object):
    def __init__(self):
        rospy.init_node('tl_detector')

        self.pose = None
        self.waypoints = None
        self.waypoints_2d = None
        self.waypoint_tree = None
        self.camera_image = None
        self.lights = []
        self.state = TrafficLight.UNKNOWN
        self.last_state = TrafficLight.UNKNOWN
        self.last_wp = -1
        self.state_count = 0
        self.has_image = False
        self.process_count = 0
        self.light_classifier = TLClassifier(rospy.get_param('~model'))
        sub1 = rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)
        sub2 = rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)
        '''
        /vehicle/traffic_lights provides you with the location of the traffic light in 3D map space and
        helps you acquire an accurate ground truth data source for the traffic light
        classifier by sending the current color state of all traffic lights in the
        simulator. When testing on the vehicle, the color state will not be available. You'll need to
        rely on the position of the light and the camera image to predict it.
        '''
        sub3 = rospy.Subscriber('/vehicle/traffic_lights', TrafficLightArray,
                                self.traffic_cb)
        sub6 = rospy.Subscriber('/image_color', Image, self.image_cb)

        config_string = rospy.get_param(
            "/traffic_light_config"
        )  # Simulator_mode parameter (1== ON, 0==OFF)
        self.config = yaml.load(config_string)

        # Publish the index of the waypoint where we have to stop
        self.upcoming_red_light_pub = rospy.Publisher('/traffic_waypoint',
                                                      Int32,
                                                      queue_size=1)

        self.bridge = CvBridge()

        self.listener = tf.TransformListener()
        self.loop()
        #rospy.spin()

    def loop(self):
        # Set loop rate at 10Hz
        rate = rospy.Rate(10)
        # Run until the node is shut down
        while not rospy.is_shutdown():
            if self.pose and self.waypoints and self.camera_image:
                # Process current image from camera
                light_wp, state = self.process_traffic_lights()
                '''
                Publish upcoming red lights at camera frequency.
                Each predicted state has to occur `STATE_COUNT_THRESHOLD` number
                of times till we start using it. Otherwise the previous stable state is
                used.
                '''
                #rospy.logwarn("self state :{} state:{} state_count:{}".format(self.state, state, self.state_count))

                if self.state != state:
                    self.state_count = 0
                    self.state = state
                elif self.state_count >= STATE_COUNT_THRESHOLD:
                    self.last_state = self.state
                    light_wp = light_wp if state == TrafficLight.RED else -1
                    self.last_wp = light_wp
                    self.upcoming_red_light_pub.publish(Int32(light_wp))
                else:
                    self.upcoming_red_light_pub.publish(Int32(self.last_wp))
                self.state_count += 1

            rate.sleep()

    def pose_cb(self, msg):
        self.pose = msg

    def waypoints_cb(self, waypoints):
        self.waypoints = waypoints
        # Setup the Kd Tree which has log(n) complexity
        if not self.waypoints_2d:
            self.waypoints_2d = [[
                waypoint.pose.pose.position.x, waypoint.pose.pose.position.y
            ] for waypoint in waypoints.waypoints]

            self.waypoint_tree = KDTree(self.waypoints_2d)
        #rospy.logwarn("Updating TL detector Waypoints")

    def traffic_cb(self, msg):
        self.lights = msg.lights

    def image_cb(self, msg):
        """Identifies red lights in the incoming camera image and publishes the index
            of the waypoint closest to the red light's stop line to /traffic_waypoint
        Args:
            msg (Image): image from car-mounted camera
        """
        self.has_image = True
        self.camera_image = msg

    def get_closest_waypoint(self, x, y):
        """Identifies the closest path waypoint to the given position
            https://en.wikipedia.org/wiki/Closest_pair_of_points_problem
        Args:
            x, y: coordinates of the position to match a waypoint to
        Returns:
            int: index of the closest waypoint in self.waypoints
        """
        # TODO implement
        closest_idx = self.waypoint_tree.query([x, y], 1)[1]
        return closest_idx

    def get_light_state(self, light):
        """Determines the current color of the traffic light
        Args:
            light (TrafficLight): light to classify
        Returns:
            int: ID of traffic light color (specified in styx_msgs/TrafficLight)
        """
        # For test mode, just return the light state
        if DEBUG_CODE:
            classification = light.state
        else:
            cv_image = self.bridge.imgmsg_to_cv2(self.camera_image, "bgr8")
            # Get classification
            classification = self.light_classifier.get_classification(cv_image)
        return classification

    def process_traffic_lights(self):
        closest_light = None
        line_wp_idx = None
        state = TrafficLight.UNKNOWN
        # List of positions that correspond to the line to stop in front of for a given intersection
        stop_line_positions = self.config['stop_line_positions']

        if (self.pose):  # and self.waypoint_tree):
            car_wp_idx = self.get_closest_waypoint(self.pose.pose.position.x,
                                                   self.pose.pose.position.y)

            # TODO find the closest visible traffic light (if one exists)
            diff = len(self.waypoints.waypoints)
            for i, light in enumerate(self.lights):
                # Get stop line waypoint index
                line = stop_line_positions[i]
                temp_wp_idx = self.get_closest_waypoint(line[0], line[1])

                # Find the closest stop line waypoint index
                d = temp_wp_idx - car_wp_idx
                if d >= 0 and d < diff:
                    diff = d
                    closest_light = light
                    line_wp_idx = temp_wp_idx

        if closest_light:
            #self.process_count += 1
            state = self.get_light_state(closest_light)
            #if (self.process_count % 5) == 0:
            #    rospy.logwarn("DETECT: line_wp_idx={}, state={}".format(line_wp_idx, self.to_string(state)))

            return line_wp_idx, state
        self.waypoints = None
        return -1, TrafficLight.UNKNOWN

    def to_string(self, state):
        out = "unknown"
        if state == TrafficLight.GREEN:
            out = "green"
        elif state == TrafficLight.YELLOW:
            out = "yellow"
        elif state == TrafficLight.RED:
            out = "red"
        return out
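The loop/image_cb logic above debounces the classifier: a new state is only acted on after it has been seen STATE_COUNT_THRESHOLD times in a row, otherwise the last stable stop-line waypoint keeps being published. A stripped-down sketch of just that state machine, with an assumed threshold of 3 (the real constant lives elsewhere in the node):

STATE_COUNT_THRESHOLD = 3  # assumed value, for illustration only

class StateDebouncer(object):
    def __init__(self):
        self.state = None
        self.last_wp = -1
        self.count = 0

    def update(self, state, light_wp, is_red):
        """Returns the waypoint index that would be published for this frame."""
        if self.state != state:
            self.count = 0
            self.state = state
        elif self.count >= STATE_COUNT_THRESHOLD:
            self.last_wp = light_wp if is_red else -1
        self.count += 1
        return self.last_wp

deb = StateDebouncer()
print([deb.update('RED', 120, True) for _ in range(5)])  # [-1, -1, -1, 120, 120]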
예제 #17
0
class AsyncPipeline:
    """
    Encapsulates the traffic light detector and classifier. Performs detection/classification in a dedicated thread.
    After the thread is started, a few classification iterations are spent on graphics-card warm-up, after which a
    message is published to the /traffic_classifier_ready topic.
    Methods and variables that can be accessed from different threads have the "sh" prefix (shared).
    """
    def __init__(self):
        rospy.loginfo("Initializing traffic light classifier...")
        self.object_detector = ObjectDetector()
        self.classifier = TLClassifier()
        rospy.loginfo("Traffic light classifier is initialized.")

        # this topic indicates whether we are ready for detection/classification.
        self.traffic_classifier_ready_pub = rospy.Publisher(
            "/traffic_classifier_ready", Bool, queue_size=1, latch=True)

        # locks for getting/setting image and state values
        self.image_lock = threading.Lock()
        self.state_lock = threading.Lock()

        # fields that are shared between threads
        # should be accessed only via getters/setters which are using locks

        # last camera image
        self.sh_image = None
        # last acknowledged traffic light state (classified enough times in a row, to make sure it is not noise)
        self.sh_acknowledged_state = None

        # result of last classification (may differ from acknowledged state)
        self.sh_last_classified_state = None
        # time of last classification
        self.sh_last_classification_time = 0

    def start_thread(self):
        """
        Starts main classification thread.
        :return:
        """
        thread = threading.Thread(target=self.classification_thread_fn)
        thread.start()

    def sh_set_image(self, image):
        """
        Sets camera image. Thread safe.
        :param image: camera image message
        """
        self.image_lock.acquire()
        self.sh_image = image
        self.image_lock.release()

    def sh_get_image(self):
        """
        Gets camera image thread safe.
        :return: camera image.
        """
        self.image_lock.acquire()
        image = self.sh_image
        self.image_lock.release()
        return image

    def sh_get_state_info(self):
        """
        Returns results of classification. Thread safe.
        :return: acknowledged classification result, last classification result, last classification time
        """
        self.state_lock.acquire()
        acknowledged_state = self.sh_acknowledged_state
        last_classified_state = self.sh_last_classified_state
        last_classification_time = self.sh_last_classification_time
        self.state_lock.release()

        return acknowledged_state, last_classified_state, last_classification_time

    def sh_set_classification_info(self, last_classified_state,
                                   last_classification_time):
        """
        Sets last classification information. Thread safe.
        :param last_classified_state: classification result
        :param last_classification_time: classification time
        """
        self.state_lock.acquire()
        self.sh_last_classified_state = last_classified_state
        self.sh_last_classification_time = last_classification_time
        self.state_lock.release()

    def sh_set_state_info(self, acknowledged_state, last_classification_time):
        """
        Sets state information
        :param acknowledged_state: acknowledged classification state
        :param last_classification_time: last classification time
        """
        self.state_lock.acquire()
        self.sh_acknowledged_state = acknowledged_state
        self.sh_last_classified_state = acknowledged_state
        self.sh_last_classification_time = last_classification_time
        self.state_lock.release()

    def classification_thread_fn(self):
        """
        Performs classification in a dedicated thread.
        """
        # last image that classification was done on
        last_image = None
        # last classification result
        last_state = None
        # number of times last classification has the same result in a row
        last_state_count = 0
        # overall count of classifications
        overall_count = 0

        while not rospy.is_shutdown():
            # 1. Get camera image
            image = self.sh_get_image()

            # 2. In case image was changed
            if last_image != image:

                # 3. In case warming up is done send message that the classifier is ready
                if overall_count == WARMUP_NUM:
                    self.traffic_classifier_ready_pub.publish(Bool(True))
                    rospy.loginfo("Warming up is finished")
                elif overall_count < WARMUP_NUM:
                    rospy.loginfo("Warming up")

                # 4. Do classification
                start_time = time.time()
                state = self.get_light_state(image, overall_count)
                classification_time = time.time() - start_time

                if last_state != state:  # in case state is changed, remember it and reset the counter
                    last_state_count = 0
                    last_state = state
                    self.sh_set_classification_info(state, classification_time)
                elif last_state_count >= STATE_COUNT_THRESHOLD:  # in case state persists long enough change it
                    self.sh_set_state_info(last_state, classification_time)
                else:  # otherwise just save the last classification info for debugging
                    self.sh_set_classification_info(state, classification_time)

                last_state_count += 1  # increment how many times in a row we got the same state
                overall_count += 1  # increment how many classifications we have done
                last_image = image  # memorize last image

    def get_light_state(self, image, counter):
        """
        Performs detection and then classification.
        :param image: ROS image
        :return: TrafficLight.state
        """
        # 1. do detection
        image_np = np.fromstring(image.data, dtype=np.uint8).reshape(
            (image.height, image.width, 3))
        # site images are in bgr, convert if needed
        if image.encoding == "bgr8":
            image_np = np.flip(image_np, 2)

        boxes = self.object_detector.detect_traffic_lights(image_np)

        # 2. create list of detected subimages
        patches = []
        for top, left, bottom, right in boxes:
            patches.append(image_np[top:bottom, left:right])

        # 3. do classification
        predicted_state, classifications = self.classifier.get_classification(
            patches)

        #self.draw_boxes(image_np, boxes, classifications, counter)

        return predicted_state

    def draw_boxes(self, image, boxes, classifications, counter, thickness=4):
        """Draw bounding boxes on the image"""
        colors = {
            0: (255, 0, 0),
            1: (255, 255, 0),
            2: (0, 255, 0),
            3: (255, 255, 255)
        }

        image = Image.fromarray(image)
        draw = ImageDraw.Draw(image)
        for i in range(len(boxes)):
            bot, left, top, right = boxes[i, ...]
            class_id = int(classifications[i])
            color = colors[class_id]
            draw.line([(left, top), (left, bot), (right, bot), (right, top),
                       (left, top)],
                      width=thickness,
                      fill=color)

        image.save("./tmp/image_{:04d}.png".format(counter))
class TLDetector(object):
    def __init__(self):
        rospy.init_node('tl_detector')

        self.pose_stamped = None
        self.waypoints_stamped = None
        self.camera_image = None
        self.lights = None
        self.has_image = False
        self.light_classifier = TLClassifier()
        self.prev_light_loc = None
        self.waypoint_tree = None
        self.waypoints_2d = None

        self.state = TrafficLight.UNKNOWN
        self.last_state = TrafficLight.UNKNOWN
        self.last_wp = -1
        self.state_count = 0

        self.lights_wp = []
        self.stoplines_wp = []

        self.simulated_detection = rospy.get_param('~simulated_detection', 1)

        config_string = rospy.get_param("/traffic_light_config")
        self.config = yaml.load(config_string)

        # Classifier Setup
        rospy.loginfo("Loading TLClassifier model...")
        self.light_classifier = TLClassifier()
        model = load_model(self.config['tl']['tl_classification_model'])
        resize_width = self.config['tl']['classifier_resize_width']
        resize_height = self.config['tl']['classifier_resize_height']
        self.light_classifier.setup_classifier(model, resize_width,
                                               resize_height)
        self.invalid_class_number = 3

        # Detector setup
        rospy.loginfo("Loading TLDetector model...")
        custom_objects = {
            'dice_coef_loss': dice_coef_loss,
            'dice_coef': dice_coef
        }
        self.detector_model = load_model(
            self.config['tl']['tl_detection_model'],
            custom_objects=custom_objects)
        self.detector_model._make_predict_function()
        self.resize_width = self.config['tl']['detector_resize_width']
        self.resize_height = self.config['tl']['detector_resize_height']
        self.resize_height_ratio = self.config['camera_info'][
            'image_height'] / float(self.resize_height)
        self.resize_width_ratio = self.config['camera_info'][
            'image_width'] / float(self.resize_width)
        self.middle_col = self.resize_width / 2
        self.is_carla = self.config['tl']['is_carla']
        self.projection_threshold = self.config['tl']['projection_threshold']
        self.projection_min = self.config['tl']['projection_min']
        self.color_mode = self.config['tl']['color_mode']
        rospy.loginfo("[TL_DETECTOR] Loaded models from disk")

        sub1 = rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)
        sub2 = rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)
        '''
        /vehicle/traffic_lights provides you with the location of the traffic light in 3D map space and
        helps you acquire an accurate ground truth data source for the traffic light
        classifier by sending the current color state of all traffic lights in the
        simulator. When testing on the vehicle, the color state will not be available. You'll need to
        rely on the position of the light and the camera image to predict it.
        '''
        sub3 = rospy.Subscriber('/vehicle/traffic_lights', TrafficLightArray,
                                self.traffic_cb)
        sub6 = rospy.Subscriber('/image_color',
                                Image,
                                self.image_cb,
                                queue_size=1)

        self.upcoming_red_light_pub = rospy.Publisher('/traffic_waypoint',
                                                      Int32,
                                                      queue_size=1)

        self.bridge = CvBridge()

        rospy.spin()

## Callback functions
##==============================================================================
## Current Car pose

    def pose_cb(self, msg):
        self.pose_stamped = msg

## base waypoints.

    def waypoints_cb(self, waypoints):

        self.waypoints_stamped = waypoints

        if not self.waypoints_2d:
            self.waypoints_2d = [[
                waypoint.pose.pose.position.x, waypoint.pose.pose.position.y
            ] for waypoint in waypoints.waypoints]
            self.waypoint_tree = KDTree(self.waypoints_2d)

        for i in range(len(self.waypoints_stamped.waypoints)):
            self.waypoints_stamped.waypoints[
                i].pose.header.frame_id = self.waypoints_stamped.header.frame_id
        self.calculate_traffic_light_waypoints()

## useful for the simulator and Carla; the color state will not be available on Carla

    def traffic_cb(self, msg):

        if self.simulated_detection > 0:
            self.lights = msg.lights
            self.calculate_traffic_light_waypoints()

            light_wp, state = self.process_traffic_lights()
            self.publish_upcoming_red_light(light_wp, state)
        else:
            if self.lights is not None:
                return

            self.lights = msg.lights
            self.calculate_traffic_light_waypoints()

## Camera image

    def image_cb(self, msg):
        """Identifies red lights in the incoming camera image and publishes the index
            of the waypoint closest to the red light's stop line to /traffic_waypoint
        Args:
            msg (Image): image from car-mounted camera
        """

        self.has_image = True
        self.camera_image = msg
        light_wp, state = self.process_traffic_lights()
        '''
        Publish upcoming red lights at camera frequency.
        '''
        self.publish_upcoming_red_light(light_wp, state)


## methods
##==============================================================================

    def publish_upcoming_red_light(self, light_wp, state):
        """Publishes the index of the waypoint closest to the red light's
            stop line to /traffic_waypoint
        Args:
            light_wp: waypoint of the closest traffic light
            state: state of the closest traffic light
        """
        '''
        Each predicted state has to occur `STATE_COUNT_THRESHOLD` number
        of times till we start using it. Otherwise the previous stable state is
        used.
        '''
        if self.state != state:
            self.state_count = 0
            self.state = state
        elif self.state_count >= STATE_COUNT_THRESHOLD:
            self.last_state = self.state
            light_wp = light_wp if state == TrafficLight.RED else -1
            self.last_wp = light_wp
            self.upcoming_red_light_pub.publish(Int32(light_wp))
        else:
            self.upcoming_red_light_pub.publish(Int32(self.last_wp))
        self.state_count += 1

    def process_traffic_lights(self):
        """Finds closest visible traffic light, if one exists, and determines its
            location and color
        Returns:
            int: index of the waypoint closest to the upcoming stop line for a traffic light (-1 if none exists)
            int: ID of traffic light color (specified in styx_msgs/TrafficLight)
        """
        if self.pose_stamped is None or len(self.stoplines_wp) == 0:
            rospy.loginfo("[TL_DETECTOR] No TL is detected. None")
            return -1, TrafficLight.UNKNOWN

        # find the closest visible traffic light (if one exists)
        closest_light = None
        line_wp_idx = None

        # List of positions that correspond to the line to stop in front of for a given intersection
        stop_line_positions = self.config['stop_line_positions']
        if (self.pose_stamped):
            car_wp_idx = self.get_closest_waypoint(
                self.pose_stamped.pose.position.x,
                self.pose_stamped.pose.position.y)

            #TODO find the closest visible traffic light (if one exists)
            diff = len(self.waypoints_stamped.waypoints)
            for i, light in enumerate(self.lights):  # each traffic light's position and state
                line = stop_line_positions[i]

                temp_wp_idx = self.get_closest_waypoint(line[0], line[1])
                d = temp_wp_idx - car_wp_idx
                if d >= 0 and d < diff:
                    diff = d
                    closest_light = light
                    line_wp_idx = temp_wp_idx

        if closest_light:
            state = self.get_light_state(closest_light)
            return line_wp_idx, state

        rospy.logwarn("[TL_DETECTOR] No TL is detected. None")
        return -1, TrafficLight.UNKNOWN

    def get_light_state(self, light):
        """Determines the current color of the traffic light
        Args:
            light (TrafficLight): light to classify
        Returns:
            int: ID of traffic light color (specified in styx_msgs/TrafficLight)
        """
        labels = list(enumerate(['Red', 'Yellow', 'Green', 'None', 'None']))
        if self.simulated_detection > 0:
            if self.lights is None or light >= len(self.lights):
                rospy.loginfo(
                    "[TL_DETECTOR] simulated_detection: No TL is detected. None"
                )
                return TrafficLight.UNKNOWN
            state = self.lights[light].state
            rospy.loginfo(
                "[TL_DETECTOR] simulated_detection: Nearest TL-state is: %s",
                labels[state][1])
            return state

        if not self.has_image:
            self.prev_light_loc = None
            rospy.loginfo(
                "[TL_DETECTOR] has_image is None: No TL is detected. None")
            return TrafficLight.UNKNOWN

        cv_image = self.bridge.imgmsg_to_cv2(self.camera_image,
                                             self.color_mode)
        tl_image = self.detect_traffic_light(cv_image)
        if tl_image is not None:
            state = self.light_classifier.get_classification(tl_image)
            state = state if (
                state != self.invalid_class_number) else TrafficLight.UNKNOWN
            rospy.loginfo("[TL_DETECTOR] Nearest TL-state is: %s",
                          labels[state][1])
            return state
        else:
            rospy.loginfo(
                "[TL_DETECTOR] tl_image is None: No TL is detected. None")
            return TrafficLight.UNKNOWN

    def get_closest_waypoint(self, x, y):
        """Identifies the closest path waypoint to the given position
            https://en.wikipedia.org/wiki/Closest_pair_of_points_problem
        Args:
            x, y: coordinates of the position to match a waypoint to

        Returns:
            int: index of the closest waypoint in self.waypoints

        """
        #TODO implement
        closest_idx = self.waypoint_tree.query([x, y], 1)[1]
        return closest_idx

    def extract_image(self, pred_image_mask, image):
        # rospy.loginfo("[TL_DETECTOR] Detecting TL...extract_image()")
        if np.max(pred_image_mask) < self.projection_min:
            return None

        row_projection = np.sum(pred_image_mask, axis=1)
        row_index = np.argmax(row_projection)

        if np.max(row_projection) < self.projection_threshold:
            return None

        zero_row_indexes = np.argwhere(
            row_projection <= self.projection_threshold)
        top_part = zero_row_indexes[zero_row_indexes < row_index]
        top = np.max(top_part) if top_part.size > 0 else 0
        bottom_part = zero_row_indexes[zero_row_indexes > row_index]
        bottom = np.min(
            bottom_part) if bottom_part.size > 0 else self.resize_height

        roi = pred_image_mask[top:bottom, :]
        column_projection = np.sum(roi, axis=0)

        if np.max(column_projection) < self.projection_min:
            return None

        non_zero_column_index = np.argwhere(
            column_projection > self.projection_min)

        index_of_column_index = np.argmin(
            np.abs(non_zero_column_index - self.middle_col))
        column_index = non_zero_column_index[index_of_column_index][0]

        zero_column_indexes = np.argwhere(column_projection == 0)
        left_side = zero_column_indexes[zero_column_indexes < column_index]
        left = np.max(left_side) if left_side.size > 0 else 0
        right_side = zero_column_indexes[zero_column_indexes > column_index]
        right = np.min(
            right_side) if right_side.size > 0 else self.resize_width
        return image[
            int(top * self.resize_height_ratio):int(bottom *
                                                    self.resize_height_ratio),
            int(left * self.resize_width_ratio):int(right *
                                                    self.resize_width_ratio)]

    def detect_traffic_light(self, cv_image):
        # rospy.loginfo("[TL_DETECTOR] Detecting TL...detect_traffic_light()")
        resize_image = cv2.resize(cv_image,
                                  (self.resize_width, self.resize_height))
        resize_image = cv2.cvtColor(resize_image, cv2.COLOR_RGB2GRAY)
        resize_image = resize_image[..., np.newaxis]
        if self.is_carla:
            # convert to float so the in-place normalization below does not fail on a uint8 image
            resize_image = resize_image.astype(np.float32)
            mean = np.mean(resize_image)  # mean for data centering
            std = np.std(resize_image)  # std for data normalization

            resize_image -= mean
            resize_image /= std

        image_mask = self.detector_model.predict(resize_image[None, :, :, :],
                                                 batch_size=1)[0]
        image_mask = (image_mask[:, :, 0] * 255).astype(np.uint8)
        return self.extract_image(image_mask, cv_image)

    def distance2(self, pose1, pose2):
        """Calculate the square of the Eucleadian distance bentween the two poses given
        Args:
            pose1: given Pose
            pose2: given Pose
        Returns:
            float: square of the Euclidean distance between the two given poses
        """
        dist2 = (pose1.position.x - pose2.position.x)**2 + (
            pose1.position.y - pose2.position.y)**2
        return dist2

    def get_closest_stopline_pose(self, pose):
        """Finds closest stopline to the given Pose
        Args:
            pose: given Pose
        Returns:
            Pose: a Pose object whose position is that of the closest stopline
        """
        stop_line_positions = self.config['stop_line_positions']

        dist_min = sys.maxsize
        stop_line_min = None

        for stop_line_position in stop_line_positions:
            stop_line_pose = Pose()
            stop_line_pose.position.x = stop_line_position[0]
            stop_line_pose.position.y = stop_line_position[1]
            stop_line_pose.position.z = 0.0

            dist = self.distance2(pose, stop_line_pose)

            if dist < dist_min:
                dist_min = dist
                stop_line_min = stop_line_pose

        return stop_line_min

    def calculate_traffic_light_waypoints(self):
        """Populate traffic light waypoints and stopline waypoints arrays if they are not already populated
            self.lights_wp contains the closest waypoints to the corresponding traffic lights in self.lights
            self.stoplines_wp contains the waypoints of stop lines corresponding to the traffic lights in self.lights
        """
        if self.waypoints_stamped is not None and self.lights is not None and len(
                self.lights_wp) == 0:
            for i in range(len(self.lights)):
                stopline = self.get_closest_stopline_pose(
                    self.lights[i].pose.pose)
                self.stoplines_wp.append(
                    self.get_closest_waypoint(stopline.position.x,
                                              stopline.position.y))
                self.lights_wp.append(
                    self.get_closest_waypoint(
                        self.lights[i].pose.pose.position.x,
                        self.lights[i].pose.pose.position.y))
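extract_image above localizes the traffic light by summing the predicted mask along rows and columns and cutting the region where those projections drop back below a threshold. A tiny numpy sketch of the projection idea on a synthetic 8x8 mask (values and thresholds are made up for the example):

import numpy as np

mask = np.zeros((8, 8), dtype=np.uint8)
mask[2:5, 3:5] = 255  # a bright blob standing in for the detected light

row_projection = np.sum(mask, axis=1)  # one value per row
col_projection = np.sum(mask, axis=0)  # one value per column

rows = np.argwhere(row_projection > 0).flatten()
cols = np.argwhere(col_projection > 0).flatten()
top, bottom = rows.min(), rows.max() + 1
left, right = cols.min(), cols.max() + 1

print((top, bottom, left, right))  # (2, 5, 3, 5) -> crop mask[2:5, 3:5]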
class TLDetector(object):
    def __init__(self):
        rospy.init_node('tl_detector', log_level=rospy.DEBUG)

        self.pose = None
        self.waypoints = None 
        self.camera_image = None
        self.lights = []
        self.stop_lines=[]

        sub1 = rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)
        sub2 = rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)

        '''
        /vehicle/traffic_lights provides you with the location of the traffic light in 3D map space and
        helps you acquire an accurate ground truth data source for the traffic light
        classifier by sending the current color state of all traffic lights in the
        simulator. When testing on the vehicle, the color state will not be available. You'll need to
        rely on the position of the light and the camera image to predict it.
        '''
        sub3 = rospy.Subscriber('/vehicle/traffic_lights', TrafficLightArray, self.traffic_cb)
        sub6 = rospy.Subscriber('/image_color', Image, self.image_cb)

        config_string = rospy.get_param("/traffic_light_config")
        self.config = yaml.load(config_string)

        self.upcoming_red_light_pub = rospy.Publisher('/traffic_waypoint', Int32, queue_size=1)

        self.bridge = CvBridge()
        self.light_classifier = TLClassifier()
        self.listener = tf.TransformListener()

        self.state = TrafficLight.UNKNOWN
        self.last_state = TrafficLight.UNKNOWN
        self.last_wp = -1
        self.state_count = 0

        rospy.logdebug("initialized....")

        rospy.spin()

    def pose_cb(self, msg):
        self.pose = msg

    def waypoints_cb(self, waypoints):
        rospy.logdebug("waypoints received....")
        self.waypoints = waypoints

    def traffic_cb(self, msg):
        self.lights = msg.lights

    def image_cb(self, msg):
        """Identifies red lights in the incoming camera image and publishes the index
            of the waypoint closest to the red light's stop line to /traffic_waypoint

        Args:
            msg (Image): image from car-mounted camera

        """

        self.has_image = True
        self.camera_image = msg
        
        start = timeit.default_timer()
        light_wp, state = self.process_traffic_lights()
        end = timeit.default_timer()
        '''
        Publish upcoming red lights at camera frequency.
        Each predicted state has to occur `STATE_COUNT_THRESHOLD` number
        of times till we start using it. Otherwise the previous stable state is
        used.
        '''
        if self.state != state:
            self.state_count = 0
            self.state = state
        elif self.state_count >= STATE_COUNT_THRESHOLD:
            self.last_state = self.state
            light_wp = light_wp if (state == TrafficLight.RED or state == TrafficLight.YELLOW) else -1
            self.last_wp = light_wp
            self.upcoming_red_light_pub.publish(Int32(light_wp))
        else:
            self.upcoming_red_light_pub.publish(Int32(self.last_wp))
        self.state_count += 1
		
    def get_coordinates_vector(self,position):
        return np.asarray([position.x, position.y, position.z])

    def get_closest_waypoint(self, pose):
        """Identifies the closest path waypoint to the given position
            https://en.wikipedia.org/wiki/Closest_pair_of_points_problem
        Args:
            pose (Pose): position to match a waypoint to

        Returns:
            int: index of the closest waypoint in self.waypoints

        """
        if self.waypoints is not None:
            node = self.get_coordinates_vector(pose.position)
            waypoints = np.asarray([[waypoint.pose.pose.position.x, waypoint.pose.pose.position.y, waypoint.pose.pose.position.z] for waypoint in
                                    self.waypoints.waypoints])
            nearest_index = distance.cdist([node], waypoints).argmin()
        else:
            rospy.logdebug("no waypoints received yet")
            nearest_index = -1

        return nearest_index

    def get_light_state(self, light):
        """Determines the current color of the traffic light

        Args:
            light (TrafficLight): light to classify

        Returns:
            int: ID of traffic light color (specified in styx_msgs/TrafficLight)

        """
        if(not self.has_image):
            self.prev_light_loc = None
            rospy.logdebug("returning false")
            return False
        cv_image = self.bridge.imgmsg_to_cv2(self.camera_image, "rgb8")
        #Get classification
        return self.light_classifier.get_classification(cv_image)

    def process_traffic_lights(self):
        """Finds closest visible traffic light, if one exists, and determines its
            location and color

        Returns:
            int: index of the waypoint closest to the upcoming stop line for a traffic light (-1 if none exists)
            int: ID of traffic light color (specified in styx_msgs/TrafficLight)

        """
        light = None
        light_wp = -1
        state = TrafficLight.UNKNOWN

        # List of positions that correspond to the line to stop in front of for a given intersection
        stop_line_positions = self.config['stop_line_positions']
	
        closest_waypoint_fn = self.get_closest_stoplight_waypoint
        closest_waypoints =  [closest_waypoint_fn(stop_line_position) for stop_line_position in stop_line_positions]

        self.stop_lines = closest_waypoints

        # Find the nearest waypoint
        #print("self pose ",self.pose)
        min_idx_dist = 150
        if(self.pose):
            car_position = self.get_closest_waypoint(self.pose.pose)
            for i, stop_line_idx in enumerate(self.stop_lines):
                idx_dist = stop_line_idx - car_position

                if 0 < idx_dist < min_idx_dist:
                    light = i
                    min_idx_dist = idx_dist
                    light_wp = stop_line_idx
                    break

            #light=1

        #TODO find the closest visible traffic light (if one exists)
        if light is not None:
            state = self.get_light_state(light)
            return light_wp, state
        #self.waypoints = None
        return -1, TrafficLight.UNKNOWN
    
    def get_closest_stoplight_waypoint(self, stop_line_position):
        pose = Pose()
        pose.position.x = stop_line_position[0]
        pose.position.y = stop_line_position[1]
        pose.position.z = 0

        closest_waypoint = self.get_closest_waypoint(pose)
        return closest_waypoint
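
# A minimal, self-contained sketch of the STATE_COUNT_THRESHOLD debounce used in
# image_cb above: a new classifier result must be seen several frames in a row
# before the published stop waypoint changes. The class name StateDebouncer and
# the threshold value of 3 are illustrative assumptions, not part of the project code.
STATE_COUNT_THRESHOLD = 3


class StateDebouncer(object):
    def __init__(self):
        self.state = None        # last raw state observed
        self.state_count = 0     # consecutive observations of self.state
        self.last_wp = -1        # last accepted stop-line waypoint index

    def update(self, state, light_wp, stop_states=('RED', 'YELLOW')):
        """Return the waypoint index that should be published for this frame."""
        if self.state != state:
            # state changed: restart the counter, keep the previously accepted value
            self.state = state
            self.state_count = 0
        elif self.state_count >= STATE_COUNT_THRESHOLD:
            # state has been stable long enough: accept the new stop waypoint
            self.last_wp = light_wp if state in stop_states else -1
        self.state_count += 1
        return self.last_wp


if __name__ == '__main__':
    deb = StateDebouncer()
    print(deb.update('RED', 120))    # -1: a single noisy RED frame changes nothing
    for _ in range(4):
        out = deb.update('RED', 120)
    print(out)                       # 120: RED has now been stable for enough frames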
예제 #20
0
class TLDetector(object):
    def __init__(self):
        rospy.init_node('tl_detector')

        self.pose = None
        self.waypoints = None
        self.camera_image = None
        self.waypoint_tree = None
        self.waypoints_2d = None
        self.lights = []
        self.imgCnt = 0

        sub1 = rospy.Subscriber('/current_pose', PoseStamped,
                                self.pose_cb)  # from vehicle
        sub2 = rospy.Subscriber(
            '/base_waypoints', Lane, self.waypoints_cb
        )  # from waypoint loader (only gets sent once on startup)
        '''
        /vehicle/traffic_lights provides you with the location of the traffic light in 3D map space and
        helps you acquire an accurate ground truth data source for the traffic light
        classifier by sending the current color state of all traffic lights in the
        simulator. When testing on the vehicle, the color state will not be available. You'll need to
        rely on the position of the light and the camera image to predict it.
        '''
        sub3 = rospy.Subscriber(
            '/vehicle/traffic_lights', TrafficLightArray, self.traffic_cb
        )  # from simulator (continuous data about upcoming lights)
        sub6 = rospy.Subscriber('/image_color', Image,
                                self.image_cb)  # from vehicle

        config_string = rospy.get_param(
            "/traffic_light_config"
        )  #stopline positions and camera info AND real vs. sim
        self.config = yaml.load(config_string)

        self.upcoming_red_light_pub = rospy.Publisher(
            '/traffic_waypoint', Int32, queue_size=1)  # to waypoint updater

        self.bridge = CvBridge()
        # Read parameter to determine if we're running in the sim or on Carla and pass to classifier constructor.
        self.light_classifier = TLClassifier(self.config['is_site'])
        self.listener = tf.TransformListener()

        self.state = TrafficLight.UNKNOWN
        self.last_state = TrafficLight.UNKNOWN
        self.last_wp = -1
        self.state_count = 0

        rospy.spin()

    def pose_cb(self, msg):
        #print("got pose")
        self.pose = msg

    def waypoints_cb(self, waypoints):
        # Only gets called once
        self.waypoints = waypoints
        if not self.waypoints_2d:
            self.waypoints_2d = [[
                waypoint.pose.pose.position.x, waypoint.pose.pose.position.y
            ] for waypoint in waypoints.waypoints]

            self.waypoint_tree = KDTree(self.waypoints_2d)

    def traffic_cb(self, msg):
        self.lights = msg.lights

    def image_cb(self, msg):
        """Identifies red lights in the incoming camera image and publishes the index
            of the waypoint closest to the red light's stop line to /traffic_waypoint

        Args:
            msg (Image): image from car-mounted camera

        """
        if self.imgCnt == IMAGE_INTERVAL:
            self.imgCnt = 0
            #rospy.logwarn("image_cb")
            self.has_image = True
            self.camera_image = msg
            light_wp, state = self.process_traffic_lights()
            '''
            Publish upcoming red lights at camera frequency.
            Each predicted state has to occur `STATE_COUNT_THRESHOLD` number
            of times till we start using it. Otherwise the previous stable state is
            used.
            '''
            if self.state != state:
                self.state_count = 0
                self.state = state
            elif self.state_count >= STATE_COUNT_THRESHOLD:
                self.last_state = self.state
                light_wp = light_wp if state == TrafficLight.RED else -1
                self.last_wp = light_wp
                self.upcoming_red_light_pub.publish(Int32(light_wp))
            else:
                self.upcoming_red_light_pub.publish(Int32(self.last_wp))
            self.state_count += 1
        else:
            self.imgCnt += 1

    def get_closest_waypoint(self, x, y):
        """Identifies the closest path waypoint to the given position
            https://en.wikipedia.org/wiki/Closest_pair_of_points_problem
        Args:
            x (float): x coordinate of the position to match a waypoint to
            y (float): y coordinate of the position to match a waypoint to

        Returns:
            int: index of the closest waypoint in self.waypoints

        """
        if self.waypoint_tree is not None:
            closest_wp = self.waypoint_tree.query([x, y], 1)[1]
            return closest_wp
        else:
            return None

    def get_light_state(self, light):
        """Determines the current color of the traffic light

        Args:
            light (TrafficLight): light to classify

        Returns:
            int: ID of traffic light color (specified in styx_msgs/TrafficLight)

        """
        # For testing, just return the light state
        #rospy.logwarn("getting light state")
        #rospy.logwarn(light.state)
        #return light.state

        if (not self.has_image):
            self.prev_light_loc = None
            return False

        cv_image = self.bridge.imgmsg_to_cv2(self.camera_image, "rgb8")

        #Get classification
        return self.light_classifier.get_classification(cv_image)

    def process_traffic_lights(self):
        #rospy.logwarn("processing traffic lights")
        """Finds closest visible traffic light, if one exists, and determines its
            location and color

        Returns:
            int: index of waypoint closest to the upcoming stop line for a traffic light (-1 if none exists)
            int: ID of traffic light color (specified in styx_msgs/TrafficLight)

        """
        closest_light = None
        light_wp = None

        # List of positions that correspond to the line to stop in front of for a given intersection
        stop_line_positions = self.config['stop_line_positions']
        if (self.pose):
            car_wp_idx = self.get_closest_waypoint(self.pose.pose.position.x,
                                                   self.pose.pose.position.y)

            # Find the closest visible traffic light (if one exists)
            wp_len = len(self.waypoints.waypoints)
            for i, light in enumerate(self.lights):
                # Get stop line waypoint index
                line = stop_line_positions[i]
                light_wp_idx = self.get_closest_waypoint(line[0], line[1])
                # Find the closest stop line waypoint index
                wp_delta = light_wp_idx - car_wp_idx
                #print(wp_delta)
                if wp_delta >= 0 and wp_delta < wp_len:
                    closest_light = light
                    light_wp = light_wp_idx
                    break

        if closest_light:
            if wp_delta < 150:
                state = self.get_light_state(closest_light)
            else:
                state = TrafficLight.UNKNOWN
            #rospy.logwarn("state: {}".format(state))
            return light_wp, state

        self.waypoints = None
        return -1, TrafficLight.UNKNOWN
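
# What the KDTree lookup in the example above boils down to, as a standalone
# snippet (assuming scipy's KDTree, which the query(...)[1] pattern suggests):
# build the tree once from the 2D waypoint coordinates, then each query returns
# (distance, index) of the nearest waypoint. The coordinates below are made up.
from scipy.spatial import KDTree

waypoints_2d = [[0.0, 0.0], [1.0, 0.0], [2.0, 0.5], [3.0, 1.5], [4.0, 3.0]]
waypoint_tree = KDTree(waypoints_2d)

# Nearest waypoint to a car position of (x, y) = (2.2, 0.4)
dist, idx = waypoint_tree.query([2.2, 0.4], 1)
print(idx, waypoints_2d[idx])   # 2 [2.0, 0.5]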
예제 #21
0
class TLDetector(object):
    def __init__(self):
        rospy.init_node('tl_detector')

        self.pose = None
        self.base_waypoints = None
        self.camera_image = None
        self.lights = []

        sub1 = rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)
        sub2 = rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)

        '''
        /vehicle/traffic_lights provides you with the location of the traffic light in 3D map space and
        helps you acquire an accurate ground truth data source for the traffic light
        classifier by sending the current color state of all traffic lights in the
        simulator. When testing on the vehicle, the color state will not be available. You'll need to
        rely on the position of the light and the camera image to predict it.
        '''
        sub3 = rospy.Subscriber('/vehicle/traffic_lights', TrafficLightArray, self.traffic_cb)
        sub6 = rospy.Subscriber('/image_color', Image, self.image_cb)

        config_string = rospy.get_param("/traffic_light_config")
        self.config = yaml.load(config_string)

        self.upcoming_red_light_pub = rospy.Publisher('/traffic_waypoint', Int32, queue_size=1)

        self.bridge = CvBridge()
        self.light_classifier = TLClassifier()
        self.listener = tf.TransformListener()

        self.state = TrafficLight.UNKNOWN
        self.last_state = TrafficLight.UNKNOWN
        self.last_wp = -1
        self.state_count = 0

        #self.speed_limit = float(self.kmph2mps(rospy.get_param('waypoint_loader/velocity')))

        rospy.spin()

    def pose_cb(self, msg):
        self.pose = msg

    def waypoints_cb(self, msg):
        self.base_waypoints = msg.waypoints
        #rospy.loginfo('base_waypoints[100] = %d', self.base_waypoints[100].pose.pose.position.x)

    def traffic_cb(self, msg):
        self.lights = msg.lights

    def image_cb(self, msg):
        """Identifies red lights in the incoming camera image and publishes the index
            of the waypoint closest to the red light's stop line to /traffic_waypoint

        Args:
            msg (Image): image from car-mounted camera

        """
        self.has_image = True
        self.camera_image = msg
        light_wp, state = self.process_traffic_lights()

        '''
        Publish upcoming red lights at camera frequency.
        Each predicted state has to occur `STATE_COUNT_THRESHOLD` number
        of times till we start using it. Otherwise the previous stable state is
        used.
        '''
        if self.state != state:
            self.state_count = 0
            self.state = state
        elif self.state_count >= STATE_COUNT_THRESHOLD:
            self.last_state = self.state
            light_wp = light_wp if state == TrafficLight.RED else -1
            self.last_wp = light_wp
            self.upcoming_red_light_pub.publish(Int32(light_wp))
        else:
            self.upcoming_red_light_pub.publish(Int32(self.last_wp))
        self.state_count += 1

    def get_closest_waypoint(self, car_pose):
        """Identifies the closest path waypoint to the given position
            https://en.wikipedia.org/wiki/Closest_pair_of_points_problem
        Args:
            pose (Pose): position to match a waypoint to

        Returns:
            int: index of the closest waypoint in self.waypoints

        """
        #TODO implement
        closest_dist = 999999
        closest_index = -1

        # Find waypoint closest to vehicle position
        for i in range(len(self.base_waypoints)):
            dist = self.get_dist(self.base_waypoints[i].pose.pose.position, car_pose.position)
            if dist < closest_dist:
                closest_index = i
                closest_dist = dist
        return closest_index

    def buffer_dist(self, current_velocity, decel):
        v = current_velocity.twist.linear.x
        return 0.5 * v * v / decel

    def get_closest_waypoint_light(self, light_pose):
        """Identifies the closest path waypoint to the given position
            https://en.wikipedia.org/wiki/Closest_pair_of_points_problem
        Args:
            pose (Pose): position to match a waypoint to

        Returns:
            int: index of the closest waypoint in self.waypoints

        """
        #TODO implement
        min_dist = 999999
        min_index = -1

        # Find waypoint closest to vehicle position
        for i in range(len(self.base_waypoints)):
            dist = self.get_dist_light(self.base_waypoints[i].pose.pose.position, light_pose)
            if dist < min_dist:
                min_index = i
                min_dist = dist
        return min_index

    def get_dist(self, pos1, pos2):
        return math.sqrt((pos1.x - pos2.x)**2 + (pos1.y - pos2.y)**2 + (pos1.z - pos2.z)**2)

    def get_dist_light(self, waypoint_pose, light_pose):
        return math.sqrt((waypoint_pose.x - light_pose[0])**2 + (waypoint_pose.y - light_pose[1])**2)

    def get_light_state(self, light):
        """Determines the current color of the traffic light

        Args:
            light (TrafficLight): light to classify

        Returns:
            int: ID of traffic light color (specified in styx_msgs/TrafficLight)

        """
        if(not self.has_image):
            self.prev_light_loc = None
            return False

        cv_image = self.bridge.imgmsg_to_cv2(self.camera_image, "bgr8")
        light_image = cv2.resize(cv_image, (IMG_SIZE, IMG_SIZE), interpolation = cv2.INTER_CUBIC)
        '''
        bbox = self.project_to_image_plane(light)
        if bbox is None:
            return TrafficLight.UNKNOWN

        x1, y1, x2, y2 = bbox
        if x1 is not None and abs(y2-y1) > 70 and abs(x2-x1) > 70:
            light_roi = cv_image[y1:y2, x1:x2]
            light_image = cv2.resize(light_roi, (IMG_SIZE, IMG_SIZE), interpolation = cv2.INTER_CUBIC)
        '''

        #Get classification
        return self.light_classifier.get_classification(light_image)

    def process_traffic_lights(self):
        """Finds closest visible traffic light, if one exists, and determines its
            location and color

        Returns:
            int: index of waypoint closest to the upcoming stop line for a traffic light (-1 if none exists)
            int: ID of traffic light color (specified in styx_msgs/TrafficLight)

        """
        light = None

        # List of positions that correspond to the line to stop in front of for a given intersection
        stop_line_positions = self.config['stop_line_positions']
        #rospy.loginfo("stop_line_positions: x= %d, y= %d", stop_line_positions[0][0],stop_line_positions[0][1])

        light_waypoint_pos = []

        if self.base_waypoints is None:
            return -1, TrafficLight.UNKNOWN
        for i in range(len(stop_line_positions)):
            light_pos = self.get_closest_waypoint_light(stop_line_positions[i])
            light_waypoint_pos.append(light_pos)

        #rospy.loginfo("light pos = %i", light_waypoint_pos[1])
        #rospy.loginfo("light_waypoint_pos = %d", self.base_waypoints[light_waypoint_pos[0]].pose.pose.position.x)

        self.last_tl_pos_wp = light_waypoint_pos

        if(self.pose):
            car_position = self.get_closest_waypoint(self.pose.pose)
            if car_position is not None:
                self.last_car_position = car_position
        else:
            return -1, TrafficLight.UNKNOWN

        if self.last_car_position > max(self.last_tl_pos_wp):
            light_num_wp = min(self.last_tl_pos_wp)
        else:
            light_delta = self.last_tl_pos_wp[:]
            light_delta[:] = [x - self.last_car_position for x in light_delta]
            light_num_wp = min(i for i in light_delta if i >= 0) + self.last_car_position

        light_index = self.last_tl_pos_wp.index(light_num_wp)
        light = stop_line_positions[light_index]

        #light_distance = self.distance_light(light, point.pose.pose.position)
        light_distance = self.get_dist_light(self.base_waypoints[self.last_car_position].pose.pose.position, light)
        search_for_light_distance = 5

        if light:
            if light_distance >= search_for_light_distance:
                return -1, TrafficLight.UNKNOWN
            else:
                state = self.get_light_state(light)
                rospy.loginfo('Traffic light index = %i, state = %d', light_num_wp, state)
                return light_num_wp, state

        self.base_waypoints = None
        return -1, TrafficLight.UNKNOWN

    def process_traffic_lights_2(self):
        """Alternative (unfinished) version of process_traffic_lights.

        Returns:
            int: index of waypoint closest to the upcoming stop line for a traffic light (-1 if none exists)
            int: ID of traffic light color (specified in styx_msgs/TrafficLight)

        """
        # Unfinished alternative implementation; currently always reports no visible light.
        # List of positions that correspond to the line to stop in front of for a given intersection
        stop_line_positions = self.config['stop_line_positions']
        tl_waypoint_pos = []
        return -1, TrafficLight.UNKNOWN


    def distance_light(self, pos1, pos2):
        x = pos1[0] - pos2.x
        y = pos1[1] - pos2.y
        #z = pos1.z - pos2.z

        return math.sqrt(x*x + y*y)

    def project_with_fov(self, d, x, y):
        # Camera characteristics (the config focal lengths are read here, but the
        # projection below uses hand-tuned constants for the simulator camera)
        fov_x = self.config['camera_info']['focal_length_x']
        fov_y = self.config['camera_info']['focal_length_y']
        image_width = self.config['camera_info']['image_width']
        image_height = self.config['camera_info']['image_height']

        img_x = 0.5*image_width - 2574*x/d
        img_y = image_height - 2740*y/d

        img_x = self.clamp(img_x, 0, image_width)
        img_y = self.clamp(img_y, 0, image_height)

        return int(img_x), int(img_y)

    def project_to_image_plane(self, light):
        """Project point from 3D world coordinates to 2D camera image location
        Args:
            point_in_world (Point): 3D location of a point in the world
        Returns:
            x (int): x coordinate of target point in image
            y (int): y coordinate of target point in image
        """
        fx = self.config['camera_info']['focal_length_x']

        # get transform between pose of camera and world frame
        # trans = None
        # rot = None
        base_light = None

        try:
            now = rospy.Time.now()
            self.listener.waitForTransform("/base_link",
                  light.header.frame_id, now, rospy.Duration(1.0))
            base_light = self.listener.transformPose("base_link", light.pose)

        except (tf.Exception, tf.LookupException, tf.ConnectivityException):
            rospy.logerr("Failed to find camera to map transform")
            return None

        # Find bounding box of traffic light in image
        if base_light is not None:
            # Simulator uses FOV
            if fx < 100:
                # x, y = self.project_with_fov(base_light)
                d = base_light.pose.position.x
                x = base_light.pose.position.y + 0.5
                y = base_light.pose.position.z - 1.75

                ux, uy = self.project_with_fov(d, x + 0.5*LIGHT_WIDTH, y + 0.5*LIGHT_HEIGHT)
                lx, ly = self.project_with_fov(d, x - 0.5*LIGHT_WIDTH, y - 0.5*LIGHT_HEIGHT)

                return ux, uy, lx, ly

            # Real car uses focal length
            else:
                rospy.loginfo('Real car detected...  Process image using focal length!')
                return None
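
# The projection in 예제 #21 uses hand-tuned constants for the simulator camera;
# the unfinished real-car branch above would instead use the standard pinhole
# model. A hedged, generic sketch of that model follows -- the focal lengths and
# principal point values are made up and are not read from traffic_light_config.
def project_point(x_cam, y_cam, z_cam, fx, fy, cx, cy):
    """Project a point given in camera coordinates (z forward, x right, y down)
    onto the image plane and return integer pixel coordinates (u, v)."""
    u = fx * x_cam / z_cam + cx
    v = fy * y_cam / z_cam + cy
    return int(round(u)), int(round(v))


if __name__ == '__main__':
    # A light 20 m ahead, 1 m to the left of and 2 m above the optical axis
    print(project_point(-1.0, -2.0, 20.0, fx=1345.0, fy=1353.0, cx=400.0, cy=300.0))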
예제 #22
0
class TLDetector(object):
    def __init__(self):
        rospy.init_node('tl_detector')

        self.pose = None
        self.waypoints = None
        self.camera_image = None
        self.lights = []

        sub1 = rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)
        sub2 = rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)

        '''
        /vehicle/traffic_lights provides you with the location of the traffic light in 3D map space and
        helps you acquire an accurate ground truth data source for the traffic light
        classifier by sending the current color state of all traffic lights in the
        simulator. When testing on the vehicle, the color state will not be available. You'll need to
        rely on the position of the light and the camera image to predict it.
        '''
        sub3 = rospy.Subscriber('/vehicle/traffic_lights', TrafficLightArray, self.traffic_cb)
        sub6 = rospy.Subscriber('/image_color', Image, self.image_cb, queue_size=1)

        config_string = rospy.get_param("/traffic_light_config")
        self.config = yaml.load(config_string)

        self.upcoming_red_light_pub = rospy.Publisher('/traffic_waypoint', Int32, queue_size=1)

        self.bridge = CvBridge()
        self.light_classifier = TLClassifier()
        self.listener = tf.TransformListener()

        self.state = TrafficLight.UNKNOWN
        self.last_state = TrafficLight.UNKNOWN
        self.last_wp = -1
        self.state_count = 0

        rospy.loginfo("TLDetector is ready -> notify waypoint_updater")
        # notify updater about detector readiness, using a sentinel value for this
        self.upcoming_red_light_pub.publish(Int32(-1234))
        rospy.spin()

    def pose_cb(self, msg):
        self.pose = msg

    def waypoints_cb(self, msg):
        self.waypoints = msg.waypoints

    def traffic_cb(self, msg):
        self.lights = msg.lights

    def image_cb(self, msg):
        """Identifies red lights in the incoming camera image and publishes the index
            of the waypoint closest to the red light's stop line to /traffic_waypoint

        Args:
            msg (Image): image from car-mounted camera

        """
        global ProcessingTimeSum, ProcessingIterations
        self.has_image = True
        self.camera_image = msg
        # start to call classification:
        light_wp, state = self.process_traffic_lights()

        '''
        Publish upcoming red lights at camera frequency.
        Each predicted state has to occur `STATE_COUNT_THRESHOLD` number
        of times till we start using it. Otherwise the previous stable state is
        used.
        '''
        if self.state != state:
            self.state_count = 0
            self.state = state
        elif self.state_count >= STATE_COUNT_THRESHOLD:
            self.last_state = self.state

            if state not in (TrafficLight.RED, TrafficLight.YELLOW):
                light_wp = -1
            self.last_wp = light_wp
            self.upcoming_red_light_pub.publish(Int32(light_wp))
        else:
            self.upcoming_red_light_pub.publish(Int32(self.last_wp))
        self.state_count += 1

    def get_closest_waypoint(self, pose):
        """Identifies the closest path waypoint to the given position
            https://en.wikipedia.org/wiki/Closest_pair_of_points_problem
        Args:
            pose (Pose): position to match a waypoint to

        Returns:
            int: index of the closest waypoint in self.waypoints

        """

        # If we don't have any waypoints return None
        if self.waypoints is None:
            return

        # Get current position
        x = pose.position.x
        y = pose.position.y

        # define minimum distance variable
        minimum_distance = None
        minimum_location = None

        # Search through all the waypoints to get the closest waypoint
        for i,waypoint in enumerate(self.waypoints):
            waypoint_x = waypoint.pose.pose.position.x
            waypoint_y = waypoint.pose.pose.position.y

            dist_to_waypoint = self.euclidianDistance(waypoint_x, waypoint_y, x, y)

            # Initialize minimum distance at first value, or get new minimum distance
            if minimum_distance is None:
                minimum_location = i
                minimum_distance = dist_to_waypoint
            elif dist_to_waypoint <= minimum_distance:
                minimum_location = i
                minimum_distance = dist_to_waypoint

        return minimum_location

    def get_light_state(self, light):
        """Determines the current color of the traffic light

        Args:
            light (TrafficLight): light to classify

        Returns:
            int: ID of traffic light color (specified in styx_msgs/TrafficLight)

        """
        if(not self.has_image):
            self.prev_light_loc = None
            return False

        cv_image = self.bridge.imgmsg_to_cv2(self.camera_image, "bgr8")

        #Get classification
        return self.light_classifier.get_classification(cv_image)

    def get_distance(self, wp1_idx, wp2_idx):
        """Determines the distance between two way point's index
        Args:
            wp1_idx (waypoint index): waypoint index 1
            wp2_idx (waypoint index): waypoint index 2

        Returns:
            int: distance in meters between two waypoint index
        """
        if(wp1_idx < 0 or wp1_idx >= len(self.waypoints) or
           wp2_idx < 0 or wp2_idx >= len(self.waypoints)):
           return -1

        wp1 = self.waypoints[wp1_idx].pose.pose.position
        wp2 = self.waypoints[wp2_idx].pose.pose.position
        return self.euclidianDistance(wp1.x, wp1.y, wp2.x, wp2.y)

    def euclidianDistance(self, x1, y1, x2, y2):
        return math.sqrt((x1 -x2)**2 + (y1 - y2)**2)

    def find_closest_traffic_light(self, car_position):
        """Determines closest traffic to the car position
        Args:
            car_position: actual car position
        Returns:
            ligt: position of the traffic light
            light_waypoint: closest waypoint to the traffic light
        """
        light = None
        closest_light = -1
        # List of positions that correspond to the line to stop in front of for a given intersection
        stop_light_positions = self.config['stop_line_positions']
        # Loop through all the stop light positions
        for i,stop_light_position in enumerate(stop_light_positions):
            # Initialize a Pose
            light_pose = Pose()
            light_pose.position.x = stop_light_position[0]
            light_pose.position.y = stop_light_position[1]

            # Initialize a Waypoint, which is closest to the traffic light
            light_waypoint = self.get_closest_waypoint(light_pose)

            # Find the closest waypoint with a traffic light (or closest traffic light)
            if light_waypoint >= car_position:
                if light is None:
                    closest_light = light_waypoint
                    light = light_pose
                elif light_waypoint < closest_light:
                    closest_light = light_waypoint
                    light = light_pose
        return light, closest_light

    def get_traffic_light_ground_truth(self, traffic_light):
        """
        Finds ground truth from the traffic light
        Args:
            traffic_light: traffic light position
        Returns:
            light.state: traffic light state
        """
        tl_x = traffic_light.position.x
        tl_y = traffic_light.position.y

        state = TrafficLight.UNKNOWN
        for light in self.lights:
            '''
            If the position of the light from the yaml file and the one reported via
            /vehicle/traffic_lights differ by less than TL_DETECTION_RANGE (~30 m), consider them the same
            '''
            l_x = light.pose.pose.position.x
            l_y = light.pose.pose.position.y
            if self.euclidianDistance(l_x, l_y, tl_x, tl_y) < TL_DETECTION_RANGE:
                return light.state
        return state

    def process_traffic_lights(self):
        """
        Finds closest visible traffic light, if one exists, and determines its
            location and color

        Returns:
            int: index of waypoint closest to the upcoming stop line for a traffic light (-1 if none exists)
            int: ID of traffic light color (specified in styx_msgs/TrafficLight)
        """
        if not self.has_image:
            return -1, TrafficLight.UNKNOWN

        light_pos = None
        closest_light = -1
        car_position = -1
        distance_to_tl = -1

        if(self.pose):
            car_position = self.get_closest_waypoint(self.pose.pose)

        light_pos, closest_light = self.find_closest_traffic_light(car_position)

        if car_position is not None and car_position >= 0 and light_pos is not None:
            waypoint_num_to_light = abs(car_position - closest_light)
            distance_to_tl = self.get_distance(car_position, closest_light)

        if light_pos:
            state = TrafficLight.UNKNOWN
            if USE_CLASSIFIER:
                if (distance_to_tl < TL_DETECTION_RANGE):
                    state = self.get_light_state(light_pos)
                    #rospy.loginfo("-dist to closest TL- " + str(distance_to_tl))
            else:
                state = self.get_traffic_light_ground_truth(light_pos)
            return closest_light, state
        return -1, TrafficLight.UNKNOWN
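
# The ground-truth lookup in get_traffic_light_ground_truth above, reduced to a
# standalone sketch: return the state of the simulated light that lies within
# TL_DETECTION_RANGE of the stop-line position. The coordinates, string states
# and the 30 m range below are illustrative assumptions.
import math

TL_DETECTION_RANGE = 30.0  # metres


def ground_truth_state(stop_line_xy, lights, unknown='UNKNOWN'):
    """lights: list of ((x, y), state) tuples as reported on /vehicle/traffic_lights."""
    sx, sy = stop_line_xy
    for (lx, ly), state in lights:
        if math.hypot(lx - sx, ly - sy) < TL_DETECTION_RANGE:
            return state
    return unknown


if __name__ == '__main__':
    lights = [((1148.6, 1184.7), 'RED'), ((1559.2, 1158.4), 'GREEN')]
    print(ground_truth_state((1145.0, 1184.0), lights))   # RED (a light is ~3.7 m away)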
class TLDetector(object):
    def __init__(self):
        rospy.init_node('tl_detector')

        self.pose = None
        self.waypoints = None
        self.waypoints2D = None
        self.waypointsTree = None
        self.camera_image = None
        self.lights = []

        sub1 = rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)
        sub2 = rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)
        '''
        /vehicle/traffic_lights provides you with the location of the traffic light in 3D map space and
        helps you acquire an accurate ground truth data source for the traffic light
        classifier by sending the current color state of all traffic lights in the
        simulator. When testing on the vehicle, the color state will not be available. You'll need to
        rely on the position of the light and the camera image to predict it.
        '''
        sub3 = rospy.Subscriber('/vehicle/traffic_lights', TrafficLightArray,
                                self.traffic_cb)
        sub6 = rospy.Subscriber('/image_color', Image, self.image_cb)

        config_string = rospy.get_param("/traffic_light_config")
        self.config = yaml.load(config_string)

        self.upcoming_red_light_pub = rospy.Publisher('/traffic_waypoint',
                                                      Int32,
                                                      queue_size=1)

        self.bridge = CvBridge()
        self.light_classifier = TLClassifier()
        self.listener = tf.TransformListener()

        self.state = TrafficLight.UNKNOWN
        self.last_state = TrafficLight.UNKNOWN
        self.last_wp = -1
        self.state_count = 0
        self.wrong_image_count = 0
        self.has_image = False

        #         rospy.spin()
        self.loop()

    def loop(self):
        rate = rospy.Rate(50)
        while not rospy.is_shutdown():
            self.checkLightAndPublish()
            rate.sleep()

    def pose_cb(self, msg):
        self.pose = msg

    def waypoints_cb(self, msg):
        self.waypoints = msg.waypoints
        self.waypoints2D = [[wp.pose.pose.position.x, wp.pose.pose.position.y]
                            for wp in msg.waypoints]
        self.waypointsTree = KDTree(self.waypoints2D)

    def traffic_cb(self, msg):
        self.lights = msg.lights

    def image_cb(self, msg):
        """Identifies red lights in the incoming camera image and publishes the index
            of the waypoint closest to the red light's stop line to /traffic_waypoint

        Args:
            msg (Image): image from car-mounted camera

        """
        self.has_image = True
        self.camera_image = msg

    def checkLightAndPublish(self):
        light_wp, state = self.process_traffic_lights()
        '''
        Publish upcoming red lights at camera frequency.
        Each predicted state has to occur `STATE_COUNT_THRESHOLD` number
        of times till we start using it. Otherwise the previous stable state is
        used.
        '''
        if self.state != state:
            self.state_count = 0
            self.state = state
        elif self.state_count >= STATE_COUNT_THRESHOLD:
            self.last_state = self.state
            light_wp = light_wp if state == TrafficLight.RED else -1
            self.last_wp = light_wp
            self.upcoming_red_light_pub.publish(Int32(light_wp))
        else:
            self.upcoming_red_light_pub.publish(Int32(self.last_wp))
        self.state_count += 1

    def get_closest_waypoint(self, x, y):
        """Identifies the closest path waypoint to the given position
            https://en.wikipedia.org/wiki/Closest_pair_of_points_problem
        Args:
            pose (Pose): position to match a waypoint to

        Returns:
            int: index of the closest waypoint in self.waypoints

        """
        return self.waypointsTree.query([x, y], 1)[1]

    def get_light_state(self, light):
        """Determines the current color of the traffic light

        Args:
            light (TrafficLight): light to classify

        Returns:
            int: ID of traffic light color (specified in styx_msgs/TrafficLight)

        """
        if (not self.has_image):
            self.prev_light_loc = None
            return light.state

        cv_image = self.bridge.imgmsg_to_cv2(self.camera_image, "bgr8")

        #Get classification
        classification = self.light_classifier.get_classification(cv_image)
        if classification == TrafficLight.RED and light.state != TrafficLight.RED:
            self.wrong_image_count += 1
            cv2.imwrite(
                "light_classification/wrong/shouldntbered" +
                str(self.wrong_image_count) + ".png", cv_image)

        return classification

    def process_traffic_lights(self):
        """Finds closest visible traffic light, if one exists, and determines its
            location and color

        Returns:
            int: index of waypoint closest to the upcoming stop line for a traffic light (-1 if none exists)
            int: ID of traffic light color (specified in styx_msgs/TrafficLight)

        """
        closestLight = None

        if self.pose and self.waypointsTree is not None:
            car_waypoint = self.get_closest_waypoint(self.pose.pose.position.x,
                                                     self.pose.pose.position.y)
        else:
            return -1, TrafficLight.UNKNOWN

        # find the closest visible traffic light (if one exists)
        closestLight, waypointToStopAt = self.get_closest_light(car_waypoint)

        if closestLight:
            state = self.get_light_state(closestLight)
            return waypointToStopAt, state
        return -1, TrafficLight.UNKNOWN

    def get_closest_light(self, current_waypoint):
        smallestDistance = len(self.waypoints)
        closestLight = None
        waypointToStopAt = None
        # List of positions that correspond to the line to stop in front of for a given intersection
        stop_line_positions = self.config['stop_line_positions']
        for i, light in enumerate(self.lights):
            line = stop_line_positions[i]
            lineWaypoint = self.get_closest_waypoint(line[0], line[1])

            distance = lineWaypoint - current_waypoint
            if distance < smallestDistance and distance >= 0:
                smallestDistance = distance
                closestLight = light
                waypointToStopAt = lineWaypoint

        return closestLight, waypointToStopAt
예제 #24
0
class TLDetector(object):
    def __init__(self):
        rospy.init_node('tl_detector')

        self.pose = None
        self.waypoints = None
        self.camera_image = None
        self.lights = []
        self.tl_wps = []
        self.light_classifier = None
        self.disable_camera = False  #True

        sub1 = rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)
        sub2 = rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)
        '''
        /vehicle/traffic_lights provides you with the location of the traffic light in 3D map space and
        helps you acquire an accurate ground truth data source for the traffic light
        classifier by sending the current color state of all traffic lights in the
        simulator. When testing on the vehicle, the color state will not be available. You'll need to
        rely on the position of the light and the camera image to predict it.
        '''
        sub3 = rospy.Subscriber('/vehicle/traffic_lights', TrafficLightArray,
                                self.traffic_cb)
        sub6 = rospy.Subscriber('/image_color', Image, self.image_cb)

        config_string = rospy.get_param("/traffic_light_config")
        self.config = yaml.load(config_string)

        self.upcoming_red_light_pub = rospy.Publisher('/traffic_waypoint',
                                                      Int32,
                                                      queue_size=1)

        self.bridge = CvBridge()
        self.light_classifier = TLClassifier()
        self.listener = tf.TransformListener()

        self.state = TrafficLight.UNKNOWN
        self.last_state = TrafficLight.UNKNOWN
        self.last_wp = -1
        self.state_count = 0
        self.has_image = False

        self.drop_count = 0
        self.drop_every_num_frames = 2
        rospy.spin()

    def pose_cb(self, msg):
        self.pose = msg

    def waypoints_cb(self, waypoints):
        self.waypoints = waypoints

    def traffic_cb(self, msg):
        self.lights = msg.lights

    def image_cb(self, msg):
        """Identifies red lights in the incoming camera image and publishes the index
            of the waypoint closest to the red light's stop line to /traffic_waypoint

        Args:
            msg (Image): image from car-mounted camera

        """
        if self.disable_camera:
            return

        self.drop_count += 1
        if self.drop_count == self.drop_every_num_frames:
            rospy.loginfo("Got new image!!!!")
            self.has_image = True
            self.camera_image = msg
            light_wp, state = self.process_traffic_lights()
            rospy.loginfo("light_wp = %s, state = %s", light_wp, state)
            '''
            Publish upcoming red lights at camera frequency.
            Each predicted state has to occur `STATE_COUNT_THRESHOLD` number
            of times till we start using it. Otherwise the previous stable state is
            used.
            '''
            if self.state != state:
                self.state_count = 0
                self.state = state
            elif self.state_count >= STATE_COUNT_THRESHOLD:
                self.last_state = self.state
                light_wp = light_wp if state == TrafficLight.RED else -1
                self.last_wp = light_wp
                self.upcoming_red_light_pub.publish(Int32(light_wp))
            else:
                self.upcoming_red_light_pub.publish(Int32(self.last_wp))
            self.state_count += 1
            self.drop_count = 0

    def get_closest_waypoint(self, pose):
        """Identifies the closest path waypoint to the given position
            https://en.wikipedia.org/wiki/Closest_pair_of_points_problem
        Args:
            pose (Pose): position to match a waypoint to

        Returns:
            int: index of the closest waypoint in self.waypoints

        """
        idx = -1
        dis_closest = -1

        if self.waypoints is not None:
            wps = self.waypoints.waypoints
            for i in range(len(wps)):
                wp = wps[i]
                dist = (wp.pose.pose.position.x-pose.x)**2 + \
                       (wp.pose.pose.position.y-pose.y)**2
                if (dis_closest == -1) or (dis_closest > dist):
                    dis_closest = dist
                    idx = i
        return idx

    def get_light_state(self, light):
        """Determines the current color of the traffic light

        Args:
            light (TrafficLight): light to classify

        Returns:
            int: ID of traffic light color (specified in styx_msgs/TrafficLight)

        """
        if (not self.has_image):
            self.prev_light_loc = None
            return False

        imag = self.bridge.imgmsg_to_cv2(self.camera_image, "bgr8")
        cv_image = cv2.cvtColor(imag, cv2.COLOR_BGR2RGB)

        #Get classification
        if self.light_classifier is None:
            self.light_classifier = TLClassifier()
        return self.light_classifier.get_classification(cv_image)
        #return  TLClassifier().get_classification(cv_image)

    def process_traffic_lights(self):
        """Finds closest visible traffic light, if one exists, and determines its
            location and color

        Returns:
            int: index of waypoint closest to the upcoming stop line for a traffic light (-1 if none exists)
            int: ID of traffic light color (specified in styx_msgs/TrafficLight)

        """
        light = None
        light_wp = None
        state = None

        # List of positions that correspond to the line to stop in front of for a given intersection
        stop_line_positions = self.config['stop_line_positions']
        if (len(self.tl_wps) == 0):
            for stop_line in stop_line_positions:
                self.tl_wps.append(
                    self.get_closest_waypoint(
                        Point(stop_line[0], stop_line[1], 0)))

        if (self.pose):
            car_position = self.get_closest_waypoint(self.pose.pose.position)
            # Find the closest visible traffic light (if one exists)
            nearest_diff = -1
            idx = -1
            for i in range(len(self.tl_wps)):
                diff = self.tl_wps[i] - car_position
                if (diff > 0) and ((nearest_diff == -1) or
                                   (nearest_diff > diff)):
                    nearest_diff = diff
                    idx = i
            if idx != -1 and nearest_diff < WAYPOINT_THRESHOLD:
                light = self.lights[idx]
                light_wp = self.tl_wps[idx]

        if light:
            state = self.get_light_state(light)
            # check the result
            if light.state != TrafficLight.UNKNOWN:
                if state != light.state:
                    rospy.logfatal(
                        'tl_detect error, state is %s but should be %s', state,
                        light.state)
                    #state = light.state
            return light_wp, state
        return -1, TrafficLight.UNKNOWN
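
# The stop-line selection in the example above, isolated into a pure function:
# given the car's waypoint index and the precomputed stop-line waypoint indices,
# pick the nearest one ahead of the car that is closer than a threshold. The
# indices and the threshold of 100 waypoints are illustrative assumptions.
def next_stop_line(car_idx, stop_line_indices, max_ahead=100):
    """Return (list position, waypoint index) of the nearest stop line ahead,
    or (None, None) if none lies within max_ahead waypoints."""
    best_i, best_wp, best_diff = None, None, None
    for i, wp in enumerate(stop_line_indices):
        diff = wp - car_idx
        if diff > 0 and (best_diff is None or diff < best_diff):
            best_i, best_wp, best_diff = i, wp, diff
    if best_diff is not None and best_diff < max_ahead:
        return best_i, best_wp
    return None, None


if __name__ == '__main__':
    print(next_stop_line(290, [292, 750, 2040, 2580]))   # (0, 292)
    print(next_stop_line(800, [292, 750, 2040, 2580]))   # (None, None): 2040 is too far ahead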
class TLDetector(object):
    def __init__(self):
        rospy.init_node('tl_detector')

        self.pose = None
        self.waypoints = None
        self.wp_search = None
        self.camera_image = None
        self.lights = []

        rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)
        rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)
        '''
        /vehicle/traffic_lights provides you with the location of the traffic light in 3D map space and
        helps you acquire an accurate ground truth data source for the traffic light
        classifier by sending the current color state of all traffic lights in the
        simulator. When testing on the vehicle, the color state will not be available. You'll need to
        rely on the position of the light and the camera image to predict it.
        '''
        rospy.Subscriber('/vehicle/traffic_lights', TrafficLightArray,
                         self.traffic_cb)
        rospy.Subscriber('/image_color', Image, self.image_cb)

        config_string = rospy.get_param("/traffic_light_config")
        self.config = yaml.load(config_string)

        self.upcoming_red_light_pub = rospy.Publisher('/traffic_waypoint',
                                                      Int32,
                                                      queue_size=1)

        self.bridge = CvBridge()
        self.light_classifier = TLClassifier()
        self.listener = tf.TransformListener()

        self.state = TrafficLight.UNKNOWN
        self.last_state = TrafficLight.UNKNOWN
        self.last_wp = -1
        self.state_count = 0

        rospy.spin()

    def pose_cb(self, msg):
        self.pose = msg

    def waypoints_cb(self, waypoints):
        self.waypoints = waypoints
        self.wp_search = WaypointSearch(self.waypoints.waypoints)

    def traffic_cb(self, msg):
        self.lights = msg.lights

    def image_cb(self, msg):
        """Identifies red lights in the incoming camera image and publishes the index
            of the waypoint closest to the red light's stop line to /traffic_waypoint

        Args:
            msg (Image): image from car-mounted camera

        """

        start_time = rospy.get_time()
        if None in (self.pose, self.wp_search):
            return

        self.has_image = True
        self.camera_image = msg
        light_wp, state = self.process_traffic_lights()
        '''
        Publish upcoming red lights at camera frequency.
        Each predicted state has to occur `STATE_COUNT_THRESHOLD` number
        of times till we start using it. Otherwise the previous stable state is
        used.
        '''
        if self.state != state:
            self.state_count = 0
            self.state = state
        elif self.state_count >= STATE_COUNT_THRESHOLD:
            self.last_state = self.state
            light_wp = light_wp if state == TrafficLight.RED else -1
            self.last_wp = light_wp
            self.upcoming_red_light_pub.publish(Int32(light_wp))
        else:
            self.upcoming_red_light_pub.publish(Int32(self.last_wp))
        self.state_count += 1

        rospy.loginfo("Classification duration=%.3f",
                      rospy.get_time() - start_time)

    def get_light_state(self, light):
        """Determines the current color of the traffic light

        Args:
            light (TrafficLight): light to classify

        Returns:
            int: ID of traffic light color (specified in styx_msgs/TrafficLight)

        """
        if (not self.has_image):
            self.prev_light_loc = None
            return False

        cv_image = self.bridge.imgmsg_to_cv2(self.camera_image, "bgr8")
        #cv_image = self.bridge.imgmsg_to_cv2(self.camera_image, "rgb8")

        # Get classification
        return self.light_classifier.get_classification(cv_image)

    def process_traffic_lights(self):
        """Finds closest visible traffic light, if one exists, and determines its
            location and color

        Returns:
            int: index of waypoint closest to the upcoming stop line for a traffic light (-1 if none exists)
            int: ID of traffic light color (specified in styx_msgs/TrafficLight)

        """
        closest_light = None
        closest_light_idx = -1
        line_wp_idx = None

        # List of positions that correspond to the line to stop in front of for a given intersection
        stop_line_positions = self.config['stop_line_positions']
        car_wp_idx = self.wp_search.get_closest_waypoint_idx_ahead(
            self.pose.pose.position.x, self.pose.pose.position.y)

        diff = len(self.waypoints.waypoints)
        for i, light in enumerate(self.lights):
            stop_line_x, stop_line_y = stop_line_positions[i]
            temp_wp_idx = self.wp_search.get_closest_waypoint_idx_behind(
                stop_line_x, stop_line_y)
            d = (temp_wp_idx - car_wp_idx) % len(self.waypoints.waypoints)
            if d < diff:
                diff = d
                closest_light = light
                closest_light_idx = i
                line_wp_idx = temp_wp_idx

        if closest_light:
            state = self.get_light_state(closest_light)
            distance = self.__calc_distance(car_wp_idx, line_wp_idx)
            if hasattr(closest_light, 'state') and (
                    closest_light.state != state) and (distance < 75.0):
                rospy.logwarn(
                    "Incorrect classification at TL %i: state=%s expected=%s at distance %.1f m",
                    closest_light_idx, state, closest_light.state, distance)
            return line_wp_idx, state

        return -1, TrafficLight.UNKNOWN

    def __calc_distance(self, start_idx, end_idx):
        """Calculates the distance between two base waypoints"""
        def dl(a, b):
            return math.sqrt((a.x - b.x)**2 + (a.y - b.y)**2 + (a.z - b.z)**2)

        total_dist = 0
        num_wps = (end_idx - start_idx) % len(self.waypoints.waypoints)
        idx1 = start_idx
        for i in range(num_wps):
            idx0 = idx1
            idx1 = (idx0 + 1) % len(self.waypoints.waypoints)

            total_dist += dl(self.waypoints.waypoints[idx0].pose.pose.position,
                             self.waypoints.waypoints[idx1].pose.pose.position)
        return total_dist
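
# __calc_distance above walks from one waypoint index to another with modular
# wrap-around and sums the segment lengths. The same idea on plain (x, y)
# tuples, with a made-up square "track" as the demo data:
import math


def path_distance(waypoints, start_idx, end_idx):
    """Cumulative euclidean length from start_idx to end_idx along a closed
    waypoint loop; indices wrap around the end of the list."""
    n = len(waypoints)
    total = 0.0
    idx = start_idx
    for _ in range((end_idx - start_idx) % n):
        nxt = (idx + 1) % n
        x0, y0 = waypoints[idx]
        x1, y1 = waypoints[nxt]
        total += math.hypot(x1 - x0, y1 - y0)
        idx = nxt
    return total


if __name__ == '__main__':
    square = [(0, 0), (10, 0), (10, 10), (0, 10)]
    print(path_distance(square, 0, 2))   # 20.0 -- two 10 m sides
    print(path_distance(square, 3, 1))   # 20.0 -- wraps around through index 0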
예제 #26
0
class TLDetector(object):
    def __init__(self):
        rospy.init_node('tl_detector')

        self.pos = -1
        self.pose = None
        self.waypoints = None
        self.x_ave = 0.0
        self.y_ave = 0.0
        self.cos_rotate = 0.0
        self.sin_rotate = 0.0
        self.phi = []
        self.stop_lines = None        
        self.camera_image = None

        config_string = rospy.get_param("/traffic_light_config")
        self.config = yaml.load(config_string)
        
        self.upcoming_red_light_pub = rospy.Publisher('/traffic_waypoint', Int32, queue_size=1)

        self.bridge = CvBridge()

        if USE_KERAS_MODEL:
            self.light_classifier = TLKeras()
        else:
            self.light_classifier = TLClassifier()

        self.listener = tf.TransformListener()

        self.state = TrafficLight.UNKNOWN
        self.last_state = TrafficLight.UNKNOWN
        self.last_wp = -1
        self.state_count = 0
        self.stop_idxs = []
        
        # For image capture...
        self.last_capture_idx = 0
        
        sub1 = rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)
        sub2 = rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)

        '''
        /vehicle/traffic_lights provides you with the location of the traffic light in 3D map space and
        helps you acquire an accurate ground truth data source for the traffic light
        classifier by sending the current color state of all traffic lights in the
        simulator. When testing on the vehicle, the color state will not be available. You'll need to
        rely on the position of the light and the camera image to predict it.
        '''
        sub3 = rospy.Subscriber('/vehicle/traffic_lights', TrafficLightArray, self.traffic_cb)
        sub6 = rospy.Subscriber('/image_color', Image, self.image_cb)
        
        rospy.spin()

    def pose_cb(self, msg):
        if msg:
            self.pos = self.get_index(msg.pose.position.x, msg.pose.position.y)
            self.pose = msg

    def get_index(self, x, y):
        rho = self.get_angle(x, y)
        # special case of wrap around when past the last waypoint
        if not self.phi or rho > self.phi[-1]:
            return 0
        
        idx = 0
        while rho > self.phi[idx]:
            idx += 1

        return idx

    def get_angle(self, x, y):
        # First center
        xc = x - self.x_ave
        yc = y - self.y_ave
        
        # and now rotate
        xr = xc * self.cos_rotate - yc * self.sin_rotate
        yr = yc * self.cos_rotate + xc * self.sin_rotate
        
        # rho now starts at 0 and goes to 2pi for the track waypoints
        rho = math.pi - math.atan2(xr, yr)
        return rho
        
    def waypoints_cb(self, lane):
        #self.waypoints.extend(lane.waypoints)
        self.waypoints = lane.waypoints
        x_tot = 0.0
        y_tot = 0.0
        for p in self.waypoints:
            x_tot += p.pose.pose.position.x
            y_tot += p.pose.pose.position.y

        # We use the average values to recenter the self.waypoints
        self.x_ave = x_tot / len(self.waypoints)
        self.y_ave = y_tot / len(self.waypoints)
        
        # The very first waypoint determines the angle we need to rotate
        # all waypoints by
        xc = self.waypoints[0].pose.pose.position.x - self.x_ave
        yc = self.waypoints[0].pose.pose.position.y - self.y_ave
        rotate = math.atan2(xc, yc) + math.pi
        self.cos_rotate = math.cos(rotate)
        self.sin_rotate = math.sin(rotate)

        for p in self.waypoints:
            rho = self.get_angle(p.pose.pose.position.x, p.pose.pose.position.y)
            self.phi.append(rho)

        # We can only process the stop_lines after the waypoints
        stop_line_positions = self.config['stop_line_positions']
        for stop_line in stop_line_positions:
            idx = self.get_index(stop_line[0], stop_line[1])
            self.stop_idxs.append(idx)
            
        rospy.loginfo(self.stop_idxs)
            

    def traffic_cb(self, msg):
        # It is possible that traffic_cb is called before we've had a
        # chance to process the waypoints, so do nothing (returns None
        # so we know if anyone tries to use this prematurely)
        if not self.waypoints or len(msg.lights) != len(self.stop_idxs):
            return
        
        # Note that we depend on the fact that the stop_lines and the
        # traffic lights appear in the same order in their config files
        
        self.stop_lines = []
        for i, light in enumerate(msg.lights):
            sidx = self.stop_idxs[i]
            self.stop_lines.append((sidx, light.state, light))
        self.stop_lines.sort()

    def get_next_stop_line(self):
        if not self.stop_lines or not self.stop_idxs:
            return (None, None, None)
        elif self.pos > self.stop_lines[-1][0]:
            return self.stop_lines[0]
        idx = 0
        num_lights = len(self.stop_lines)
        while self.pos > self.stop_lines[idx][0]:
            idx += 1
            if idx >= len(self.stop_lines):
                rospy.logerr("stop lines idx: %d" % idx)

        # for debug: a position past the last stop_line will trigger
        # if self.pos > self.stop_lines[idx][0]:
        #     rospy.loginfo("get_next_stop_line self.pos %d  stop_lines: %d" % \
        #                   (self.pos, self.stop_lines[idx][0]))
            
        return self.stop_lines[idx]

    def image_cb(self, msg):
        """Identifies red lights in the incoming camera image and publishes the index
            of the waypoint closest to the red light's stop line to /traffic_waypoint

        Args:
            msg (Image): image from car-mounted camera

        """
        self.has_image = True
        self.camera_image = msg
        incoming_light_wp, state = self.process_traffic_lights()

        '''
        Publish upcoming red lights at camera frequency.
        Each predicted state has to occur `STATE_COUNT_THRESHOLD` number
        of times till we start using it. Otherwise the previous stable state is
        used.
        '''
        if self.state != state:
            self.state_count = 0
            self.state = state
            light_wp = self.last_wp
        elif self.state_count >= STATE_COUNT_THRESHOLD:
            self.last_state = self.state
            light_wp = incoming_light_wp if state == TrafficLight.RED or \
                       state == TrafficLight.YELLOW else -1
            self.last_wp = light_wp
        else:
            light_wp = self.last_wp

        # We refactored Udacity's code to make sure we publish on every cycle
        self.upcoming_red_light_pub.publish(Int32(light_wp))
        self.state_count += 1

    def get_closest_waypoint(self, pose):
        """Identifies the closest path waypoint to the given position
            https://en.wikipedia.org/wiki/Closest_pair_of_points_problem
        Args:
            pose (Pose): position to match a waypoint to

        Returns:
            int: index of the closest waypoint in self.waypoints

        """
        return self.get_index(pose.position.x, pose.position.y)


    def save_image(self, image):
        ''' Image capture for generating training images'''
        if not COLLECT_IMAGES:
            return
        sl_idx, state, light = self.get_next_stop_line()

        if sl_idx in capture_idxs and \
           sl_idx - self.pos < CAPTURE_DISTANCE and \
           self.pos > self.last_capture_idx + 10:
            self.last_capture_idx = self.pos
            fname = CAPTURE_DIR + TRAFFIC_LIGHT_2_NAME[state] + \
                    str(sl_idx) + "_" + str(self.pos) + ".jpg"
            # cv2.imwrite expects BGR, so reverse the RGB channel order before writing
            img = image[..., ::-1]
            cv2.imwrite(fname, img, [cv2.IMWRITE_JPEG_QUALITY, 90])
            

    def get_light_state(self, light):
        """Determines the current color of the traffic light

        Args:
            light (TrafficLight): light to classify

        Returns:
            int: ID of traffic light color (specified in styx_msgs/TrafficLight)

        """
        if not self.has_image:
            self.prev_light_loc = None
            return False

        # Changed this to rgb since that is default for tensorflow / keras
        # Avoids unnecessary color conversions
        cv_image = self.bridge.imgmsg_to_cv2(self.camera_image, "rgb8")

        # only for data collection
        self.save_image(cv_image)
            
        # Get classification
        return self.light_classifier.get_classification(cv_image)

    def process_traffic_lights(self):
        """Finds closest visible traffic light, if one exists, and determines its
            location and color.  If LOOP_ONCE is True, then always return the last waypoint
            as a red light.

        Returns:
            int: index of waypoint closest to the upcoming stop line for a traffic light (-1 if none exists)
            int: ID of traffic light color (specified in styx_msgs/TrafficLight)

        """
        light = None

        # List of positions that correspond to the line to stop in front of for a given intersection
        stop_line_positions = self.config['stop_line_positions']
        if self.pose:
            # car_position is now maintained by pose_cb as self.pos
            # Find the closest visible traffic light (if one exists)
            stop_line_wp, state, light = self.get_next_stop_line()
            outstr = "Car position : " + str(self.pos) + " stop_line_wp : " + str(stop_line_wp)
            #rospy.loginfo(outstr)
            

        if light:
            if USE_CLASSIFICATION:
                state = self.get_light_state(light)
                #rospy.logwarn("light state: %d" % state)
                
            if LOOP_ONCE:
                # Only if our position is past the last traffic light, we return the
                # index of the very last waypoint as a red light
                if self.pos > self.stop_idxs[-1]:
                    state = TrafficLight.RED
                    stop_line_wp = len(self.waypoints) - 1
                
            return stop_line_wp, state

        return -1, TrafficLight.UNKNOWN
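
The red/yellow handling and the publish-on-every-cycle refactor in image_cb above boil down to a small debounce state machine: a candidate state must be seen STATE_COUNT_THRESHOLD times in a row before it replaces the committed decision. Below is a minimal, ROS-free sketch of that idea; the StopLineDebouncer name, the threshold value, and the hard-coded state codes (mirroring styx_msgs/TrafficLight) are assumptions made for the demo, not part of the original node.

# Minimal, ROS-free sketch of the debounce used in image_cb above.
# Assumed styx_msgs/TrafficLight codes: RED=0, YELLOW=1, GREEN=2, UNKNOWN=4.
RED, YELLOW, GREEN, UNKNOWN = 0, 1, 2, 4
STATE_COUNT_THRESHOLD = 3  # assumed value

class StopLineDebouncer(object):
    def __init__(self):
        self.state = UNKNOWN
        self.state_count = 0
        self.last_wp = -1

    def update(self, state, incoming_wp):
        """Return the waypoint index to publish this cycle (-1 = keep driving)."""
        if self.state != state:
            # New candidate state: restart the counter, keep the old decision for now.
            self.state_count = 0
            self.state = state
            publish_wp = self.last_wp
        elif self.state_count >= STATE_COUNT_THRESHOLD:
            # Candidate is stable: commit it. Stop for RED or YELLOW, otherwise clear.
            self.last_wp = incoming_wp if state in (RED, YELLOW) else -1
            publish_wp = self.last_wp
        else:
            publish_wp = self.last_wp
        self.state_count += 1
        return publish_wp

if __name__ == '__main__':
    deb = StopLineDebouncer()
    # Four consecutive RED detections at stop-line waypoint 750:
    print([deb.update(RED, 750) for _ in range(4)])   # [-1, -1, -1, 750]
    print(deb.update(GREEN, 750))                     # still 750 until GREEN is stable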
Example #27
class TLDetector(object):
    def __init__(self):
        rospy.init_node('tl_detector')

        self.pose = None
        self.waypoints = None
        self.camera_image = None
        self.lights = []
        self.light_image_cnt = 0

        config_string = rospy.get_param("/traffic_light_config")
        self.config = yaml.load(config_string)
        # List of positions that correspond to the line to stop in front of for a given intersection
        self.stop_line_positions = self.config['stop_line_positions']
        self.is_site = self.config['is_site']  # whether the code runs in the simulator or at the test site

        self.upcoming_red_light_pub = rospy.Publisher('/traffic_waypoint',
                                                      Int32,
                                                      queue_size=1)

        self.bridge = CvBridge()

        self.listener = tf.TransformListener()

        self.state = TrafficLight.UNKNOWN
        self.last_state = TrafficLight.UNKNOWN
        self.last_wp = -1
        self.state_count = 0

        self.waypoint_tree = None
        self.waypoints_2d = None

        self.has_image = False
        self.light_image_num = 0
        self.light_classifier = TLClassifier(self.is_site)

        sub1 = rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)
        sub2 = rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)
        '''
        /vehicle/traffic_lights provides you with the location of the traffic light in 3D map space and
        helps you acquire an accurate ground truth data source for the traffic light
        classifier by sending the current color state of all traffic lights in the
        simulator. When testing on the vehicle, the color state will not be available. You'll need to
        rely on the position of the light and the camera image to predict it.
        '''
        sub3 = rospy.Subscriber('/vehicle/traffic_lights', TrafficLightArray,
                                self.traffic_cb)
        sub6 = rospy.Subscriber('/image_color', Image, self.image_cb)
        rospy.spin()

    def pose_cb(self, msg):
        self.pose = msg

    def waypoints_cb(self, waypoints):
        self.waypoints = waypoints
        if not self.waypoints_2d:
            self.waypoints_2d = [[
                waypoint.pose.pose.position.x, waypoint.pose.pose.position.y
            ] for waypoint in waypoints.waypoints]
            self.waypoint_tree = KDTree(self.waypoints_2d)

    def traffic_cb(self, msg):
        self.lights = msg.lights  # The simulator sends the traffic light positions and color states

    def image_cb(self, msg):
        """Identifies red lights in the incoming camera image and publishes the index
            of the waypoint closest to the red light's stop line to /traffic_waypoint

        Args:
            msg (Image): image from car-mounted camera

        """
        self.has_image = True
        self.camera_image = msg
        self.light_image_cnt = self.light_image_cnt + 1
        if self.light_image_cnt % 2 == 0:
            if self.waypoint_tree:
                light_wp, state = self.process_traffic_lights()
                if self.state != state:
                    self.state_count = 0
                    self.state = state
                elif self.state_count >= STATE_COUNT_THRESHOLD:
                    self.last_state = self.state
                    light_wp = light_wp if state == TrafficLight.RED else -1
                    self.last_wp = light_wp
                    self.upcoming_red_light_pub.publish(Int32(light_wp))
                else:
                    self.upcoming_red_light_pub.publish(Int32(self.last_wp))
                self.state_count += 1
        '''
        Publish upcoming red lights at camera frequency.
        Each predicted state has to occur `STATE_COUNT_THRESHOLD` number
        of times till we start using it. Otherwise the previous stable state is
        used.
        '''

    def get_closest_waypoint(self, x, y):
        """Identifies the closest path waypoint to the given position
            https://en.wikipedia.org/wiki/Closest_pair_of_points_problem
        Args:
            pose (Pose): position to match a waypoint to

        Returns:
            int: index of the closest waypoint in self.waypoints

        """
        #TODO implement
        closest_idx = self.waypoint_tree.query([x, y], 1)[1]

        # Check if the closest waypoint is ahead of or behind the vehicle
        closest_coord = self.waypoints_2d[closest_idx]
        pre_coord = self.waypoints_2d[closest_idx - 1]

        # Equation for hyperplane through closest_coords
        cl_vect = np.array(closest_coord)
        prev_vect = np.array(pre_coord)
        pos_vect = np.array([x, y])

        val = np.dot(cl_vect - prev_vect, pos_vect - cl_vect)

        if val > 0:
            closest_idx = (closest_idx + 1) % len(self.waypoints_2d)
        return closest_idx

    def get_light_state(self, light):
        """Determines the current color of the traffic light

        Args:
            light (TrafficLight): light to classify

        Returns:
            int: ID of traffic light color (specified in styx_msgs/TrafficLight)

        """

        if not self.has_image:
            self.prev_light_loc = None
            return False

        cv_image = self.bridge.imgmsg_to_cv2(self.camera_image, "bgr8")
        # cv.namedWindow('input_image', cv.WINDOW_NORMAL)
        # cv.imshow('input_image', cv_image)
        # cv.waitKey(1)
        # self.light_image_num = self.light_image_num + 1
        # filename = 'data/light_v2_'+ str(self.light_image_num)+'.png'
        # cv.imwrite(filename, cv_image)

        #Get classification
        return self.light_classifier.get_classification(cv_image)
        #return light.state

    def process_traffic_lights(self):
        """Finds closest visible traffic light, if one exists, and determines its
            location and color

        Returns:
            int: index of waypoint closest to the upcoming stop line for a traffic light (-1 if none exists)
            int: ID of traffic light color (specified in styx_msgs/TrafficLight)

        """
        closest_light = None
        line_wp_idx = None

        if (self.pose):
            car_wp_idx = self.get_closest_waypoint(self.pose.pose.position.x,
                                                   self.pose.pose.position.y)

            #TODO find the closest visible traffic light (if one exists)
            diff = len(self.waypoints.waypoints)
            for i, light in enumerate(self.lights):
                line = self.stop_line_positions[i]
                temp_wp_idx = self.get_closest_waypoint(line[0], line[1])
                d = temp_wp_idx - car_wp_idx
                if d >= 0 and d < diff:
                    diff = d
                    closest_light = light
                    line_wp_idx = temp_wp_idx

            if closest_light:
                state = self.get_light_state(closest_light)
                return line_wp_idx, state

        # just for test
        if self.is_site:
            state = self.get_light_state(closest_light)
        return -1, TrafficLight.UNKNOWN
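
Example #27's get_closest_waypoint combines a k-d tree lookup with a dot-product test so the returned waypoint always lies ahead of the car. A standalone sketch of that check is below; it assumes scipy is available, and the straight toy track and the closest_waypoint_ahead name are made up for illustration.

# Standalone sketch of the KDTree lookup plus the "is it ahead?" hyperplane test
# from get_closest_waypoint() above. The straight track is toy data for the demo.
import numpy as np
from scipy.spatial import KDTree

waypoints_2d = [[float(x), 0.0] for x in range(100)]   # toy track along the x-axis
waypoint_tree = KDTree(waypoints_2d)

def closest_waypoint_ahead(x, y):
    closest_idx = waypoint_tree.query([x, y], 1)[1]
    cl_vect = np.array(waypoints_2d[closest_idx])
    prev_vect = np.array(waypoints_2d[closest_idx - 1])
    pos_vect = np.array([x, y])
    # If the car is already past the hyperplane through the closest waypoint
    # (perpendicular to the local track direction), take the next waypoint instead.
    if np.dot(cl_vect - prev_vect, pos_vect - cl_vect) > 0:
        closest_idx = (closest_idx + 1) % len(waypoints_2d)
    return closest_idx

print(closest_waypoint_ahead(10.4, 0.2))  # 11: waypoint 10 is already behind the car
print(closest_waypoint_ahead(9.6, 0.2))   # 10: waypoint 10 is still ahead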
Example #28
class TLDetector(object):
    def __init__(self):
        rospy.init_node('tl_detector', log_level=rospy.INFO)

        self.pose = None
        self.waypoints = None
        self.camera_image = None
        # self.lights = []
        self.prev_wp_idx = 0

        config_string = rospy.get_param("/traffic_light_config")
        self.config = yaml.load(config_string)

        self.upcoming_red_light_pub = rospy.Publisher('/traffic_waypoint',
                                                      Int32,
                                                      queue_size=1)

        self.bridge = CvBridge()
        self.light_classifier = TLClassifier()
        self.listener = tf.TransformListener()

        self.state = TrafficLight.UNKNOWN
        self.last_state = TrafficLight.UNKNOWN
        self.last_wp = -1
        self.state_count = 0

        self.first_image_time = None
        self.image_count = -1

        # [x,y] coordinates of stopping lines before traffic lights
        self.stop_line_positions = self.config['stop_line_positions']
        # Indices in self.waypoints of the waypoints closest to the respective stopping lines, as reported in self.stop_line_positions
        self.stop_line_idxs = None  # Can be initialised only after receiving the list of waypoints, see waypoints_cb()

        rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)
        rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)
        '''
        /vehicle/traffic_lights provides you with the location of the traffic light in 3D map space and
        helps you acquire an accurate ground truth data source for the traffic light
        classifier by sending the current color state of all traffic lights in the
        simulator. When testing on the vehicle, the color state will not be available. You'll need to
        rely on the position of the light and the camera image to predict it.
        '''
        # rospy.Subscriber('/vehicle/traffic_lights', TrafficLightArray, self.traffic_cb)
        ''' Subscription to /image_color is inside self.waypoints_cb(), as we need the list of track waypoints before
        being able to process messages from /image_color'''

        rospy.spin()

    def pose_cb(self, msg):
        self.pose = msg

    def waypoints_cb(self, waypoints):
        self.waypoints = waypoints.waypoints
        self.stop_line_idxs = [
            np.argmin([
                euler_distance(
                    (wp.pose.pose.position.x, wp.pose.pose.position.y),
                    stop_line_pos) for wp in self.waypoints
            ]) for stop_line_pos in self.stop_line_positions
        ]
        rospy.Subscriber('/image_color', Image, self.image_cb)

    '''
    def traffic_cb(self, msg):
        self.lights = msg.lights
    '''

    def image_cb(self, msg):
        """Identifies red lights in the incoming camera image and publishes the index
            of the waypoint closest to the red light's stop line to /traffic_waypoint

        Args:
            msg (Image): image from car-mounted camera

        """
        self.image_count += 1
        if self.first_image_time is None:
            self.first_image_time = time.time()

        if self.image_count >= 1:
            rospy.logdebug("Images per second: {}".format(
                self.image_count / (time.time() - self.first_image_time)))

        self.has_image = True
        self.camera_image = msg
        light_wp_i, state = self.process_traffic_lights()
        rospy.logdebug("light_wp_i={} state={}".format(light_wp_i, state))
        '''
        Publish upcoming red lights at camera frequency.
        Each predicted state has to occur `STATE_COUNT_THRESHOLD` number
        of times till we start using it. Otherwise the previous stable state is
        used.
        '''
        if self.state != state:
            self.state_count = 0
            self.state = state
        elif self.state_count >= STATE_COUNT_THRESHOLD:
            self.last_state = self.state
            light_wp_i = light_wp_i if state == TrafficLight.RED or state == TrafficLight.YELLOW else -1
            self.last_wp = light_wp_i
            self.upcoming_red_light_pub.publish(Int32(light_wp_i))
        else:
            self.upcoming_red_light_pub.publish(Int32(self.last_wp))
        self.state_count += 1

    def get_light_state(self):
        """Determines the current color of the traffic light

        Args:

        Returns:
            int: ID of traffic light color (specified in styx_msgs/TrafficLight)

        """
        if not self.has_image:
            self.prev_light_loc = None
            return False

        cv_image = self.bridge.imgmsg_to_cv2(self.camera_image, "bgr8")

        #Get classification
        return self.light_classifier.get_classification(cv_image)

    def process_traffic_lights(self):
        """Finds closest visible traffic light, if one exists, and determines its
            location and color

        Returns:
            int: index of waypoint closest to the upcoming stop line for a traffic light (-1 if none exists)
            int: ID of traffic light color (specified in styx_msgs/TrafficLight)

        """

        # List of positions that correspond to the line to stop in front of for a given intersection
        stop_line_positions = self.config['stop_line_positions']
        if self.pose is not None:
            car_pose = self.pose.pose
            car_x, car_y, _ = unpack_pose(car_pose)
            # Find the closest visible traffic light (if one exists)
            min_distance = sys.float_info.max
            min_distance_i = -1
            quaternion = (car_pose.orientation.x, car_pose.orientation.y,
                          car_pose.orientation.z, car_pose.orientation.w)
            _, _, car_yaw = tf.transformations.euler_from_quaternion(
                quaternion)
            for pos_i, pos in enumerate(stop_line_positions):
                dist = ((pos[0] - car_x)**2 + (pos[1] - car_y)**2)**.5
                if dist < min_distance:
                    tl_car_ref_x, _ = universal2car_ref(
                        pos[0], pos[1], car_x, car_y, car_yaw)
                    # Check for a light with stop line before or slightly after the car
                    if tl_car_ref_x >= -1.4:
                        min_distance = dist
                        min_distance_i = pos_i
            rospy.logdebug('min_distance_i={} min_distance={}\n'.format(
                min_distance_i, min_distance))
            if min_distance_i >= 0 and min_distance < 80:
                state = self.get_light_state()
                return self.stop_line_idxs[min_distance_i], state
            else:
                return -1, TrafficLight.UNKNOWN
        else:
            return -1, TrafficLight.UNKNOWN
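
Example #28 precomputes, for each stop line, the index of the nearest track waypoint inside waypoints_cb (its euler_distance helper is presumably a plain 2-D Euclidean distance). A vectorized NumPy sketch of the same precomputation follows; the toy track and stop-line coordinates are invented for the demo.

# Vectorized sketch of the stop_line_idxs precomputation in waypoints_cb() above,
# assuming euler_distance() is ordinary 2-D Euclidean distance.
import numpy as np

waypoints_xy = np.array([[float(x), 0.0] for x in range(200)])   # toy track
stop_line_positions = [[24.7, 1.1], [120.2, -0.8]]               # toy stop lines

def stop_line_waypoint_indices(waypoints_xy, stop_line_positions):
    idxs = []
    for sx, sy in stop_line_positions:
        d2 = np.sum((waypoints_xy - np.array([sx, sy])) ** 2, axis=1)
        idxs.append(int(np.argmin(d2)))   # squared distance preserves the argmin
    return idxs

print(stop_line_waypoint_indices(waypoints_xy, stop_line_positions))  # [25, 120]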
Example #29
class TLDetector(object):
    def __init__(self):
        rospy.init_node('tl_detector')

        self.pose = None
        self.waypoints = None
        self.camera_image = None
        self.lights = []

        self.state = TrafficLight.UNKNOWN
        self.last_state = TrafficLight.UNKNOWN
        self.last_wp = -1
        self.state_count = 0

        self.image_lock = threading.RLock()

        config_string = rospy.get_param("/traffic_light_config")
        self.config = yaml.load(config_string)

        self.bridge = CvBridge()

        self.light_classifier = TLClassifier()
        self.listener = tf.TransformListener()

        self.switcher = {
            TrafficLight.RED: "RED",
            TrafficLight.YELLOW: "YELLOW",
            TrafficLight.GREEN: "GREEN",
        }

        self.colors = {
            "RED": (255, 0, 0),
            "YELLOW": (255, 255, 0),
            "GREEN": (0, 255, 0)
        }

        self.light_detector = rospy.get_param('~light_detector', False)
        self.debug_window = rospy.get_param('~debug_window', False)

        rospy.logwarn('light detector = {}'.format(self.light_detector))
        rospy.logwarn('debug window = {}'.format(self.debug_window))

        self.trafficLightState = rospy.Publisher('/dbg/traffic_light_state',
                                                 Image,
                                                 queue_size=1)

        sub1 = rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)
        sub2 = rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)
        '''
        /vehicle/traffic_lights provides you with the location of the traffic light in 3D map space and
        helps you acquire an accurate ground truth data source for the traffic light
        classifier by sending the current color state of all traffic lights in the
        simulator. When testing on the vehicle, the color state will not be available. You'll need to
        rely on the position of the light and the camera image to predict it.
        '''
        sub3 = rospy.Subscriber('/vehicle/traffic_lights', TrafficLightArray,
                                self.traffic_cb)
        sub6 = rospy.Subscriber('/image_color', Image, self.image_cb)

        self.upcoming_red_light_pub = rospy.Publisher('/traffic_waypoint',
                                                      Int32,
                                                      queue_size=1)

        rospy.spin()

    def pose_cb(self, msg):
        self.pose = msg

    def waypoints_cb(self, msg):
        self.waypoints = msg.waypoints
        rospy.loginfo('Waypoints Received')

    def traffic_cb(self, msg):
        self.lights = msg.lights

    def image_cb(self, msg):
        """Identifies red lights in the incoming camera image and publishes the index
            of the waypoint closest to the red light's stop line to /traffic_waypoint
        Args:
            msg (Image): image from car-mounted camera
        """
        self.has_image = True
        self.camera_image = msg

        light_wp, state = self.process_traffic_lights()
        light_class = self.switcher.get(state, "UNKNOWN")

        rospy.logwarn('light {} light_wp {} state {}'.format(
            light_class, light_wp, Int32(state)))
        '''
        Publish upcoming red lights at camera frequency.
        Each predicted state has to occur `STATE_COUNT_THRESHOLD` number
        of times till we start using it. Otherwise the previous stable state is
        used.
        '''
        if self.state != state:
            self.state_count = 0
            self.state = state
        elif self.state_count >= rospy.get_param('~state_count_threshold', 3):
            self.last_state = self.state
            light_wp = light_wp if state == TrafficLight.RED or state == TrafficLight.YELLOW else -1
            self.last_wp = light_wp
            self.upcoming_red_light_pub.publish(Int32(light_wp))
        else:
            self.upcoming_red_light_pub.publish(Int32(self.last_wp))
        self.state_count += 1

    def get_closest_waypoint(self, pose):
        """Identifies the closest path waypoint to the given position
            https://en.wikipedia.org/wiki/Closest_pair_of_points_problem
        Args:
            pose (Pose): position to match a waypoint to
        Returns:
            int: index of the closest waypoint in self.waypoints
        """
        # if waypoints have not been received yet
        if self.waypoints is None:
            return None

        # transform waypoints to array
        waypoints_array = np.asarray([(w.pose.pose.position.x,
                                       w.pose.pose.position.y)
                                      for w in self.waypoints])

        position_array = np.asarray([pose.position.x, pose.position.y])
        # calculate euclidean distance
        distance = np.sqrt(np.sum((waypoints_array - position_array)**2,
                                  axis=1))
        # get closest
        index = np.argmin(distance)

        return index

    def get_distance(self, p1, p2):
        x, y, z = p1.x - p2.x, p1.y - p2.y, p1.z - p2.z
        return math.sqrt(x * x + y * y + z * z)

    def get_stop_line_waypoint(self, stop_line_positions):

        closest_idx = -1
        min_dist = LARGE_NUMBER
        for pos in stop_line_positions:
            stop_line = PoseStamped()
            stop_line.pose.position.x = float(pos[0])
            stop_line.pose.position.y = float(pos[1])

            dist = self.get_distance(stop_line.pose.position,
                                     self.pose.pose.position)
            if dist < min_dist and self.get_closest_waypoint(
                    stop_line.pose) > self.get_closest_waypoint(
                        self.pose.pose):
                closest_idx = self.get_closest_waypoint(stop_line.pose)
                min_dist = dist

        return closest_idx

    def process_traffic_lights(self):
        """Finds closest visible traffic light, if one exists, and determines its
            location and color
        Returns:
            int: index of waypoint closest to the upcoming stop line for a traffic light (-1 if none exists)
            int: ID of traffic light color (specified in styx_msgs/TrafficLight)
        """
        light = None
        light_wp = None
        cv_image = self.bridge.imgmsg_to_cv2(self.camera_image, "rgb8")

        config_string = rospy.get_param("/traffic_light_config")
        stop_line_positions = yaml.load(config_string)['stop_line_positions']
        #self.stop_line_wps = self.get_stop_line_waypoints(stop_line_positions)

        # List of positions that correspond to the line to stop in front of for a given intersection
        stop_line_positions = self.config['stop_line_positions']
        if (self.pose):
            car_position = self.get_closest_waypoint(self.pose.pose)

        # find the closest visible traffic light (if one exists)
        if self.light_detector:
            time_start = time.time()
            state, score = self.light_classifier.get_classification(cv_image)
            elapsed_time = time.time() - time_start
            rospy.logwarn('light_detector time: {0}'.format(elapsed_time))

            #rospy.logwarn('light_wp_idx: {0}'.format(light_wp))
            light_wp = self.get_stop_line_waypoint(stop_line_positions)
            #rospy.logwarn('light_wp_idx: {0}'.format(light_wp))

            if self.debug_window:
                if state == TrafficLight.UNKNOWN:
                    self.trafficLightState.publish(
                        self.bridge.cv2_to_imgmsg(
                            cv2.cvtColor(np.zeros((600, 800), np.uint8),
                                         cv2.COLOR_GRAY2RGB), "rgb8"))
                else:
                    light_class = self.switcher.get(state, "UNKNOWN")
                    cv2.putText(cv_image, "%s %f" % (light_class, score),
                                (10, 550), cv2.FONT_HERSHEY_SIMPLEX, 1,
                                self.colors.get(light_class), 2)

                    self.trafficLightState.publish(
                        self.bridge.cv2_to_imgmsg(cv_image, "rgb8"))

            time_end = time.time()

            #rospy.loginfo('Traffic lights detection (ms) {} '.format((time_end - time_start) * 1000))

            return light_wp, state

        self.waypoints = None
        return -1, TrafficLight.UNKNOWN
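
The get_stop_line_waypoint method above scans every stop line and keeps the closest one whose waypoint index is still ahead of the car's. A ROS-free sketch of that selection is below; the stop-line coordinates, the precomputed waypoint indices, and the next_stop_line_index name are hypothetical.

# Sketch of the get_stop_line_waypoint() idea above: among stop lines whose nearest
# waypoint lies ahead of the car's waypoint, pick the one closest to the car.
import math

def next_stop_line_index(car_xy, car_wp_idx, stop_lines, stop_line_wp_idxs):
    closest_idx, min_dist = -1, float('inf')
    for (sx, sy), wp_idx in zip(stop_lines, stop_line_wp_idxs):
        dist = math.hypot(sx - car_xy[0], sy - car_xy[1])
        if dist < min_dist and wp_idx > car_wp_idx:   # only lines ahead of the car
            closest_idx, min_dist = wp_idx, dist
    return closest_idx

stop_lines = [(20.0, 0.0), (80.0, 0.0), (150.0, 0.0)]
stop_line_wp_idxs = [20, 80, 150]
print(next_stop_line_index((50.0, 0.0), 50, stop_lines, stop_line_wp_idxs))  # 80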
Example #30
class TLDetector(object):
    def __init__(self):
        rospy.init_node('tl_detector')

        self.pose = None
        self.waypoints = None
        self.camera_image = None
        self.light_map = None
        self.light_distance = None
        self.light_is_behind = None
        self.closest = None

        sub1 = rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)
        sub2 = rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)

        '''
        /vehicle/traffic_lights provides you with the location of the traffic light in 3D map space and
        helps you acquire an accurate ground truth data source for the traffic light
        classifier by sending the current color state of all traffic lights in the
        simulator. When testing on the vehicle, the color state will not be available. You'll need to
        rely on the position of the light and the camera image to predict it.
        '''

        sub6 = rospy.Subscriber('/image_color', Image, self.image_cb)

        config_string = rospy.get_param("/traffic_light_config")
        self.config = yaml.load(config_string)

        self.upcoming_red_light_pub = rospy.Publisher('/traffic_waypoint', Int32, queue_size=1)

        self.bridge = CvBridge()

        self.STATE_COUNT_THRESHOLD = rospy.get_param("~state_count_threshold")

        use_simulator_classifier = rospy.get_param("~traffic_light_classifier_sim")
        self.light_classifier = TLClassifier(sim = use_simulator_classifier)

        self.state = TrafficLight.UNKNOWN
        self.last_state = TrafficLight.UNKNOWN
        self.last_wp = -1
        self.state_count = 0
        self.has_image = False

        rate = rospy.get_param("~rate")

        r = rospy.Rate(rate)
        while not rospy.is_shutdown():
            self.process_image()
            r.sleep()

    def pose_cb(self, msg):
        self.pose = msg

    def waypoints_cb(self, waypoints): # type: Lane
        self.waypoints = waypoints
        if self.light_map is None:
            stop_line_positions = self.config['stop_line_positions']
            self.light_map = LightMap(waypoints, stop_line_positions)

    def process_image(self):
        if (self.has_image):
            light_wp, state = self.process_traffic_lights()

            '''
            Publish upcoming red lights at camera frequency.
            Each predicted state has to occur `STATE_COUNT_THRESHOLD` number
            of times till we start using it. Otherwise the previous stable state is
            used.
            '''
            if self.state != state:
                self.state_count = 0
                self.state = state
            elif self.state_count >= self.STATE_COUNT_THRESHOLD:
                self.last_state = self.state
                light_wp = light_wp if state == TrafficLight.RED else -1
                self.last_wp = light_wp
                self.upcoming_red_light_pub.publish(Int32(light_wp))
            else:
                self.upcoming_red_light_pub.publish(Int32(self.last_wp))
            self.state_count += 1

    def image_cb(self, msg):
        """Identifies red lights in the incoming camera image and publishes the index
            of the waypoint closest to the red light's stop line to /traffic_waypoint

        Args:
            msg (Image): image from car-mounted camera

        """
        self.has_image = True
        self.camera_image = msg

    def get_closest_waypoint(self, pose):
        """Identifies the closest path waypoint to the given position
            https://en.wikipedia.org/wiki/Closest_pair_of_points_problem
        Args:
            pose (Pose): position to match a waypoint to

        Returns:
            int: index of the closest waypoint in self.waypoints

        """
        return util.get_closest_waypoint(self.waypoints, pose)

    def project_to_image_plane(self, point_in_world):
        return (0, 0)

    def get_light_state(self):
        """Determines the current color of the traffic light

        Args:
            light (TrafficLight): light to classify

        Returns:
            int: ID of traffic light color (specified in styx_msgs/TrafficLight)

        """
        if(not self.has_image):
            self.prev_light_loc = None
            return False

        cv_image = self.bridge.imgmsg_to_cv2(self.camera_image, "bgr8")

        #Get classification
        pred = self.light_classifier.get_classification(cv_image)
        return pred

    def process_traffic_lights(self):
        """Finds closest visible traffic light, if one exists, and determines its
            location and color

        Returns:
            int: index of waypoint closest to the upcoming stop line for a traffic light (-1 if none exists)
            int: ID of traffic light color (specified in styx_msgs/TrafficLight)

        """
        start_time = rospy.get_time()

        if self.pose and self.light_map:
            closest_waypoint_to_light = self.light_map.get_closest_waypoint_to_upcoming_light(self.pose)
        else:
            closest_waypoint_to_light = -1

        end_time = rospy.get_time()
        get_closest_waypoint_timespan = end_time - start_time
        start_time = end_time

        state = self.get_light_state()

        end_time = rospy.get_time()
        get_light_state_timespan = end_time - start_time
        start_time = end_time

        rospy.loginfo('(get_closest_waypoint, get_light_state): ({}, {})'.format(get_closest_waypoint_timespan, get_light_state_timespan))
        return closest_waypoint_to_light, state
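
Example #30 differs from the callback-driven variants: image_cb only stores the latest frame, and a fixed-rate loop (rospy.Rate) performs the expensive classification. Below is a rough, ROS-free sketch of that producer/consumer split; the RateLimitedDetector class, the 2 Hz rate, and the stand-in classifier are illustrative assumptions rather than the original implementation.

# ROS-free sketch of Example #30's structure: the callback only remembers the latest
# frame, and a fixed-rate loop does the (slow) classification.
import threading
import time

class RateLimitedDetector(object):
    def __init__(self, rate_hz=2.0):
        self.period = 1.0 / rate_hz
        self.latest_image = None
        self.lock = threading.Lock()

    def image_cb(self, image):
        # Cheap: just remember the most recent frame.
        with self.lock:
            self.latest_image = image

    def classify(self, image):
        time.sleep(0.1)               # stand-in for a slow neural-network call
        return 'RED'

    def spin(self, cycles=3):
        for _ in range(cycles):
            start = time.time()
            with self.lock:
                image = self.latest_image
            if image is not None:
                print('state: ' + self.classify(image))
            time.sleep(max(0.0, self.period - (time.time() - start)))

det = RateLimitedDetector()
det.image_cb('frame-001')
det.spin()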
Example #31
class TLDetector(object):
    def __init__(self):
        rospy.init_node('tl_detector')

        self.pose = None
        self.waypoints = None
        self.waypoints_2d = None
        self.camera_image = None
        self.lights = []

        sub1 = rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)
        sub2 = rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)

        '''
        /vehicle/traffic_lights provides you with the location of the traffic light in 3D map space and
        helps you acquire an accurate ground truth data source for the traffic light
        classifier by sending the current color state of all traffic lights in the
        simulator. When testing on the vehicle, the color state will not be available. You'll need to
        rely on the position of the light and the camera image to predict it.
        '''
        sub3 = rospy.Subscriber('/vehicle/traffic_lights', TrafficLightArray, self.traffic_cb)
        # Combining queue_size and buff_size to minimize lag, see https://answers.ros.org/question/220502/image-subscriber-lag-despite-queue-1/
        sub6 = rospy.Subscriber('/image_color', Image, self.image_cb, queue_size=1, buff_size=10000000)

        config_string = rospy.get_param("/traffic_light_config")
        self.config = yaml.load(config_string)

        self.upcoming_red_light_pub = rospy.Publisher('/traffic_waypoint', Int32, queue_size=1)

        self.bridge = CvBridge()
        self.light_classifier = TLClassifier()
        self.light_score_confidence_threshold = 0.4  # when score is above this threshold, we are confident about the prediction
        self.light_score_unknown_threashold = 0.2  # when score is below this threshold, the light is classified as unknown
        #for matching tensorflow class (R:1, G:2, Y:3) with TrafficLight msg (R:0, Y:1, G:2)
        self.light_class_dict = {1:0, 2:2, 3:1}
        #TrafficLight msg (R:0, Y:1, G:2, Un:4) to names
        self.light_name_dict = {0:'Red_traffic_light',
                                1:'Yellow_traffic_light',
                                2:'Green_traffic_light',
                                4:'Unknown'}
        self.listener = tf.TransformListener()

        self.state = TrafficLight.UNKNOWN
        self.last_state = TrafficLight.UNKNOWN
        self.last_wp = -1
        self.state_count = 0

        rospy.spin()

    def pose_cb(self, msg):
        self.pose = msg

    def waypoints_cb(self, waypoints):
        self.waypoints = waypoints
        if not self.waypoints_2d:
            self.waypoints_2d = [[waypoint.pose.pose.position.x, waypoint.pose.pose.position.y]
                                 for waypoint in waypoints.waypoints]
            self.waypoint_tree = KDTree(self.waypoints_2d)

    def traffic_cb(self, msg):
        self.lights = msg.lights

    def image_cb(self, msg):
        """Identifies red lights in the incoming camera image and publishes the index
            of the waypoint closest to the red light's stop line to /traffic_waypoint

        Args:
            msg (Image): image from car-mounted camera

        """
        self.has_image = True
        self.camera_image = msg
        light_wp, state, score = self.process_traffic_lights()
        rospy.loginfo('Current detection state: {}, Light waypoint index: {}'.format(
                                            self.light_name_dict[state],
                                            light_wp))
        '''
        Publish upcoming red lights at camera frequency.
        Each predicted state has to either occur `STATE_COUNT_THRESHOLD` number
        of times or have a high predicted score before we start using it.
        Otherwise the previous stable state is used.
        '''
        if (self.state != state) and (score < self.light_score_confidence_threshold):
            self.state_count = 1
            self.state = state
        elif (self.state_count >= STATE_COUNT_THRESHOLD) or (
            score >= self.light_score_confidence_threshold):
            self.last_state = self.state
            light_wp = light_wp if state in (
                TrafficLight.RED, TrafficLight.YELLOW) else -1
            self.last_wp = light_wp
            self.upcoming_red_light_pub.publish(Int32(light_wp))
        else:
            self.upcoming_red_light_pub.publish(Int32(self.last_wp))
        self.state_count += 1

        if self.last_wp == -1:
            rospy.loginfo('Execution: continue driving')
        else:
            rospy.loginfo('Execution: STOPPING')

    def get_closest_waypoint(self, x, y):
        """Identifies the closest path waypoint to the given position
            https://en.wikipedia.org/wiki/Closest_pair_of_points_problem
        Args:
            pose (Pose): position to match a waypoint to

        Returns:
            int: index of the closest waypoint in self.waypoints

        """
        closest_idx = self.waypoint_tree.query([x, y], 1)[1]
        return closest_idx

    def get_light_state(self, light):
        """Calling the classifier to get the current color of the traffic light

        Args:
            light (TrafficLight): light to classify

        Returns:
            int: ID of traffic light color (specified in styx_msgs/TrafficLight)
            score: score of predicted traffic light state. Higher prediction score means higher confidence.
        """
        if not self.has_image:
            self.prev_light_loc = None
            return False

        cv_image = self.bridge.imgmsg_to_cv2(self.camera_image, "rgb8")

        # expand_dims to shape [1, None, None, 3].
        image_expanded = np.expand_dims(cv_image, axis=0)

        t_start = rospy.get_time()
        boxes, scores, classes, num  = self.light_classifier.get_classification(image_expanded)
        classification_time = rospy.get_time() - t_start

        rospy.loginfo('Detected Class: {}, Score: {:.4f},  Inference time: {:.4f}s'.format(
                                                self.light_name_dict[self.light_class_dict[classes[0][0]]],
                                                scores[0][0],
                                                classification_time))
        if num > 0 and scores[0][0] > self.light_score_unknown_threashold:
            return self.light_class_dict[classes[0][0]], scores[0][0]

        return TrafficLight.UNKNOWN, 0

    def process_traffic_lights(self):
        """Finds closest visible traffic light, if one exists, and determines its
            location and color

        Returns:
            int: index of waypoint closest to the upcoming stop line for a traffic light (-1 if none exists)
            int: ID of traffic light color (specified in styx_msgs/TrafficLight)
            float: predicted score of the traffic light state. Higher score means higher prediction confidence.
        """
        closest_light = None
        light_wp_idx = None
        car_wp_idx = None

        # List of positions that correspond to the line to stop in front of for a given intersection
        stop_line_positions = self.config['stop_line_positions']
        if(self.pose):
            car_wp_idx = self.get_closest_waypoint(self.pose.pose.position.x, self.pose.pose.position.y)
            rospy.loginfo("Car waypoint idx: " + str(car_wp_idx))
            # find the closest visible traffic light (if one exists)
            diff = len(self.waypoints.waypoints)
            for i, light in enumerate(self.lights):
                #get stopline waypoint index
                line = stop_line_positions[i]
                temp_wp_idx = self.get_closest_waypoint(line[0], line[1])
                d = temp_wp_idx - car_wp_idx
                if d >= 0 and d < diff:
                    diff = d
                    closest_light = light
                    light_wp_idx = temp_wp_idx

        do_detection = self.is_within_detection_distance(self.waypoints.waypoints, car_wp_idx, light_wp_idx)

        if closest_light and do_detection:
            state, score = self.get_light_state(closest_light)
            return light_wp_idx, state, score
        else:
            if closest_light:
                rospy.loginfo("Did not run detector. Next light is too far away.")
            return -1, TrafficLight.UNKNOWN, 0

    def is_within_detection_distance(self, waypoints, wp1, wp2):
        """
        return true if the distance between the two waypoints is
        close enough to activate the traffic light classifier.

        """
        if wp1 is None or wp2 is None:
            return True
        dist = 0
        dl = lambda a, b: math.sqrt((a.x-b.x)**2 + (a.y-b.y)**2  + (a.z-b.z)**2)
        # jumping every 3 wps because we just need a coarse estimate
        for i in range(wp1, wp2+1, 3):
            dist += dl(waypoints[wp1].pose.pose.position, waypoints[i].pose.pose.position)
            wp1 = i
            if dist > TL_DETECTION_DISTANCE_THRESHOLD:
                return False
        return True
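
The light_class_dict and score thresholds in the example above remap the detector's class ids (R:1, G:2, Y:3) onto the styx_msgs/TrafficLight ids (R:0, Y:1, G:2) and discard weak detections. A small sketch of just that mapping step follows; the threshold value and the map_detection name are assumptions for illustration.

# Sketch of the score-gated class mapping used in get_light_state() above.
RED, YELLOW, GREEN, UNKNOWN = 0, 1, 2, 4
LIGHT_CLASS_DICT = {1: RED, 2: GREEN, 3: YELLOW}
UNKNOWN_THRESHOLD = 0.2   # assumed value of light_score_unknown_threashold

def map_detection(top_class, top_score, num_detections):
    if num_detections > 0 and top_score > UNKNOWN_THRESHOLD:
        return LIGHT_CLASS_DICT[top_class], top_score
    return UNKNOWN, 0.0

print(map_detection(1, 0.87, 10))   # (0, 0.87) -> RED with high confidence
print(map_detection(3, 0.12, 10))   # (4, 0.0)  -> too weak, report UNKNOWN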
Example #32
class TLDetector(object):
    def __init__(self):

        rospy.init_node('tl_detector')

        self.initialized = False
        self.pose_received = False
        self.waypoints_received = False
        self.image_received = False

        self.pose = None
        self.waypoints = None
        self.waypoints_2d = None
        self.waypoint_tree = None
        self.camera_image = None
        self.lights = []
        self.bridge = CvBridge()

        self.td_id = 1
        self.img_count = 0

        self.last_state = TrafficLight.UNKNOWN
        self.last_wp = -1
        self.state_count = 0

        self.process_count = 0
        self.last_img_processed = -1
        self.process_rate = CAMERA_IMG_PROCESS_RATE

        rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)
        rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)
        rospy.Subscriber('/vehicle/traffic_lights', TrafficLightArray, self.traffic_cb)
        rospy.Subscriber('/image_color', Image, self.image_cb, queue_size=1)

        config_string = rospy.get_param("/traffic_light_config")
        self.config = yaml.load(config_string)
        self.is_site = self.config["is_site"]

        self.upcoming_red_light_pub = rospy.Publisher('/traffic_waypoint', Int32, queue_size=1)
        self.tl_detector_state_pub = rospy.Publisher('/tl_detector_state', Int32, queue_size=1)
        self.is_site_pub = rospy.Publisher('/is_site', Int32, queue_size=1)

        self.state_count_th = STATE_COUNT_THRESHOLD

        if self.is_site:
            self.state_count_th = STATE_COUNT_THRESHOLD_SITE

        # Choose classifier
        if not self.is_site:
            self.classifier = TLClassifier(model_path=os.path.join(MODEL_PATH, 'sim_frozen_inference_graph.pb'),
                                           label_map_path=os.path.join(MODEL_PATH, 'tl_label_map.pbtxt'))
        else:
            self.classifier = TLClassifier(model_path=os.path.join(MODEL_PATH, 'site_frozen_inference_graph.pb'),
                                           label_map_path=os.path.join(MODEL_PATH, 'tl_label_map.pbtxt'))

        self.td_img_path = os.path.join(OUTPUT_PATH, 'IMG')

        if COLLECT_TD:
            if not os.path.exists(OUTPUT_PATH):
                os.mkdir(OUTPUT_PATH)
            else:
                raise Exception('Directory {} already exists. Please choose a different name.'.format(OUTPUT_PATH))

            if not os.path.exists(self.td_img_path):
                os.mkdir(self.td_img_path)

        self.last_img_processed = 0
        self.listener = tf.TransformListener()
        self.initialized = True
        rospy.spin()

    def is_initialized(self):
        return self.pose_received and self.waypoints_received and self.image_received and self.initialized

    def pose_cb(self, msg):
        self.pose = msg
        self.pose_received = True

    def waypoints_cb(self, waypoints):

        rospy.logwarn("Waypoints received")
        self.waypoints = waypoints
        self.waypoints_received = True

        if not self.waypoints_2d:
            self.waypoints_2d = [[waypoint.pose.pose.position.x, waypoint.pose.pose.position.y] for waypoint in waypoints.waypoints]
            self.waypoint_tree = KDTree(self.waypoints_2d)

    def traffic_cb(self, msg):
        self.lights = msg.lights

    def image_cb(self, msg):
        """Identifies red lights in the incoming camera image and publishes the index
            of the waypoint closest to the red light's stop line to /traffic_waypoint

        Args:
            msg (Image): image from car-mounted camera

        """
        self.image_received = True
        is_initialized = self.is_initialized()

        self.tl_detector_state_pub.publish(Int32(is_initialized))
        self.is_site_pub.publish(Int32(self.is_site))

        if not is_initialized:
            return

        time_elapsed = timer() - self.last_img_processed

        # Do not process the camera image unless some time has passed from last processing
        if time_elapsed < self.process_rate:
            return

        self.camera_image = msg

        self.last_img_processed = timer()
        light_wp, state = self.process_traffic_lights()
        rospy.logwarn('INFERENCE TIME: {:.3f} s'.format(timer()-self.last_img_processed))

        '''
        Collect Training Data
        '''
        self.img_count += 1
        label = tl_utils.tl_state_to_label(state)
        if COLLECT_TD and light_wp != -1 and self.img_count % TD_RATE == 0:
            cv_image = self.bridge.imgmsg_to_cv2(self.camera_image, "bgr8")
            tl_utils.save_td(uid=self.td_id, cv_image=cv_image, label=label, csv_path=OUTPUT_PATH,
                             img_path=self.td_img_path)
            self.td_id += 1

        '''
        Publish upcoming red lights at camera frequency.
        Each predicted state has to occur `STATE_COUNT_THRESHOLD` number
        of times till we start using it. Otherwise the previous stable state is
        used.
        '''

        # Ensure that the light state hasn't changed before taking any action
        if self.last_state != state:
            self.state_count = 0
            self.last_state = state

        self.state_count += 1

        if self.state_count >= self.state_count_th:
            self.last_state = state
            light_wp = light_wp if state == TrafficLight.RED else -1
            self.last_wp = light_wp

        self.upcoming_red_light_pub.publish(Int32(self.last_wp))

    def get_closest_waypoint(self, x, y):
        """Identifies the closest path waypoint to the given position
        Args:
            x: x position
            y: y position

        Returns:
            int: index of the closest waypoint in self.waypoints
        """
        return self.waypoint_tree.query([x, y], 1)[1]

    def get_light_state(self, light):
        """
        Determines the current color of the traffic light
        :param light: light to classify
        :return: ID of traffic light color (specified in styx_msgs/TrafficLight)
        """

        # Return light state as ground truth
        if COLLECT_TD and not self.is_site:
            return light.state
        else:
            # convert camera image to cv image
            cv_image = self.bridge.imgmsg_to_cv2(self.camera_image, "bgr8")

            if self.is_site:
                cv_image = prep.run_preprocessor_pipeline(cv_image)

            # Get classification
            return self.classifier.get_classification(cv_image, min_score=0.65)

    def process_traffic_lights(self):
        """Finds closest visible traffic light, if one exists, and determines its
            location and color

        Returns:
            int: index of waypoint closest to the upcoming stop line for a traffic light (-1 if none exists)
            int: ID of traffic light color (specified in styx_msgs/TrafficLight)

        """
        closest_light = None
        line_wp_idx = None

        # List of positions that correspond to the line to stop in front of for a given intersection
        stop_line_positions = self.config['stop_line_positions']

        if self.pose:
            car_wp_idx = self.get_closest_waypoint(self.pose.pose.position.x, self.pose.pose.position.y)

            # find the closest visible traffic light (if one exists)
            diff = len(self.waypoints.waypoints)

            for i, light in enumerate(self.lights):
                # Get stop line waypoint index
                line = stop_line_positions[i]
                temp_wp_idx = self.get_closest_waypoint(line[0], line[1])
                # Find closest stop line waypoint index
                d = temp_wp_idx - car_wp_idx
                if d >= 0 and d < diff:
                    diff = d
                    closest_light = light
                    line_wp_idx = temp_wp_idx

        '''
        elif self.is_site:
            self.process_count += 1
            state = self.get_light_state(None)

            if (self.process_count % LOGGING_THROTTLE_FACTOR) == 0:
                if state is not TrafficLight.UNKNOWN:
                    rospy.logwarn("Detected {color} traffic light".format(
                        color=StateToString[state]))
        '''

        if closest_light:
            distance = line_wp_idx - car_wp_idx
            if distance <= WAYPOINT_DIFFERENCE:
                self.process_count += 1
                state = self.get_light_state(closest_light)

                if (self.process_count % LOGGING_THROTTLE_FACTOR) == 0:
                    rospy.logwarn("TRAFFIC: Detected {color} traffic light at {idx}, distance={distance} ".format(
                        idx=line_wp_idx, color=StateToString[state], distance=distance))

                return line_wp_idx, state

        return -1, TrafficLight.UNKNOWN
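
Example #32 throttles the camera stream by comparing elapsed wall-clock time against CAMERA_IMG_PROCESS_RATE instead of counting frames. A ROS-free sketch of that throttle follows; the rate constant, the ThrottledProcessor class, and the simulated 20 Hz camera loop are assumptions for the demo.

# Sketch of the elapsed-time throttle in Example #32's image_cb(): frames arriving
# sooner than CAMERA_IMG_PROCESS_RATE seconds after the last processed one are dropped.
from timeit import default_timer as timer
import time

CAMERA_IMG_PROCESS_RATE = 0.2   # seconds between processed frames (assumed)

class ThrottledProcessor(object):
    def __init__(self):
        self.last_img_processed = 0.0

    def image_cb(self, frame_id):
        if timer() - self.last_img_processed < CAMERA_IMG_PROCESS_RATE:
            return False                                  # skip this frame
        self.last_img_processed = timer()
        print('processing frame {}'.format(frame_id))     # classifier call would go here
        return True

proc = ThrottledProcessor()
for i in range(5):
    proc.image_cb(i)
    time.sleep(0.05)   # ~20 Hz camera; only roughly every 4th frame gets processed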
Example #33
class TLDetector(object):
    def __init__(self):
        rospy.init_node('tl_detector')

        self.pose = None
        self.waypoints = None
        self.camera_image = None
        self.lights = None

        self.light_dict = None
        self.light_waypoints = None

        self.bridge = CvBridge()
        self.light_classifier = TLClassifier()

        self.state = TrafficLight.UNKNOWN
        self.state_count = 0
        self.last_state = TrafficLight.UNKNOWN
        self.last_wp = -1
        self.closest_waypoint = None

        self.image_date = datetime.now()

        sub1 = rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)
        self.waypoints_sub = rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)
        self.closest_waypoint_sub = rospy.Subscriber('/closest_waypoint', Int32, self.closest_waypoint_cb)

        '''
        /vehicle/traffic_lights provides you with the location of the traffic light in 3D map space and
        helps you acquire an accurate ground truth data source for the traffic light
        classifier by sending the current color state of all traffic lights in the
        simulator. When testing on the vehicle, the color state will not be available. You'll need to
        rely on the position of the light and the camera image to predict it.
        '''
        self.traffic_lights_sub = rospy.Subscriber('/vehicle/traffic_lights', TrafficLightArray, self.traffic_cb)
        sub6 = rospy.Subscriber('/image_color', Image, self.image_cb)
        self.upcoming_red_light_pub = rospy.Publisher('/traffic_waypoint', Int32, queue_size=1)

        config_string = rospy.get_param('/traffic_light_config')
        self.config = yaml.load(config_string)

        rospy.spin()

    def pose_cb(self, msg):
        self.pose = msg.pose

    def waypoints_cb(self, msg):
        self.waypoints = msg.waypoints
        self.waypoints_sub.unregister()

    def closest_waypoint_cb(self, msg):
        self.closest_waypoint = int(msg.data)

    def traffic_cb(self, msg):
        self.lights = msg.lights
        # --- Find all waypoints for traffic light.
        if (not self.light_waypoints or not self.light_dict) and self.waypoints:
            self.light_dict = {}
            for idx, alight in enumerate(self.lights):
                alight_wp = self.get_closest_waypoint(alight.pose.pose)
                self.light_dict[alight_wp] = alight
            self.light_waypoints = sorted(self.light_dict.keys())
            self.traffic_lights_sub.unregister()

    def image_cb(self, msg):
        """Identifies red lights in the incoming camera image and publishes the index
            of the waypoint closest to the red light's stop line to /traffic_waypoint

        Args:
            msg (Image): image from car-mounted camera

        """
        # Skip certain images to lower the processing rate.
        # TODO: Tune skip time.
        now = datetime.now()
        if now - self.image_date < timedelta(milliseconds=350):
            # Assume same as previous.
            light_wp, state = self.last_wp, self.state
        else:
            self.image_date = now
            # Process frame.
            self.has_image = True
            self.camera_image = msg
            light_wp, state = self.process_traffic_lights()

        '''
        Publish upcoming red lights at camera frequency.
        Each predicted state has to occur `STATE_COUNT_THRESHOLD` number
        of times till we start using it. Otherwise the previous stable state is
        used.
        '''
        if self.state != state:
            self.state_count = 0
            self.state = state
        elif self.state_count >= STATE_COUNT_THRESHOLD:
            self.last_state = self.state
            light_wp = light_wp if state == TrafficLight.RED else -1
            self.last_wp = light_wp
            self.upcoming_red_light_pub.publish(Int32(light_wp))
        else:
            self.upcoming_red_light_pub.publish(Int32(self.last_wp))
        self.state_count += 1

    def get_closest_waypoint(self, pose):
        """Identifies the closest path waypoint to the given position
            https://en.wikipedia.org/wiki/Closest_pair_of_points_problem
        Args:
            pose (Pose): position to match a waypoint to

        Returns:
            int: index of the closest waypoint in self.waypoints

        """
        closest_gap = float('inf')
        closest_idx = 0

        for idx, awp in enumerate(self.waypoints):
            agap = (awp.pose.pose.position.x - pose.position.x)**2 + \
                   (awp.pose.pose.position.y - pose.position.y)**2
            if agap < closest_gap:
                closest_gap = agap
                closest_idx = idx

        return closest_idx

    def get_light_state(self, light):
        """Determines the current color of the traffic light

        Args:
            light (TrafficLight): light to classify

        Returns:
            int: ID of traffic light color (specified in styx_msgs/TrafficLight)

        """
        # if(not self.has_image):
        #     self.prev_light_loc = None
        #     return False

        cv_image = self.bridge.imgmsg_to_cv2(self.camera_image, "bgr8")

        # Get classification
        prediction = self.light_classifier.get_classification(cv_image)
        # print '--->', prediction
        return prediction

    def process_traffic_lights(self):
        """Finds closest visible traffic light, if one exists, and determines its
            location and color

        Returns:
            int: index of waypoint closest to the upcoming stop line for a traffic light (-1 if none exists)
            int: ID of traffic light color (specified in styx_msgs/TrafficLight)

        """
        light_positions = self.config['stop_line_positions']

        if self.closest_waypoint and self.waypoints and self.lights and self.light_waypoints and self.light_dict:
            car_wp = self.closest_waypoint

            # --- Determine the next traffic light in waypoint order.
            light = None
            light_wp = None

            if car_wp <= self.light_waypoints[0]:
                light_wp = self.light_waypoints[0] # Before the first light waypoint.
            elif car_wp >= self.light_waypoints[-1]:
                light_wp = self.light_waypoints[0] # After the last light waypoint. So loop around.
            else:
                for light1_wp, light2_wp in zip(self.light_waypoints, self.light_waypoints[1:]):
                    if car_wp > light1_wp and car_wp <= light2_wp:
                        light_wp = light2_wp

            # --- Get next traffic light ahead.
            light = self.light_dict.get(light_wp)

            # Found light_wp and if it is close enough.
            # if light:
            #     print(light_wp - car_wp)
            if light and (light_wp - car_wp) < 200 and (light_wp - car_wp) > 30:
                state = self.get_light_state(light)
                # print(state)
                # --- TESTING ONLY
                # state = light.state
                # --- TESTING ONLY
                return light_wp, state
        return -1, TrafficLight.UNKNOWN
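
Example #33 keeps the lights' waypoint indices sorted and, given the car's waypoint, selects the first light at or after it, wrapping to the first light once the car has passed the last one. A compact sketch of that lookup using bisect follows; the sample indices are made up, and the behaviour at exact boundary indices may differ slightly from the loop above.

# Sketch of the "next light ahead" lookup in process_traffic_lights() above.
import bisect

def next_light_waypoint(car_wp, light_waypoints):
    i = bisect.bisect_left(light_waypoints, car_wp)
    if i == len(light_waypoints):
        return light_waypoints[0]     # past the last light: loop around
    return light_waypoints[i]

light_waypoints = [292, 753, 2047, 2580]
print(next_light_waypoint(100, light_waypoints))    # 292
print(next_light_waypoint(800, light_waypoints))    # 2047
print(next_light_waypoint(2600, light_waypoints))   # 292 (wrapped around)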
Example #34
class TLDetector(object):
    def __init__(self):
        rospy.init_node('tl_detector')

        self.pose = None
        self.waypoints = None
        self.waypoints_2d = None
        self.waypoint_tree = None
        self.use_camera = True
        self.camera_image = None
        self.has_image = False
        self.lights = []
        self.img_count = 0

        config_string = rospy.get_param("/traffic_light_config")
        self.config = yaml.load(config_string)

        if self.config['is_site']:
            rospy.loginfo("Carla is being used")
        else:
            rospy.loginfo("Simulation is being used")

        if self.use_camera:
            rospy.loginfo("Camera is being currently used")
        else:
            rospy.loginfo("Camera is not being used now")

        sub_pose = rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)
        sub_lane = rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)
        '''
        /vehicle/traffic_lights provides you with the location of the traffic light 
        in 3D map space and helps you acquire an accurate ground truth data source for the traffic light
        classifier by sending the current color state of all traffic lights in the
        simulator. When testing on the vehicle, the color state will not be available. You'll need to
        rely on the position of the light and the camera image to predict it.
        '''

        sub_traffic = rospy.Subscriber('/vehicle/traffic_lights',
                                       TrafficLightArray, self.traffic_cb)
        if self.config['is_site']:
            sub_image = rospy.Subscriber('/image_raw', Image, self.image_cb)
        else:
            sub_image = rospy.Subscriber('/image_color', Image, self.image_cb)

        self.upcoming_red_light_pub = rospy.Publisher('/traffic_waypoint',
                                                      Int32,
                                                      queue_size=1)

        self.bridge = CvBridge()
        self.light_classifier = TLClassifier(self.config['is_site'])
        self.listener = tf.TransformListener()

        self.state = TrafficLight.UNKNOWN
        self.last_state = TrafficLight.UNKNOWN
        self.last_wp = -1
        self.state_count = 0

        rospy.spin()

    def pose_cb(self, msg):
        """
        Function    : Function that copies the topic information to the class pose 
        
        Input Args  : topic message
        Output      : None
        """
        self.pose = msg

    def waypoints_cb(self, waypoints):
        """
        Function    : waypoint callback that populates the values in the waypoint class variable 
                      waypoints callback from the subscriber when the system receives the base waypoint
        
        Input Args  : waypoints
        Output      : None
        """
        self.waypoints = waypoints
        if not self.waypoints_2d:
            self.waypoints_2d = [[
                waypoint.pose.pose.position.x, waypoint.pose.pose.position.y
            ] for waypoint in waypoints.waypoints]
            self.waypoint_tree = KDTree(self.waypoints_2d)

    def traffic_cb(self, msg):
        """
        Function    : waypoint callback that populates 
        the values in the waypoint class variable 
        # traffic callback from the subscriber
        # when the system receives trafic light image(s) they are processed
        # if a traffic light is found in the image, it gets the closest waypoint 
        
        Input Args  : traffic image information
        Output      : None
        """
        self.lights = msg.lights
        if not self.use_camera and self.waypoints:
            light_wp, state = self.process_traffic_lights()

            if self.state != state:
                self.state_count = 0
                self.state = state
            elif self.state_count >= STATE_COUNT_THRESHOLD:
                self.last_state = self.state
                light_wp = light_wp if state == TrafficLight.RED else -1
                self.last_wp = light_wp
                self.upcoming_red_light_pub.publish(Int32(light_wp))
            else:
                self.upcoming_red_light_pub.publish(Int32(self.last_wp))
            self.state_count += 1

    def image_cb(self, msg):
        """
        Function    : image callback 
        This function identifies red lights in the input camera images
        imput variables: image from the camera on the car
        output : Publishes the closest waypoint index to the red light's stopping line to /traffic_waypoint
        There is no format return      
        
        Input Args  : traffic image information
        Output      : None
        """

        if self.use_camera and self.waypoints:
            if self.img_count >= IMG_COUNT_THRESHOLD:
                self.img_count = 0
                self.has_image = True
                self.camera_image = msg
                light_wp, state = self.process_traffic_lights()
                '''
                Publish upcoming red lights at camera frequency.
                Each predicted state has to occur `STATE_COUNT_THRESHOLD` number
                of times till we start using it. Otherwise the previous stable state is
                used.
                '''
                if self.state != state:
                    self.state_count = 0
                    self.state = state
                elif self.state_count >= STATE_COUNT_THRESHOLD:
                    self.last_state = self.state
                    light_wp = light_wp if state == TrafficLight.RED else -1
                    #rospy.loginfo("Publishing new waypoint")
                    self.upcoming_red_light_pub.publish(Int32(light_wp))
                    self.last_wp = light_wp
                else:
                    rospy.loginfo("Publishing old waypoints")
                    self.upcoming_red_light_pub.publish(Int32(self.last_wp))
                self.state_count += 1
            else:
                self.img_count += 1

    def get_closest_waypoint(self, x, y):
        """
        Function    : 
        This function identifies the closest path waypoint of the car's given position
        Reference : https://en.wikipedia.org/wiki/Closest_pair_of_points_problem
       
        Input Args  : position for waypoint matching
        Output      : index of the closest waypoint  
        """
        # Query the KD-tree built in waypoints_cb for the nearest waypoint index.
        closest_wp_idx = None
        if self.waypoint_tree:
            closest_wp_idx = self.waypoint_tree.query([x, y], 1)[1]
        return closest_wp_idx

    def get_light_state(self, light):
        """
        Function    : 
        This function obtains the color of the traffic light
        
        Input Args  : TrafficLight : which gives the light to classify
        Output      : ID of traffic light color as given in styx_msgs/TrafficLight
        """

        if not self.has_image:
            # No camera frame has been received yet; fall back to the
            # ground-truth state reported by the simulator for this light.
            self.prev_light_loc = None
            rospy.loginfo("No camera image received; using ground-truth light state")
            return light.state

        cv_image = self.bridge.imgmsg_to_cv2(self.camera_image, "bgr8")
        return self.light_classifier.get_classification(
            cv_image, self.config['is_site'])

    def process_traffic_lights(self):
        """
        Function    : This function finds closest visible traffic light and 
                      determines its location and color and hence the state of the light
        
        Input Args  : None
        Output      : index of waypoint closest to the upcoming stop line 
                      for a traffic light (-1 if none exists)
        """

        closest_light = None
        line_wp_idx = None

        # List of positions that correspond to the line to stop in front of for a given intersection
        stop_line_positions = self.config['stop_line_positions']
        # default code uses pose directly.  But I wanted to try using the x,y coordinates for better performance
        if (self.pose):
            car_wp_idx = self.get_closest_waypoint(self.pose.pose.position.x,
                                                   self.pose.pose.position.y)
        else:
            car_wp_idx = 0

        # Find the closest visible traffic light ahead of the car (if one exists).
        # Initialise the search distance to the total number of waypoints.
        wp_len = len(self.waypoints.waypoints)
        for i, light in enumerate(self.lights):
            # Get stop line waypoint index
            line = stop_line_positions[i]
            interim_wp_idx = self.get_closest_waypoint(line[0], line[1])
            # Find closest stop line waypoint index
            dist_wp = interim_wp_idx - car_wp_idx
            if dist_wp >= 0 and dist_wp < wp_len:
                wp_len = dist_wp
                closest_light = light
                line_wp_idx = interim_wp_idx

        if closest_light:
            if wp_len < 180:
                state = self.get_light_state(closest_light)
            else:
                state = TrafficLight.UNKNOWN
            return line_wp_idx, state

        return -1, TrafficLight.UNKNOWN
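
The KD-tree query in get_closest_waypoint() above returns the index of the nearest base waypoint to an (x, y) point. A small standalone sketch, assuming KDTree is scipy.spatial.KDTree (the import is not shown in the snippet) and using a toy waypoint list:

from scipy.spatial import KDTree

waypoints_2d = [[0.0, 0.0], [10.0, 0.0], [20.0, 5.0], [30.0, 5.0]]  # toy [x, y] waypoints
tree = KDTree(waypoints_2d)

# query() returns (distance, index); element [1] is the waypoint index,
# exactly as get_closest_waypoint() uses it above.
closest_idx = tree.query([12.0, 1.0], 1)[1]
print(closest_idx)  # -> 1
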
Example #35
class TLDetector(object):
    def __init__(self):
        rospy.init_node('tl_detector')
        self.simulator = True if rospy.get_param('~sim') == 1 else False

        self.pose = None
        self.waypoints = None
        self.camera_image = None
        self.lights = []
        self.bridge = CvBridge()
        self.light_classifier = TLClassifier(self.simulator)
        self.listener = tf.TransformListener()
        self.state = TrafficLight.UNKNOWN
        self.last_state = TrafficLight.UNKNOWN
        self.last_wp = -1
        self.state_count = 0
        self.camera_image = None
        self.seq = 0
        self.count = 0
        self.misscount = 0.
        self.totcount = 0.
        self.sight = 75.  # cautionary distance
        self.wpAcquired = False
        self.lastCrop = None
        self.imageAcquired = False

        sub1 = rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)
        sub2 = rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)
        '''
        /vehicle/traffic_lights provides you with the location of the traffic light in 3D map space and
        helps you acquire an accurate ground truth data source for the traffic light
        classifier by sending the current color state of all traffic lights in the
        simulator. When testing on the vehicle, the color state will not be available. You'll need to
        rely on the position of the light and the camera image to predict it.
        '''
        sub3 = rospy.Subscriber('/vehicle/traffic_lights', TrafficLightArray,
                                self.traffic_cb)

        config_string = rospy.get_param("/traffic_light_config")
        self.config = yaml.load(config_string)

        self.upcoming_red_light_pub = rospy.Publisher('/traffic_waypoint',
                                                      Int32,
                                                      queue_size=1)

        sub6 = rospy.Subscriber('/image_color', Image, self.image_cb)
        #rospy.spin()
        # Operations loop to identify red lights in the incoming camera image and publishes
        # the index of the waypoint closest to the red light's stop line to /traffic_waypoint
        rate = rospy.Rate(PUBLISHING_RATE)

        while not rospy.is_shutdown():
            if self.imageAcquired:
                light_wp, state = self.process_traffic_lights()
                '''
                Publish upcoming red lights at PUBLISHING_RATE frequency.
                Each predicted state has to occur `STATE_COUNT_THRESHOLD` number
                of times until we start using it. Otherwise the previous stable
                state is used.
                '''
                if self.state != state:
                    self.state_count = 0
                    self.state = state
                elif self.state_count >= STATE_COUNT_THRESHOLD:
                    self.last_state = self.state
                    light_wp = light_wp if state == TrafficLight.RED or state == TrafficLight.YELLOW else -1
                    self.last_wp = light_wp
                    self.upcoming_red_light_pub.publish(Int32(light_wp))
                else:
                    self.upcoming_red_light_pub.publish(Int32(self.last_wp))
                self.state_count += 1
            rate.sleep()

    def pose_cb(self, msg):
        self.pose = msg

    def waypoints_cb(self, msg):
        if not self.wpAcquired:
            self.wpAcquired = True
            self.waypoints = self.filterWaypoints(msg)
            if not self.simulator:
                self.sight = 16.  # adjust cautionary distance for site test
            print "cautionary distance", self.sight
            print self.waypoints[20].twist.twist.linear.x

    def traffic_cb(self, msg):
        self.lights = msg.lights

    def image_cb(self, msg):
        """Capture the inbound camera image
        Args:
            msg (Image): image from car-mounted camera
        """
        self.imageAcquired = True
        self.camera_image = msg

    def distance(self, pos1, pos2):
        return math.sqrt((pos1.position.x - pos2.position.x)**2 +
                         (pos1.position.y - pos2.position.y)**2)

    def get_closest_waypoint(self, pose):
        """Identifies the closest path waypoint to the given position
            https://en.wikipedia.org/wiki/Closest_pair_of_points_problem
        Args:
            pose (Pose): position to match a waypoint
        Returns:
            int: index of the closest waypoint in self.waypoints
        """
        index = -1  #Return if waypoints are empty
        if self.waypoints is None:
            return index

        lowDistance = 0
        #rospy.loginfo("len(wp): %d", len(self.waypoints))
        for i in range(len(self.waypoints)):
            distance = self.distance(pose, self.waypoints[i].pose.pose)

            if index == -1 or distance < lowDistance:
                index = i
                lowDistance = distance

        return index

    def get_light_state(self, light):
        """Determines the current color of the traffic light
        Args:
            light (TrafficLight): light to classify
        Returns:
            int: ID of traffic light color (specified in styx_msgs/TrafficLight)
        """
        if (not self.imageAcquired):
            self.prev_light_loc = None
            return TrafficLight.RED

        # fix camera encoding
        if hasattr(self.camera_image, 'encoding'):
            if self.camera_image.encoding == '8UC3':
                self.camera_image.encoding = "rgb8"
        else:
            self.camera_image.encoding = 'rgb8'
        cv_image = self.bridge.imgmsg_to_cv2(self.camera_image, "rgb8")

        # get classification (simulator classifies whole image)
        if self.simulator:
            return self.light_classifier.get_classification(cv_image)

        # get classification (test site detects boxes using SSD and classifies with OpenCV)
        # if previous location is not known, scan most of the image
        if self.lastCrop is None:
            gostop, found, location = self.light_classifier.get_classification(
                cv_image[0:500, 50:50 + 700])
            if found:
                # check y extents
                if location[0] < 150:
                    top = 0
                    bot = 300
                else:
                    top = location[0] - 150
                    if location[0] > 350:
                        top = 200
                        bot = 500
                    else:
                        bot = location[0] + 150

                # check x extents (remember, offset by 50)
                if location[1] + 50 < 150:
                    left = 50
                    right = 350
                else:
                    left = location[1] + 50 - 150
                    if location[1] + 50 > 600:
                        left = 450
                        right = 750
                    else:
                        right = location[1] + 50 + 150
                self.lastCrop = (top, bot, left, right)
                #print "first", self.lastCrop
            # (no else) TL not found, next cycle will be a complete search again
        # otherwise, use last known location as crop starting point
        else:
            (top, bot, left, right) = self.lastCrop
            gostop, found, location = self.light_classifier.get_classification(
                cv_image[top:bot, left:right])
            if found:  # determine crop for next cycle
                # check y extents, offset by top
                otop, oleft = top, left
                if location[0] + otop < 150:
                    top = 0
                    bot = 300
                else:
                    top = location[0] + otop - 150
                    if location[0] + otop > 350:
                        top = 200
                        bot = 500
                    else:
                        bot = location[0] + otop + 150

                # check x extents, offset by left
                if location[1] + oleft + 50 < 200:
                    left = 50
                    right = 350
                else:
                    left = location[1] + oleft + 50 - 150
                    if location[1] + oleft + 50 > 600:
                        left = 450
                        right = 750
                    else:
                        right = location[1] + oleft + 50 + 150

                self.lastCrop = (top, bot, left, right)
                #print "next", self.lastCrop
            else:
                self.lastCrop = None
                #print "none"
        return gostop

    def get_nearest_stop_line(self, waypoint_start_index):
        stop_line = None
        # List of positions that correspond to the line to stop in front of for a given intersection
        stop_line_positions = self.config['stop_line_positions']
        last_index = 99999

        for i in range(0, len(stop_line_positions)):
            stopline_pose = PoseStamped()
            stopline_pose.pose.position.x = float(stop_line_positions[i][0])
            stopline_pose.pose.position.y = float(stop_line_positions[i][1])
            index = self.get_closest_waypoint(stopline_pose.pose)
            if index > waypoint_start_index and index < last_index:
                last_index = index
                stop_line = stopline_pose

        return stop_line, last_index

    def process_traffic_lights(self):
        """Finds closest visible traffic light, if one exists, and determines its
            location and color
        Returns:
            int: index of waypoint closest to the upcoming stop line for a traffic light (-1 if none exists)
            int: ID of traffic light color (specified in styx_msgs/TrafficLight)
        """
        if (self.pose):
            car_position = self.get_closest_waypoint(self.pose.pose)
            #rospy.loginfo("car_position: %d", car_position)
            if car_position > 0:
                stop_pos, stop_waypoint = self.get_nearest_stop_line(
                    car_position)

                if stop_pos is not None:
                    #rospy.loginfo("stop_pos x: %.2f, stop_waypoint: %d", stop_pos.pose.position.x, stop_waypoint)
                    state = TrafficLight.UNKNOWN

                    # if the traffic light is within sight, then attempt to classify
                    if self.distance(self.pose.pose,
                                     stop_pos.pose) < self.sight:

                        if CLASSIFIER_ENABLED:
                            state = self.get_light_state(None)
                            if self.lights is not None:
                                stateTruth = TrafficLight.UNKNOWN
                                for light in self.lights:
                                    # Get the ground truth from /vehicle/traffic_lights
                                    if self.distance(light.pose.pose,
                                                     stop_pos.pose) < 30.:
                                        stateTruth = light.state
                                        break
                                # 0=RED, 1=YELLOW, 2=GREEN, 4=UNKNOWN (styx_msgs/TrafficLight)
                                if (((stateTruth == 4) or
                                     (stateTruth == 2)) and
                                    (state == 0)) or (((stateTruth == 0) or
                                                       (stateTruth == 1)) and
                                                      (state == 4)):
                                    print "Classifier: ", state, " Truth: ", stateTruth
                                    state = stateTruth
                                    #self.saveImage(self.camera_image, stateTruth)
                                    self.misscount += 1.
                                self.totcount += 1.
                                #print "mismatch%: ", self.misscount / self.totcount
                                #self.saveImage(self.camera_image, stateTruth)
                        else:
                            for light in self.lights:
                                # This section uses only /vehicle/traffic_lights
                                if self.distance(light.pose.pose,
                                                 stop_pos.pose) < 30.:
                                    state = light.state
                                    #rospy.loginfo("light state: %d, x: %.2f, y: %.2f", state, self.pose.pose.position.x, self.pose.pose.position.y)
                                    #rospy.loginfo("dist tl to stop: %.2f", self.distance(light.pose.pose, stop_pos.pose))
                                    break
                        return stop_waypoint, state
        #self.waypoints = None
        return -1, TrafficLight.UNKNOWN

    def saveImage(self, img, state):
        #if self.count%2==0:
        # fix camera encoding
        if hasattr(img, 'encoding'):
            if img.encoding == '8UC3':
                img.encoding = "rgb8"
        else:
            img.encoding = 'rgb8'
        img = self.bridge.imgmsg_to_cv2(img, "rgb8")

        image_data = cv2.resize(img, (224, 224))
        img = PIL_Image.fromarray(image_data, 'RGB')
        #if state == TrafficLight.RED:
        #    img.save('/home/student/data/red/out'+str(self.seq).zfill(5)+'.png', 'PNG')
        #    self.seq += 1
        if state == TrafficLight.YELLOW:
            img.save(
                '/home/student/data/yellow/out' + str(self.seq).zfill(5) +
                '.png', 'PNG')
            self.seq += 1
        elif state == TrafficLight.GREEN:
            img.save(
                '/home/student/data/green/out' + str(self.seq).zfill(5) +
                '.png', 'PNG')
            self.seq += 1
        #else:
        #    img.save('/home/student/data/unknown/out'+str(self.seq).zfill(5)+'.png', 'PNG')
        #    self.seq += 1

    def filterWaypoints(self, wp):
        # A first-waypoint x of 10.4062 identifies a particular base-waypoint set;
        # in that case, reload the waypoints from the CSV file given by ~path.
        if wp.waypoints[0].pose.pose.position.x == 10.4062:
            waypoints = []
            path = rospy.get_param('~path')
            if not os.path.isfile(path):
                return wp.waypoints
            with open(path) as wfile:
                reader = csv.DictReader(wfile, ['x', 'y', 'z', 'yaw'])
                for wp in reader:
                    p = Waypoint()
                    p.pose.pose.position.x = float(wp['x'])
                    p.pose.pose.position.y = float(wp['y'])
                    p.pose.pose.position.z = float(wp['z'])
                    q = tf.transformations.quaternion_from_euler(
                        0., 0., float(wp['yaw']))
                    p.pose.pose.orientation = Quaternion(*q)
                    p.twist.twist.linear.x = 2.7777778
                    waypoints.append(p)
            return waypoints
        return wp.waypoints
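
On the test site, the example above narrows the classifier's input by cropping the image around the last known light location. The pixel thresholds in get_light_state() are specific to that camera; the sketch below only illustrates the general idea, with made-up window and image sizes: keep a fixed-size window centred on the last detection and shift it inwards when it would leave the image.

def crop_window(center_row, center_col, img_h, img_w, half=150):
    # Clamp a (2*half) x (2*half) window centred on (center_row, center_col)
    # so that it always stays inside an img_h x img_w image.
    top = min(max(center_row - half, 0), img_h - 2 * half)
    left = min(max(center_col - half, 0), img_w - 2 * half)
    return top, top + 2 * half, left, left + 2 * half

print(crop_window(40, 700, 600, 800))   # -> (0, 300, 500, 800)
print(crop_window(400, 100, 600, 800))  # -> (250, 550, 0, 300)
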
Example #36
class TLDetector(object):
    def __init__(self):
        rospy.init_node('tl_detector')

        self.use_classifier = rospy.get_param('~use_classifier', False)
        print(("self.use_classifier:", self.use_classifier))

        self.current_pose = None
        self.base_waypoints = None

        self.has_image = False
        self.camera_image = None
        self.lights = []

        rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)
        rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)

        '''
        /vehicle/traffic_lights provides you with the location of the traffic light in 3D map space and
        helps you acquire an accurate ground truth data source for the traffic light
        classifier by sending the current color state of all traffic lights in the
        simulator. When testing on the vehicle, the color state will not be available. You'll need to
        rely on the position of the light and the camera image to predict it.
        '''
        rospy.Subscriber('/vehicle/traffic_lights', TrafficLightArray, self.traffic_cb)
        #sub6 = rospy.Subscriber('/image_color', Image, self.image_cb)
        self.sub_image = rospy.Subscriber('/image_color', Image, self.image_cb, queue_size=1) #, buff_size=4*52428800) 
        print("subscribed")

        config_string = rospy.get_param("/traffic_light_config")
        self.config = yaml.load(config_string)

        self.upcoming_red_light_pub = rospy.Publisher('/traffic_waypoint', Int32, queue_size=1)

        self.state = TrafficLight.UNKNOWN
        self.last_state = TrafficLight.UNKNOWN
        self.last_wp = NO_CAMERA_YET
        self.state_count = 0

        self.stop_waypoints = []

        self.car_wp = -1
        self.nlight_i = NO_STOP
        self.nlight_wp = NO_CAMERA_YET
        self.nlight = None

        self.last100 = -1

        self.initializing = True

        self.bridge = CvBridge()
        self.light_classifier = TLClassifier()
        self.listener = tf.TransformListener()

        self.loop()

    def loop(self):
        rate = rospy.Rate(1)
        while not rospy.is_shutdown():

            if not self.has_image:
                self.upcoming_red_light_pub.publish(Int32(NO_CAMERA_YET))

            else:                 
                if self.current_pose and self.base_waypoints and len(self.lights) > 0 and len(self.stop_waypoints) > 0:
                    if len(self.lights) != len(self.stop_waypoints):
                      print("We have different number of stops and lights!!!")
                      break

                    # we are near this point
                    self.car_wp = self.get_closest_waypoint(self.current_pose.pose)
                    if self.last100 != self.car_wp // 100:
                      self.last100 = self.car_wp // 100
                      print(("Car waypoint:", self.last100*100))
                   
                    self.nlight_i = self.find_next_stop(self.car_wp)

                    self.nlight_wp = NO_RED
                    self.nlight = None

                    if self.nlight_i >= 0 and (self.car_wp < self.stop_waypoints[self.nlight_i] + LOOK_BEHIND) and (self.car_wp > self.stop_waypoints[self.nlight_i] - LOOK_AHEAD):

                        self.nlight_wp = self.stop_waypoints[self.nlight_i]
                        self.nlight = self.lights[self.nlight_i]

                    if self.nlight_wp >= 0 and self.sub_image is None:

                        self.sub_image = rospy.Subscriber('/image_color', Image, self.image_cb, queue_size=1) #, buff_size=4*52428800) 
                        print("subscribed")

                    elif self.nlight_wp < 0 and self.sub_image is not None:

                        self.sub_image.unregister()
                        self.sub_image = None
                        print("unsubscribed")
                        self.last_wp = NO_RED
                        self.upcoming_red_light_pub.publish(Int32(NO_RED))

                else:
                    if not self.current_pose:
                        print("No pose!!!")
                    elif not self.base_waypoints:
                        print("No waypoints!!!")
                    elif len(self.lights) <= 0:
                        print("# of lights is zero!!!")
                    elif len(self.stop_waypoints) <= 0:
                        print("# of stops is zero!!!")

                    break

            rate.sleep()

    def pose_cb(self, msg):
        self.current_pose = msg

    def waypoints_cb(self, msg):
        self.base_waypoints = msg.waypoints

        # List of positions that correspond to the line to stop in front of for a given intersection
        self.stop_positions = self.config['stop_line_positions']

        for stop_pos in self.stop_positions:
           stop_pose_stamped = self.create_dummy_pose(stop_pos[0], stop_pos[1], stop_pos[2], stop_pos[3]) if len(stop_pos) == 4 \
                               else self.create_dummy_pose(stop_pos[0], stop_pos[1])
           stop_wp = self.get_closest_waypoint(stop_pose_stamped.pose)
           self.stop_waypoints.append(stop_wp)

        print(self.stop_waypoints)

    def traffic_cb(self, msg):
        self.lights = msg.lights

    def image_cb(self, msg):
        """Identifies red lights in the incoming camera image and publishes the index
            of the waypoint closest to the red light's stop line to /traffic_waypoint

        Args:
            msg (Image): image from car-mounted camera

        """
        self.has_image = True
        self.camera_image = msg
        light_wp, state = self.process_traffic_lights()

        '''
        Publish upcoming red lights at camera frequency.
        Each predicted state has to occur `STATE_COUNT_THRESHOLD` number
        of times till we start using it. Otherwise the previous stable state is
        used.
        '''
        if self.state != state:
            self.state_count = 0
            self.state = state
        elif self.state_count >= STATE_COUNT_THRESHOLD:
            self.last_state = self.state
            
            if self.initializing:
                light_wp = STILL_INIT
            else:
                light_wp = light_wp if state == TrafficLight.RED or state == TrafficLight.YELLOW else NO_RED

            if state == TrafficLight.RED:
              print(("nlight_i:", self.nlight_i, "RED", self.state_count, "car_wp:", self.car_wp, "nlight_wp:", self.nlight_wp))
            if state == TrafficLight.YELLOW:
              print(("nlight_i:", self.nlight_i, "YELLOW", self.state_count, "car_wp:", self.car_wp, "nlight_wp:", self.nlight_wp))

            self.last_wp = light_wp
            print(("light_wp", light_wp, "car_wp:", self.car_wp))
            self.upcoming_red_light_pub.publish(Int32(light_wp))
        else:
            print(("last_wp", self.last_wp))
            self.upcoming_red_light_pub.publish(Int32(self.last_wp))
        self.state_count += 1

    def create_dummy_pose(self, x, y, z=0., yaw=0.):
        pose = PoseStamped()

        pose.pose.position.x = x
        pose.pose.position.y = y
        pose.pose.position.z = z

        q = tf.transformations.quaternion_from_euler(0., 0., math.pi * yaw/180.)
        pose.pose.orientation = Quaternion(*q)

        return pose

    def find_next_stop(self, wp):
        for i in range(len(self.stop_waypoints)):
           if wp < self.stop_waypoints[i] + LOOK_BEHIND:
              return i
        return NO_STOP

    def distance_sq(self, p1, p2):
        x, y, z = p1.x - p2.x, p1.y - p2.y, p1.z - p2.z
        return x*x + y*y + z*z

    def get_closest_waypoint(self, pose):
        """Identifies the closest path waypoint to the given position
            https://en.wikipedia.org/wiki/Closest_pair_of_points_problem
        Args:
            pose (Pose): position to match a waypoint to

        Returns:
            int: index of the closest waypoint in self.base_waypoints

        """
        self_pos = pose.position
        distances = np.array([self.distance_sq(self_pos, way_pos.pose.pose.position) for way_pos in self.base_waypoints])
        return np.argmin(distances)

    def get_light_state(self, light):
        """Determines the current color of the traffic light

        Args:
            light (TrafficLight): light to classify

        Returns:
            int: ID of traffic light color (specified in styx_msgs/TrafficLight)

        """
        #if(not self.has_image):
        #    self.prev_light_loc = None
        #    return TrafficLight.UNKNOWN

        cv_image = self.bridge.imgmsg_to_cv2(self.camera_image, "bgr8")

        #Get classification
        return self.light_classifier.get_classification(cv_image)

    def process_traffic_lights(self):
        """Finds closest visible traffic light, if one exists, and determines its
            location and color

        Returns:
            int: index of waypoint closes to the upcoming stop line for a traffic light (-1 if none exists)
            int: ID of traffic light color (specified in styx_msgs/TrafficLight)

        """
        wp, color = STILL_INIT, TrafficLight.UNKNOWN

        if self.nlight_wp >= 0 and self.nlight:
            if self.use_classifier:
                wp, color = self.nlight_wp, self.get_light_state(self.nlight)
            else:
                wp, color = self.nlight_wp, self.nlight.state

            if self.initializing:
                self.initializing = False

        elif self.initializing:
            wp, color = STILL_INIT, TrafficLight.UNKNOWN

        else:
            wp, color = NO_STOP, TrafficLight.UNKNOWN

        return wp, color
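
A distinctive feature of the example above is that it subscribes to /image_color only while a stop line is in range and unregisters the subscriber otherwise, so the classifier is not fed frames it does not need. A minimal sketch of that gating, assuming a rospy node is already initialised; the class and method names here are illustrative, not from the snippet:

import rospy
from sensor_msgs.msg import Image

class CameraGate(object):
    def __init__(self):
        self.sub_image = None

    def image_cb(self, msg):
        pass  # classify the frame here

    def set_needed(self, needed):
        if needed and self.sub_image is None:
            # A light is in range: start receiving camera frames.
            self.sub_image = rospy.Subscriber('/image_color', Image,
                                              self.image_cb, queue_size=1)
        elif not needed and self.sub_image is not None:
            # No light nearby: stop the image stream entirely.
            self.sub_image.unregister()
            self.sub_image = None
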
Example #37
class TLDetector(object):
    def __init__(self):
        rospy.init_node('tl_detector')

        self.current_pos = None
        self.waypoints = None
        self.waypoints_tree = None
        self.camera_image = None
        self.lights = []
        self.wpi_stop_pos_dict = dict()
        self.distance_to_stop = 0
        self.publishing_rate = 10.0 #50.0
        self.new_image = False
        self.tl_sight_dist = 50. # meters before sending images to classifier.
        self.is_simulator = True if rospy.get_param("~simulator") == 1 else False
        rospy.loginfo("SIMULATOR:{0}".format(self.is_simulator))

        rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)
        rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)

        '''
        /vehicle/traffic_lights provides you with the location of the traffic light in 3D map space and
        helps you acquire an accurate ground truth data source for the traffic light
        classifier by sending the current color state of all traffic lights in the
        simulator. When testing on the vehicle, the color state will not be available. You'll need to
        rely on the position of the light and the camera image to predict it.
        '''
        # subscribers and publishers.
        rospy.Subscriber('/vehicle/traffic_lights', TrafficLightArray,
                self.traffic_cb)
        rospy.Subscriber('/image_color', Image, self.image_cb)
        rospy.Subscriber('/vehicle/dbw_enabled', Bool, self.dbw_enabled_cb)
        self.upcoming_red_light_pub = rospy.Publisher('/traffic_waypoint', Int32, queue_size=1)
        # Stop light config
        config_string = rospy.get_param("/traffic_light_config")
        self.config = yaml.load(config_string)

        self.bridge = CvBridge()
        model_path = 'light_classification/frozen_inference_graph.pb'
        if not self.is_simulator:
            model_path = 'light_classification/real_frozen_graph.pb'

        self.light_classifier = None
        if USE_CLASSIFIER:
            self.light_classifier = TLClassifier(model_path)
        self.state = TrafficLight.UNKNOWN
        self.last_state = TrafficLight.UNKNOWN
        self.last_tl_wp = -1
        self.state_count = 0
        self.dbw_enabled = False

        self.loop()

    def loop(self):
        '''
        Loop at the sampling rate to publish detected traffic light indices.
        '''

        rate = rospy.Rate(self.publishing_rate)
        while not rospy.is_shutdown():

            if not (self.new_image and self.current_pos and self.waypoints_tree
                    and self.dbw_enabled):
                rate.sleep()
                continue

            light_wp, state = self.process_traffic_lights()

            '''
            Publish upcoming red lights at camera frequency.
            Each predicted state has to occur `STATE_COUNT_THRESHOLD` number
            of times till we start using it. Otherwise use previous stable
            state.
            '''

            if self.state != state:
                self.state_count = 0
                self.state = state
            elif self.state_count >= STATE_COUNT_THRESHOLD:
                self.last_state = self.state
                light_wp = light_wp if state == TrafficLight.RED else -1
                self.last_tl_wp = light_wp
                self.upcoming_red_light_pub.publish(Int32(light_wp))
            else:
                self.upcoming_red_light_pub.publish(Int32(self.last_tl_wp))
            self.state_count += 1

            rate.sleep()

    def pose_cb(self, msg):
        self.current_pos = msg.pose.position

    def waypoints_cb(self, msg):
        '''
        Callback for getting a list of waypoints.
        '''

        # Don't rebuild the KD-tree if the same set of waypoints arrives again.
        if self.waypoints and (sorted(self.waypoints) ==
                sorted(msg.waypoints)):
            return

        self.waypoints = msg.waypoints
        waypoints_2d = [[wp.pose.pose.position.x, wp.pose.pose.position.y]
                for wp in self.waypoints]
        self.waypoints_tree = KDTree(waypoints_2d)

        self.update_stop_light_waypoints()
        rospy.loginfo("Number of Waypoints received: {0}".format(
                len(self.waypoints)))

    def traffic_cb(self, msg):
        self.lights = msg.lights

    def image_cb(self, msg):
        """Identifies red lights in the incoming camera image and publishes the index
            of the waypoint closest to the red light's stop line to /traffic_waypoint

        Args:
            msg (Image): image from car-mounted camera

        """

        self.new_image = True
        self.camera_image = msg

    def dbw_enabled_cb(self, msg):
        '''
        Returns if vehicle is in DBW mode.
        '''
        self.dbw_enabled = msg.data

    def get_closest_waypoint(self, x, y):
        """Identifies the closest path waypoint to the given position
            https://en.wikipedia.org/wiki/Closest_pair_of_points_problem
        Args:
            x (float), y (float): position to match a waypoint to

        Returns:
            int: index of the closest waypoint in self.waypoints

        """

        closest_id = self.waypoints_tree.query([x, y], 1)[1]
        return closest_id

    def get_light_state(self, light):
        """Determines the current color of the traffic light

        Args:
            light (TrafficLight): light to classify

        Returns:
            int: ID of traffic light color (specified in styx_msgs/TrafficLight)

        """

        try:
            cv_image = self.bridge.imgmsg_to_cv2(self.camera_image, "bgr8")
        except CvBridgeError as e:
            rospy.logwarn(e)
            return TrafficLight.UNKNOWN, 0

        traffic_class, score = self.light_classifier.get_classification(cv_image)

        return traffic_class, score

    def update_stop_light_waypoints(self):
        '''
        Map each stop line position to its closest waypoint index.
        '''

        sl_positions = self.config['stop_line_positions']
        for stop_pos in sl_positions:
            idx = self.get_closest_waypoint(stop_pos[0], stop_pos[1])
            self.wpi_stop_pos_dict[idx] = stop_pos

        rospy.loginfo("Stop line positions from config: {0}".
                format(self.wpi_stop_pos_dict))

    def process_traffic_lights(self):
        """Finds closest visible traffic light, if one exists, and determines its
            location and color

        Returns:
            int: index of waypoint closest to the upcoming stop line for a
            traffic light (-1 if none exists)
            int: ID of traffic light color (specified in styx_msgs/TrafficLight)

        """
        stop_pos = None

        # Traffic light and waypoints are fixed; or updated less frequently.
        # If * = Waypoints; # = Traffic light waypoints. Representation:
        #  * * * # * * # * * * * # * * * * # * * # * # ** * * * * # * *

        next_stop_line_wpi = -1
        car_wpi = self.get_closest_waypoint(self.current_pos.x,
                self.current_pos.y)

        # Get closest stopline waypoint to car position.
        if car_wpi < max(self.wpi_stop_pos_dict.keys()):
            next_stop_line_wpi = min([x for x in self.wpi_stop_pos_dict.keys()
                if x >= car_wpi])
        else: # Circular.
            next_stop_line_wpi = min(self.wpi_stop_pos_dict.keys())

        # Get the stop position corresponding to the waypoint index.
        stop_pos = self.wpi_stop_pos_dict[next_stop_line_wpi]
        dl = lambda x, y, x1, y1 : math.sqrt((x-x1)**2 + (y-y1)**2)
        self.distance_to_stop = dl(self.current_pos.x, self.current_pos.y,
                stop_pos[0], stop_pos[1])

        # Process TL images only if the car is in delta range.
        if self.distance_to_stop <= self.tl_sight_dist:
            if USE_CLASSIFIER:
                state, score = self.get_light_state(stop_pos)
                if state == TrafficLight.RED:
                    rospy.loginfo("Traffic light:{0}; Red, score:{1}".format(
                            state, score))
                    return next_stop_line_wpi, state
                elif (state == TrafficLight.YELLOW and
                        self.distance_to_stop >= MIN_STOPPING_DISTANCE):
                    # Treat as Red; stop tl.
                    state = TrafficLight.RED
                    rospy.loginfo("Traffic light:{0}; Yellow, score:{1}".format(
                            state, score))
                    return next_stop_line_wpi, state
                else:
                    return -1, state
            else:
                for light in self.lights:
                    if dl(light.pose.pose.position.x, light.pose.pose.position.y,
                            stop_pos[0], stop_pos[1]) <= 25.0:
                        return next_stop_line_wpi, light.state

        return -1, TrafficLight.UNKNOWN
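
The example above maps each stop line to a waypoint index once, in update_stop_light_waypoints(), and then finds the next stop line ahead by taking the smallest index at or beyond the car, wrapping to the first stop line after the last one. The same lookup in isolation, with made-up indices and positions:

# Toy data: stop-line waypoint index -> (x, y) of the stop line.
wpi_stop_pos_dict = {292: (1148.6, 1184.7), 753: (1559.2, 1158.4), 2047: (2122.1, 1526.8)}

def next_stop_line_wpi(car_wpi, stop_dict):
    if car_wpi < max(stop_dict.keys()):
        return min(k for k in stop_dict.keys() if k >= car_wpi)
    return min(stop_dict.keys())  # past the last stop line: wrap to the first

print(next_stop_line_wpi(800, wpi_stop_pos_dict))   # -> 2047
print(next_stop_line_wpi(2100, wpi_stop_pos_dict))  # -> 292
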
Example #38
class TLDetector(object):
    def __init__(self):
        rospy.init_node('tl_detector')

        self.pose = None
        self.waypoints = None
        self.camera_image = None
        self.lights = []

        sub1 = rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)
        sub2 = rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)

        '''
        /vehicle/traffic_lights provides you with the location of the traffic light in 3D map space and
        helps you acquire an accurate ground truth data source for the traffic light
        classifier by sending the current color state of all traffic lights in the
        simulator. When testing on the vehicle, the color state will not be available. You'll need to
        rely on the position of the light and the camera image to predict it.
        '''
        sub3 = rospy.Subscriber('/vehicle/traffic_lights', TrafficLightArray, self.traffic_cb)
        sub6 = rospy.Subscriber('/image_color', Image, self.image_cb)

        config_string = rospy.get_param("/traffic_light_config")
        self.config = yaml.load(config_string)

        self.upcoming_red_light_pub = rospy.Publisher('/traffic_waypoint', Int32, queue_size=1)

        self.bridge = CvBridge()
        self.light_classifier = TLClassifier()
        self.listener = tf.TransformListener()

        self.state = TrafficLight.UNKNOWN
        self.last_state = TrafficLight.UNKNOWN
        self.last_wp = -1
        self.state_count = 0

        rospy.spin()

    def pose_cb(self, msg):
        self.pose = msg

    def waypoints_cb(self, waypoints):
        self.waypoints = waypoints

    def traffic_cb(self, msg):
        self.lights = msg.lights

    def image_cb(self, msg):
        """Identifies red lights in the incoming camera image and publishes the index
            of the waypoint closest to the red light's stop line to /traffic_waypoint

        Args:
            msg (Image): image from car-mounted camera

        """
        self.has_image = True
        self.camera_image = msg
        light_wp, state = self.process_traffic_lights()

        '''
        Publish upcoming red lights at camera frequency.
        Each predicted state has to occur `STATE_COUNT_THRESHOLD` number
        of times till we start using it. Otherwise the previous stable state is
        used.
        '''
        if self.state != state:
            self.state_count = 0
            self.state = state
        elif self.state_count >= STATE_COUNT_THRESHOLD:
            self.last_state = self.state
            light_wp = light_wp if state == TrafficLight.RED else -1
            self.last_wp = light_wp
            self.upcoming_red_light_pub.publish(Int32(light_wp))
        else:
            self.upcoming_red_light_pub.publish(Int32(self.last_wp))
        self.state_count += 1

    def get_closest_waypoint(self, pose):
        """Identifies the closest path waypoint to the given position
            https://en.wikipedia.org/wiki/Closest_pair_of_points_problem
        Args:
            pose (Pose): position to match a waypoint to

        Returns:
            int: index of the closest waypoint in self.waypoints

        """
        #TODO implement
        return 0

    def get_light_state(self, light):
        """Determines the current color of the traffic light

        Args:
            light (TrafficLight): light to classify

        Returns:
            int: ID of traffic light color (specified in styx_msgs/TrafficLight)

        """
        if(not self.has_image):
            self.prev_light_loc = None
            return False

        cv_image = self.bridge.imgmsg_to_cv2(self.camera_image, "bgr8")

        #Get classification
        return self.light_classifier.get_classification(cv_image)

    def process_traffic_lights(self):
        """Finds closest visible traffic light, if one exists, and determines its
            location and color

        Returns:
            int: index of waypoint closes to the upcoming stop line for a traffic light (-1 if none exists)
            int: ID of traffic light color (specified in styx_msgs/TrafficLight)

        """
        light = None

        # List of positions that correspond to the line to stop in front of for a given intersection
        stop_line_positions = self.config['stop_line_positions']
        if(self.pose):
            car_position = self.get_closest_waypoint(self.pose.pose)

        #TODO find the closest visible traffic light (if one exists)

        if light:
            # Note: in this starter template `light` is never set, so this branch
            # does not run; `light_wp` must also be assigned once the TODO above
            # is implemented.
            state = self.get_light_state(light)
            return light_wp, state
        self.waypoints = None
        return -1, TrafficLight.UNKNOWN
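
Every variant above debounces the classifier output the same way: a new light state is only acted on after it has been seen STATE_COUNT_THRESHOLD times in a row, otherwise the last stable waypoint keeps being republished. A self-contained sketch of just that logic; the threshold value below is an assumption, not taken from the snippets.

STATE_COUNT_THRESHOLD = 3  # assumed value for the sketch
RED = 0                    # RED as defined in styx_msgs/TrafficLight

class StateDebouncer(object):
    def __init__(self):
        self.state = None
        self.last_wp = -1
        self.state_count = 0

    def update(self, light_wp, state):
        # Return the waypoint index that would be published for this detection.
        if self.state != state:
            self.state_count = 0
            self.state = state
        elif self.state_count >= STATE_COUNT_THRESHOLD:
            self.last_wp = light_wp if state == RED else -1
        self.state_count += 1
        return self.last_wp

deb = StateDebouncer()
print([deb.update(120, RED) for _ in range(5)])  # -> [-1, -1, -1, 120, 120]
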