def __init__(self):
        super().__init__('simple_trajectory')

        # --- Parameters ----------------------------------------------------
        # Declare each ROS parameter with its default and cache the value.
        declare = self.declare_parameter
        self.length_m = declare("length_m", 20.0).value
        self.min_speed_mps = declare("min_speed_mps", 1.0).value
        self.max_speed_mps = declare("max_speed_mps", 8.0).value
        self.discretization_m = declare("discretization_m", 0.5).value
        self.speed_increments = declare("speed_increments", 0.2).value
        # NOTE(review): "offset_distnace_m" looks like a typo of
        # "offset_distance_m", but the string is the public parameter name
        # (launch files address it), so it is kept unchanged.
        self.offset_distnace_m = declare("offset_distnace_m", 0.0).value
        self.one_time = declare("one_time", False).value
        self.timer_period_sec = declare("timer_period_sec", 0.0).value
        # Never let the configured maximum speed drop below the minimum.
        self.max_speed_mps = max(self.max_speed_mps, self.min_speed_mps)

        # --- Publisher / Subscriber ----------------------------------------
        qos_profile = QoSProfile(depth=10)
        if self.one_time:
            # Single-shot mode: latch one message so late-joining
            # subscribers still receive it.
            qos_profile.durability = \
                QoSDurabilityPolicy.RMW_QOS_POLICY_DURABILITY_TRANSIENT_LOCAL
            qos_profile.depth = 1

        self.publisher_trajectory_ = self.create_publisher(
            Trajectory, "trajectory", qos_profile
        )
        self.subscriber_vehicle_kinematic_state_ = self.create_subscription(
            VehicleKinematicState, "vehicle_kinematic_state",
            self.vehicle_kinematic_state_cb, 0
        )

        # --- Local state ---------------------------------------------------
        self.trajectory_msg_ = None
        self.vehicle_kinematic_state_msg_ = None

        # Periodic mode: republish the trajectory on a timer.
        if not self.one_time:
            self.timer = self.create_timer(self.timer_period_sec, self.publish_trajectory)
# Esempio n. 2
# 0
def run_topic_listening(node, topic_monitor, options):
    """Subscribe to relevant topics and manage the data received from subscriptions.

    Polls the ROS graph for new topics, registers every monitored topic with
    ``topic_monitor``, and spins the node so subscription callbacks run.

    Arguments:
    node          -- rclpy node used for graph queries, logging and spinning
    topic_monitor -- decides which topics/types are monitored
    options       -- carries expected_period, allowed_latency and stale_time
    """
    # Topics already warned about once; prevents log spam on every poll.
    already_ignored_topics = set()
    while rclpy.ok():
        # Check if there is a new topic online
        # TODO(dhood): use graph events rather than polling
        topic_names_and_types = node.get_topic_names_and_types()

        for topic_name, type_names in topic_names_and_types:
            # Infer the appropriate QoS profile from the topic name
            topic_info = topic_monitor.get_topic_info(topic_name)
            if topic_info is None:
                # The topic is not for being monitored
                continue

            if len(type_names) != 1:
                if topic_name not in already_ignored_topics:
                    node.get_logger().info(
                        "Warning: ignoring topic '%s', which has more than one type: [%s]"
                        % (topic_name, ', '.join(type_names)))
                    already_ignored_topics.add(topic_name)
                continue

            type_name = type_names[0]
            if not topic_monitor.is_supported_type(type_name):
                if topic_name not in already_ignored_topics:
                    # Fixed message: the original concatenated literals were
                    # missing a space before 'is' and misspelled 'supported'.
                    node.get_logger().info(
                        "Warning: ignoring topic '%s' because its message type (%s) "
                        'is not supported.' % (topic_name, type_name))
                    already_ignored_topics.add(topic_name)
                continue

            is_new_topic = topic_name and topic_name not in topic_monitor.monitored_topics
            if is_new_topic:
                # Register new topic with the monitor; build the profile with
                # the intended depth directly instead of overwriting it.
                qos_profile = QoSProfile(depth=QOS_DEPTH)
                if topic_info['reliability'] == 'best_effort':
                    qos_profile.reliability = \
                        QoSReliabilityPolicy.BEST_EFFORT
                topic_monitor.add_monitored_topic(Header, topic_name, node,
                                                  qos_profile,
                                                  options.expected_period,
                                                  options.allowed_latency,
                                                  options.stale_time)

        # Wait for messages with a timeout, otherwise this thread will block any other threads
        # until a message is received
        rclpy.spin_once(node, timeout_sec=0.05)
# Esempio n. 3
# 0
    def __init__(self):
        super().__init__('minimal_publisher')

        # Startup QoS: best-effort reliability, depth 10, keep-all history,
        # transient-local durability.
        startup_qos = QoSProfile()
        print("Configuring", end=',')
        startup_qos.reliability = QoSReliabilityPolicy.RMW_QOS_POLICY_RELIABILITY_BEST_EFFORT
        startup_qos.depth = 10
        startup_qos.history = QoSHistoryPolicy.RMW_QOS_POLICY_HISTORY_KEEP_ALL
        startup_qos.durability = QoSDurabilityPolicy.RMW_QOS_POLICY_DURABILITY_TRANSIENT_LOCAL
        print("Configured")

        self.publisher_ = self.create_publisher(
            String, 'topic', qos_profile=startup_qos)
        print(self.publisher_)

        # Publish every half second.
        self.timer = self.create_timer(0.5, self.timer_callback)
        self.i = 0
# Esempio n. 4
# 0
    def __init__(self):
        super().__init__('minimal_subscriber')

        # Startup QoS: best-effort reliability, depth 10, keep-all history,
        # transient-local durability (so latched messages are received).
        startup_qos = QoSProfile()
        startup_qos.reliability = QoSReliabilityPolicy.RMW_QOS_POLICY_RELIABILITY_BEST_EFFORT
        startup_qos.depth = 10
        startup_qos.history = QoSHistoryPolicy.RMW_QOS_POLICY_HISTORY_KEEP_ALL
        startup_qos.durability = QoSDurabilityPolicy.RMW_QOS_POLICY_DURABILITY_TRANSIENT_LOCAL

        # Route callbacks through a throttled callback group.
        self.group = ThrottledCallbackGroup(self)
        self.subscription = self.create_subscription(
            String, 'topic', self.listener_callback,
            qos_profile=startup_qos, callback_group=self.group)
        print(self.subscription)
    def __init__(self,
                 topic,
                 node_handle,
                 msg_type=None,
                 latched_client_id=None,
                 queue_size=100):
        """Register a publisher on the specified topic.

        Keyword arguments:
        topic             -- name of the topic to register the publisher to
        node_handle       -- rclpy node used to create the publisher
        msg_type          -- (optional) message type to publish as; inferred
                             from the ROS graph when omitted
        latched_client_id -- (optional) id of the client that requested this
                             publisher to be latched, if any
        queue_size        -- publisher history depth (default 100)

        Throws:
        TopicNotEstablishedException -- no msg_type was specified and the
        topic is not yet established, so a topic type cannot be inferred
        TypeConflictException        -- msg_type was specified but the topic
        is established with a different type
        """
        # Check whether the topic already exists, and with which type(s).
        established = dict(node_handle.get_topic_names_and_types()).get(topic)

        # With neither an established topic nor an explicit type, give up.
        if msg_type is None and established is None:
            raise TopicNotEstablishedException(topic)

        # `established` is a list of type names (or None); only one type is
        # supported, so collapse it to the first entry.
        if established is not None:
            if len(established) > 1:
                node_handle.get_logger().warning(
                    'More than one topic type detected: {}'.format(established))
            established = established[0]

        # Fall back to the established type when none was given.
        if msg_type is None:
            msg_type = established

        # Resolve the message class; bad type strings raise here.
        msg_class = ros_loader.get_message_class(msg_type)

        # Reject a user-supplied type that conflicts with the established one.
        msg_type_string = msg_class_type_repr(msg_class)
        if established is not None and established != msg_type_string:
            raise TypeConflictException(topic, established, msg_type_string)

        # Publisher state.
        self.clients = {}
        self.latched_client_id = latched_client_id
        self.topic = topic
        self.node_handle = node_handle
        self.msg_class = msg_class

        # Adding a lifespan solves the problem of late-joining subscribers
        # without the need of a custom message publisher implementation.
        publisher_qos = QoSProfile(
            depth=queue_size,
            durability=QoSDurabilityPolicy.RMW_QOS_POLICY_DURABILITY_TRANSIENT_LOCAL)

        if latched_client_id is None:
            # Non-latched: keep messages around for one second so slightly
            # late subscribers do not miss them.
            publisher_qos.lifespan = Duration(seconds=1)
        else:
            # Latched client: keep exactly the last message, forever.
            publisher_qos.depth = 1

        self.publisher = node_handle.create_publisher(
            msg_class, topic, qos_profile=publisher_qos)
# Esempio n. 6
# 0
def main():
    """Publish Header messages on a topic whose QoS is set from the CLI."""
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument(
        'data_name', nargs='?', default='topic1',
        help='Name of the data (must comply with ROS topic rules)')
    parser.add_argument(
        '--best-effort', action='store_true', default=False,
        help='Set QoS reliability option to "best effort"')
    parser.add_argument(
        '--transient-local', action='store_true', default=False,
        help='Set QoS durability option to "transient local"')
    parser.add_argument(
        '--depth', type=int, default=default_depth, help='Size of the QoS history depth')
    parser.add_argument(
        '--keep-all', action='store_true', default=False,
        help='Set QoS history option to "keep all" (unlimited depth, subject to resource limits)')
    parser.add_argument(
        '--payload-size', type=int, default=0, help='Size of data payload to send')
    parser.add_argument(
        '--period', type=float, default=0.5, help='Time in seconds between messages')
    parser.add_argument(
        '--end-after', type=int, help='Script will exit after publishing this many messages')
    args = parser.parse_args()

    # Best-effort topics get a distinct name so both variants can coexist.
    reliability_suffix = '_best_effort' if args.best_effort else ''
    topic_name = '{0}_data{1}'.format(args.data_name, reliability_suffix)

    rclpy.init()
    node = rclpy.create_node('%s_pub' % topic_name)
    node_logger = node.get_logger()

    # Translate the CLI flags into a QoS profile, logging each choice.
    qos_profile = QoSProfile()

    if args.best_effort:
        node_logger.info('Reliability: best effort')
        qos_profile.reliability = QoSReliabilityPolicy.RMW_QOS_POLICY_RELIABILITY_BEST_EFFORT
    else:
        node_logger.info('Reliability: reliable')
        qos_profile.reliability = QoSReliabilityPolicy.RMW_QOS_POLICY_RELIABILITY_RELIABLE

    if args.keep_all:
        node_logger.info('History: keep all')
        qos_profile.history = QoSHistoryPolicy.RMW_QOS_POLICY_HISTORY_KEEP_ALL
    else:
        node_logger.info('History: keep last')
        qos_profile.history = QoSHistoryPolicy.RMW_QOS_POLICY_HISTORY_KEEP_LAST

    node_logger.info('Depth: {0}'.format(args.depth))
    qos_profile.depth = args.depth

    if args.transient_local:
        node_logger.info('Durability: transient local')
        qos_profile.durability = QoSDurabilityPolicy.RMW_QOS_POLICY_DURABILITY_TRANSIENT_LOCAL
    else:
        node_logger.info('Durability: volatile')
        qos_profile.durability = QoSDurabilityPolicy.RMW_QOS_POLICY_DURABILITY_VOLATILE

    data_pub = node.create_publisher(Header, topic_name, qos_profile=qos_profile)
    node_logger.info('Publishing on topic: {0}'.format(topic_name))

    node_logger.info('Payload size: {0}'.format(args.payload_size))
    payload = 'a' * args.payload_size

    msg = Header()
    sent_count = 0

    def publish_msg(val):
        # The sequence value (or the -1 end sentinel) prefixes the payload.
        msg.frame_id = '{0}_{1}'.format(val, payload)
        data_pub.publish(msg)
        node_logger.info('Publishing: "{0}"'.format(val))

    while rclpy.ok():
        # A -1 message tells subscribers the stream has ended.
        if args.end_after is not None and sent_count >= args.end_after:
            publish_msg(-1)
            sleep(0.1)  # allow reliable messages to get re-sent if needed
            return

        publish_msg(sent_count)
        sent_count += 1

        try:
            sleep(args.period)
        except KeyboardInterrupt:
            # Announce the end of the stream before propagating Ctrl-C.
            publish_msg(-1)
            sleep(0.1)
            raise