Code example #1
    def _wait_for_ready_callbacks(
        self,
        timeout_sec: Optional[float] = None,
        nodes: Optional[List['Node']] = None,
        condition: Callable[[], bool] = lambda: False,
    ) -> Generator[Tuple[Task, WaitableEntityType, 'Node'], None, None]:
        """
        Yield callbacks that are ready to be executed.

        :raise TimeoutException: on timeout.
        :raise ShutdownException: if the executor was shut down.

        :param timeout_sec: Seconds to wait. Block forever if ``None`` or negative.
            Don't wait if 0.
        :param nodes: A list of nodes to wait on. Wait on all nodes if ``None``.
        :param condition: A callable that causes this function to return
            immediately when it evaluates to ``True``.
        """
        timeout_timer = None
        timeout_nsec = timeout_sec_to_nsec(timeout_sec)
        if timeout_nsec > 0:
            timeout_timer = Timer(None,
                                  None,
                                  timeout_nsec,
                                  self._clock,
                                  context=self._context)

        yielded_work = False
        while not yielded_work and not self._is_shutdown and not condition():
            # Refresh "all" nodes in case executor was woken by a node being added or removed
            nodes_to_use = nodes
            if nodes is None:
                nodes_to_use = self.get_nodes()

            # Yield tasks in-progress before waiting for new work
            tasks = None
            with self._tasks_lock:
                tasks = list(self._tasks)
            if tasks:
                for task, entity, node in reversed(tasks):
                    if (not task.executing() and not task.done()
                            and (node is None or node in nodes_to_use)):
                        yielded_work = True
                        yield task, entity, node
                with self._tasks_lock:
                    # Get rid of any tasks that are done
                    self._tasks = list(
                        filter(lambda t_e_n: not t_e_n[0].done(), self._tasks))

            # Gather entities that can be waited on
            subscriptions: List[Subscription] = []
            guards: List[GuardCondition] = []
            timers: List[Timer] = []
            clients: List[Client] = []
            services: List[Service] = []
            waitables: List[Waitable] = []
            for node in nodes_to_use:
                subscriptions.extend(
                    filter(self.can_execute, node.subscriptions))
                timers.extend(filter(self.can_execute, node.timers))
                clients.extend(filter(self.can_execute, node.clients))
                services.extend(filter(self.can_execute, node.services))
                node_guards = filter(self.can_execute, node.guards)
                waitables.extend(filter(self.can_execute, node.waitables))
                # retrigger a guard condition that was triggered but not handled
                for gc in node_guards:
                    if gc._executor_triggered:
                        gc.trigger()
                    guards.append(gc)
            if timeout_timer is not None:
                timers.append(timeout_timer)

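            # Also wait on the executor's own guard condition (triggered by
            # wake() when nodes or tasks change) and on the SIGINT guard
            # condition so that Ctrl-C interrupts the wait.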
            guards.append(self._guard)
            guards.append(self._sigint_gc)

            entity_count = NumberOfEntities(len(subscriptions), len(guards),
                                            len(timers), len(clients),
                                            len(services))

            # Construct a wait set
            wait_set = None
            with ExitStack() as context_stack:
                sub_handles = []
                for sub in subscriptions:
                    try:
                        context_stack.enter_context(sub.handle)
                        sub_handles.append(sub.handle)
                    except InvalidHandle:
                        entity_count.num_subscriptions -= 1

                client_handles = []
                for cli in clients:
                    try:
                        context_stack.enter_context(cli.handle)
                        client_handles.append(cli.handle)
                    except InvalidHandle:
                        entity_count.num_clients -= 1

                service_handles = []
                for srv in services:
                    try:
                        context_stack.enter_context(srv.handle)
                        service_handles.append(srv.handle)
                    except InvalidHandle:
                        entity_count.num_services -= 1

                timer_handles = []
                for tmr in timers:
                    try:
                        context_stack.enter_context(tmr.handle)
                        timer_handles.append(tmr.handle)
                    except InvalidHandle:
                        entity_count.num_timers -= 1

                guard_handles = []
                for gc in guards:
                    try:
                        context_stack.enter_context(gc.handle)
                        guard_handles.append(gc.handle)
                    except InvalidHandle:
                        entity_count.num_guard_conditions -= 1

                for waitable in waitables:
                    try:
                        context_stack.enter_context(waitable)
                        entity_count += waitable.get_num_entities()
                    except InvalidHandle:
                        pass

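                # Keep the context handle alive while the wait set is built and waited on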
                context_stack.enter_context(self._context.handle)

                wait_set = _rclpy.WaitSet(
                    entity_count.num_subscriptions,
                    entity_count.num_guard_conditions, entity_count.num_timers,
                    entity_count.num_clients, entity_count.num_services,
                    entity_count.num_events, self._context.handle)

                wait_set.clear_entities()
                for sub_handle in sub_handles:
                    wait_set.add_subscription(sub_handle)
                for cli_handle in client_handles:
                    wait_set.add_client(cli_handle)
                for srv_handle in service_handles:
                    wait_set.add_service(srv_handle)
                for tmr_handle in timer_handles:
                    wait_set.add_timer(tmr_handle)
                for gc_handle in guard_handles:
                    wait_set.add_guard_condition(gc_handle)
                for waitable in waitables:
                    waitable.add_to_wait_set(wait_set)

                # Wait for something to become ready
                wait_set.wait(timeout_nsec)
                if self._is_shutdown:
                    raise ShutdownException()
                if not self._context.ok():
                    raise ExternalShutdownException()

                # get ready entities
                subs_ready = wait_set.get_ready_entities('subscription')
                guards_ready = wait_set.get_ready_entities('guard_condition')
                timers_ready = wait_set.get_ready_entities('timer')
                clients_ready = wait_set.get_ready_entities('client')
                services_ready = wait_set.get_ready_entities('service')

                # Mark all guards as triggered before yielding since they're auto-taken
                for gc in guards:
                    if gc.handle.pointer in guards_ready:
                        gc._executor_triggered = True

                # Check waitables before wait set is destroyed
                for node in nodes_to_use:
                    for wt in node.waitables:
                        # Only check waitables that were added to the wait set
                        if wt in waitables and wt.is_ready(wait_set):
                            handler = self._make_handler(
                                wt, node, lambda e: e.take_data(),
                                self._execute_waitable)
                            yielded_work = True
                            yield handler, wt, node

            # Process ready entities one node at a time
            for node in nodes_to_use:
                for tmr in node.timers:
                    if tmr.handle.pointer in timers_ready:
                        # Check the timer is ready to work around an rcl issue with cancelled timers
                        if tmr.handle.is_timer_ready():
                            if tmr.callback_group.can_execute(tmr):
                                handler = self._make_handler(
                                    tmr, node, self._take_timer,
                                    self._execute_timer)
                                yielded_work = True
                                yield handler, tmr, node

                for sub in node.subscriptions:
                    if sub.handle.pointer in subs_ready:
                        if sub.callback_group.can_execute(sub):
                            handler = self._make_handler(
                                sub, node, self._take_subscription,
                                self._execute_subscription)
                            yielded_work = True
                            yield handler, sub, node

                for gc in node.guards:
                    if gc._executor_triggered:
                        if gc.callback_group.can_execute(gc):
                            handler = self._make_handler(
                                gc, node, self._take_guard_condition,
                                self._execute_guard_condition)
                            yielded_work = True
                            yield handler, gc, node

                for client in node.clients:
                    if client.handle.pointer in clients_ready:
                        if client.callback_group.can_execute(client):
                            handler = self._make_handler(
                                client, node, self._take_client,
                                self._execute_client)
                            yielded_work = True
                            yield handler, client, node

                for srv in node.services:
                    if srv.handle.pointer in services_ready:
                        if srv.callback_group.can_execute(srv):
                            handler = self._make_handler(
                                srv, node, self._take_service,
                                self._execute_service)
                            yielded_work = True
                            yield handler, srv, node

            # Check timeout timer
            if (timeout_nsec == 0
                    or (timeout_timer is not None
                        and timeout_timer.handle.pointer in timers_ready)):
                raise TimeoutException()
        if self._is_shutdown:
            raise ShutdownException()
        if condition():
            raise ConditionReachedException()
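
For context, this generator is the core of rclpy's executor machinery and is not called directly by user code. Below is a minimal sketch of how it is typically driven via the public spin_once() API, which obtains handlers from wait_for_ready_callbacks() (a thin wrapper around _wait_for_ready_callbacks()) and executes the first one yielded; the node name and timeout are illustrative:

    # Minimal usage sketch (assumed setup; names are illustrative).
    import rclpy
    from rclpy.executors import SingleThreadedExecutor
    from rclpy.node import Node

    rclpy.init()
    node = Node('wait_set_demo')
    executor = SingleThreadedExecutor()
    executor.add_node(node)
    try:
        # spin_once() waits for ready callbacks and executes at most one;
        # it returns after timeout_sec if nothing becomes ready.
        executor.spin_once(timeout_sec=0.1)
    finally:
        executor.shutdown()
        node.destroy_node()
        rclpy.shutdown()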
Code example #2
File: test_qos_event.py Project: asorbini/rclpy
    def test_call_subscription_rclpy_event_apis(self):
        # Go through the exposed APIs and ensure that things don't explode when called
        # Make no assumptions about being able to actually receive the events
        subscription = self.node.create_subscription(EmptyMsg, self.topic_name,
                                                     Mock(), 10)
        with self.context.handle:
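            # Positional args reserve slots for subscriptions, guard conditions,
            # timers, clients, services, and events; three event slots are
            # reserved here for the three QoS events registered below.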
            wait_set = _rclpy.WaitSet(0, 0, 0, 0, 0, 3, self.context.handle)

        deadline_event_handle = self._create_event_handle(
            subscription, QoSSubscriptionEventType.
            RCL_SUBSCRIPTION_REQUESTED_DEADLINE_MISSED)
        with deadline_event_handle:
            deadline_event_index = wait_set.add_event(deadline_event_handle)
        self.assertIsNotNone(deadline_event_index)

        liveliness_event_handle = self._create_event_handle(
            subscription,
            QoSSubscriptionEventType.RCL_SUBSCRIPTION_LIVELINESS_CHANGED)
        with liveliness_event_handle:
            liveliness_event_index = wait_set.add_event(
                liveliness_event_handle)
        self.assertIsNotNone(liveliness_event_index)

        incompatible_qos_event_handle = self._create_event_handle(
            subscription, QoSSubscriptionEventType.
            RCL_SUBSCRIPTION_REQUESTED_INCOMPATIBLE_QOS)
        with incompatible_qos_event_handle:
            incompatible_qos_event_index = wait_set.add_event(
                incompatible_qos_event_handle)
        self.assertIsNotNone(incompatible_qos_event_index)

        # We live in our own namespace and have created no other participants, so
        # there can't be any of these events.
        wait_set.wait(0)
        self.assertFalse(wait_set.is_ready('event', deadline_event_index))
        self.assertFalse(wait_set.is_ready('event', liveliness_event_index))
        self.assertFalse(
            wait_set.is_ready('event', incompatible_qos_event_index))

        # Calling take_event() even though the event is not ready should return
        # an empty, zero-initialized message. This exercises the data
        # conversion utilities on the C side.
        try:
            with deadline_event_handle:
                event_data = deadline_event_handle.take_event()
            self.assertIsInstance(event_data, QoSRequestedDeadlineMissedInfo)
            self.assertEqual(event_data.total_count, 0)
            self.assertEqual(event_data.total_count_change, 0)
        except NotImplementedError:
            pass

        try:
            with liveliness_event_handle:
                event_data = liveliness_event_handle.take_event()
            self.assertIsInstance(event_data, QoSLivelinessChangedInfo)
            self.assertEqual(event_data.alive_count, 0)
            self.assertEqual(event_data.alive_count_change, 0)
            self.assertEqual(event_data.not_alive_count, 0)
            self.assertEqual(event_data.not_alive_count_change, 0)
        except NotImplementedError:
            pass

        try:
            with incompatible_qos_event_handle:
                event_data = incompatible_qos_event_handle.take_event()
            self.assertIsInstance(event_data, QoSRequestedIncompatibleQoSInfo)
            self.assertEqual(event_data.total_count, 0)
            self.assertEqual(event_data.total_count_change, 0)
            self.assertEqual(event_data.last_policy_kind,
                             QoSPolicyKind.INVALID)
        except NotImplementedError:
            pass

        self.node.destroy_subscription(subscription)
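
This test exercises the private _rclpy bindings directly. In application code the same subscription QoS events are usually received through the public event_callbacks argument instead; a minimal sketch, assuming the rclpy.qos_event module path of this rclpy era (newer distros expose the same classes from rclpy.event_handler) and an illustrative message type, node, and topic:

    # Minimal sketch (assumed setup): subscription QoS events via the public
    # API rather than the raw _rclpy wait set used in the test above.
    import rclpy
    from rclpy.node import Node
    from rclpy.qos_event import SubscriptionEventCallbacks
    from std_msgs.msg import Empty as EmptyMsg  # assumed message type

    rclpy.init()
    node = Node('qos_event_demo')
    event_callbacks = SubscriptionEventCallbacks(
        deadline=lambda info: node.get_logger().info(f'deadline missed: {info}'),
        liveliness=lambda info: node.get_logger().info(f'liveliness changed: {info}'),
    )
    subscription = node.create_subscription(
        EmptyMsg, 'chatter', lambda msg: None, 10,
        event_callbacks=event_callbacks)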
Code example #3
File: test_qos_event.py Project: ksuszka/rclpy
    def test_call_publisher_rclpy_event_apis(self):
        # Go through the exposed APIs and ensure that things don't explode when called
        # Make no assumptions about being able to actually receive the events
        publisher = self.node.create_publisher(EmptyMsg, self.topic_name, 10)
        with self.context.handle:
            wait_set = _rclpy.WaitSet(0, 0, 0, 0, 0, 3, self.context.handle)

        deadline_event_handle = self._create_event_handle(
            publisher,
            QoSPublisherEventType.RCL_PUBLISHER_OFFERED_DEADLINE_MISSED)
        with deadline_event_handle:
            deadline_event_index = wait_set.add_event(deadline_event_handle)
        self.assertIsNotNone(deadline_event_index)

        liveliness_event_handle = self._create_event_handle(
            publisher, QoSPublisherEventType.RCL_PUBLISHER_LIVELINESS_LOST)
        with liveliness_event_handle:
            liveliness_event_index = wait_set.add_event(
                liveliness_event_handle)
        self.assertIsNotNone(liveliness_event_index)

        try:
            incompatible_qos_event_handle = self._create_event_handle(
                publisher,
                QoSPublisherEventType.RCL_PUBLISHER_OFFERED_INCOMPATIBLE_QOS)
            with incompatible_qos_event_handle:
                incompatible_qos_event_index = wait_set.add_event(
                    incompatible_qos_event_handle)
            self.assertIsNotNone(incompatible_qos_event_index)
        except UnsupportedEventTypeError:
            self.assertTrue(self.is_fastrtps)

        # We live in our own namespace and have created no other participants, so
        # there can't be any of these events.
        wait_set.wait(0)
        self.assertFalse(wait_set.is_ready('event', deadline_event_index))
        self.assertFalse(wait_set.is_ready('event', liveliness_event_index))
        if not self.is_fastrtps:
            self.assertFalse(
                wait_set.is_ready('event', incompatible_qos_event_index))

        # Calling take_event() even though the event is not ready should return
        # an empty, zero-initialized message. This exercises the data
        # conversion utilities on the C side.
        try:
            with deadline_event_handle:
                event_data = deadline_event_handle.take_event()
            self.assertIsInstance(event_data, QoSOfferedDeadlineMissedInfo)
            self.assertEqual(event_data.total_count, 0)
            self.assertEqual(event_data.total_count_change, 0)
        except NotImplementedError:
            pass

        try:
            with liveliness_event_handle:
                event_data = liveliness_event_handle.take_event()
            self.assertIsInstance(event_data, QoSLivelinessLostInfo)
            self.assertEqual(event_data.total_count, 0)
            self.assertEqual(event_data.total_count_change, 0)
        except NotImplementedError:
            pass

        if not self.is_fastrtps:
            try:
                with incompatible_qos_event_handle:
                    event_data = incompatible_qos_event_handle.take_event()
                self.assertIsInstance(event_data,
                                      QoSOfferedIncompatibleQoSInfo)
                self.assertEqual(event_data.total_count, 0)
                self.assertEqual(event_data.total_count_change, 0)
                self.assertEqual(event_data.last_policy_kind,
                                 QoSPolicyKind.INVALID)
            except NotImplementedError:
                pass

        self.node.destroy_publisher(publisher)
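
The publisher-side equivalent, again a sketch under the same assumptions (public API, illustrative names):

    # Minimal sketch (assumed setup): publisher QoS events via the public API.
    import rclpy
    from rclpy.node import Node
    from rclpy.qos_event import PublisherEventCallbacks
    from std_msgs.msg import Empty as EmptyMsg  # assumed message type

    rclpy.init()
    node = Node('qos_event_pub_demo')
    event_callbacks = PublisherEventCallbacks(
        deadline=lambda info: node.get_logger().info(f'deadline missed: {info}'),
        liveliness=lambda info: node.get_logger().info(f'liveliness lost: {info}'),
    )
    publisher = node.create_publisher(
        EmptyMsg, 'chatter', 10, event_callbacks=event_callbacks)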