Example 1
    def update_nodelta(self, params_dt):
        """
        Running updates of nothing (workaround until params are handled by cache)
        :param params_dt:
        :return:
        """
        # we are NOT done here, we might still need to update params
        params_if_dt = self.params_if_pool.update_delta(params_dt=params_dt)

        topic_types_dt = DiffTuple(
            added=[],
            removed=[]  # shouldn't matter
        )

        services_if_dt = DiffTuple([], [])
        # TMP : NOT dropping topics early (just be patient and wait for the cache callback to come...)
        # topics_if_dt = self.topics_pool.update_delta(topics_dt, topic_types_dt)
        subscribers_if_dt = DiffTuple([], [])
        publishers_if_dt = DiffTuple([], [])

        # we return here so that the normal full update is not performed
        dt = DiffTuple(added=params_if_dt.added + services_if_dt.added +
                       subscribers_if_dt.added + publishers_if_dt.added,
                       removed=params_if_dt.removed + services_if_dt.removed +
                       subscribers_if_dt.removed + publishers_if_dt.removed)

        self._debug_logger.debug("""
                            ROS INTERFACE DIFF ADDED : {dt.added}
                            ROS INTERFACE DIFF REMOVED : {dt.removed}
                        """.format(**locals()))

        return dt
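
For reference, the DiffTuple these methods pass around is just an added/removed pair; a minimal sketch, assuming the real definition (in the pyros interface base code) is equivalent to a simple namedtuple:

from collections import namedtuple

# Assumed shape of the diff container the pools return (added/removed name lists).
DiffTuple = namedtuple("DiffTuple", ["added", "removed"])

# Merging per-pool diffs into one, the way update_nodelta does:
params_if_dt = DiffTuple(added=["/test/param1"], removed=[])
services_if_dt = DiffTuple(added=[], removed=["/test/oldsrv"])
dt = DiffTuple(added=params_if_dt.added + services_if_dt.added,
               removed=params_if_dt.removed + services_if_dt.removed)
print(dt)  # DiffTuple(added=['/test/param1'], removed=['/test/oldsrv'])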
    def compute_state(self, subscribers_dt, topic_types_dt):
        """
        This is called only if there is a cache proxy with a callback, and expects DiffTuple filled up with names or types
        :param topics_dt:
        :return:
        """
        added_subs = {t[0]: t[1] for t in subscribers_dt.added}
        removed_subs = {t[0]: t[1] for t in subscribers_dt.removed}

        # remove nodes that appear in both the added and removed lists for the same topic -> no change seen
        for apn, ap in added_subs.iteritems():
            rp = removed_subs.get(apn, [])
            for n in [n for n in ap if n in rp]:
                ap.remove(n)
                rp.remove(n)

        subscribers_dt = DiffTuple(
            added=[[k, v] for k, v in added_subs.iteritems()],
            removed=[[k, v] for k, v in removed_subs.iteritems()])
        computed_subscribers_dt = DiffTuple([], [])
        _logger.debug("removed_subs_dt : {subscribers_dt}".format(**locals()))
        for t in subscribers_dt.added:
            tt = next(
                ifilter(lambda ltt: t[0] == ltt[0], topic_types_dt.added), [])
            ttp = TopicTuple(name=t[0],
                             type=tt[1] if len(tt) > 0 else None,
                             endpoints=set(t[1]))
            if ttp.name in self.available:
                # if already available, we only update the endpoints list
                self.available[ttp.name].endpoints |= ttp.endpoints
                # no change here, no need to add that topic to the computed diff.
            else:
                self.available[ttp.name] = ttp
                computed_subscribers_dt.added.append(t[0])

        for t in subscribers_dt.removed:
            tt = next(
                ifilter(lambda ltt: t[0] == ltt[0], topic_types_dt.removed),
                [])
            ttp = TopicTuple(name=t[0],
                             type=tt[1] if len(tt) > 0 else None,
                             endpoints=set(t[1]))
            if ttp.name in self.available:
                self.available[ttp.name].endpoints -= ttp.endpoints
                if not self.available[ttp.name].endpoints:
                    self.available.pop(ttp.name, None)
                    computed_subscribers_dt.removed.append(t[0])

        # We still need to return DiffTuples
        return computed_subscribers_dt
        def appear_disappear():
            # create the subscriber and then run the update, simulating
            # it coming online before the expose call.
            nonexistent_pub = rospy.Subscriber(topicname, Empty, queue_size=1)

            with Timeout(5) as t:
                dt = DiffTuple([], [])
                while not t.timed_out and nonexistent_pub.resolved_name not in dt.added:
                    subscribers, topic_types = self.get_system_state()
                    dt = self.subscriber_if_pool.update(
                        subscribers, topic_types)
                    self.assertEqual(dt.removed, [])  # nothing removed
                    time.sleep(0.1)  # to avoid spinning out of control

            self.assertTrue(not t.timed_out)
            self.assertTrue(nonexistent_pub.resolved_name
                            in dt.added)  # detected
            # TODO : do we need a test with subscriber ?

            # every added topic should be in the list of args
            self.assertTrue(
                topicname in self.subscriber_if_pool.subscribers_args)
            # topic backend has been created
            self.assertTrue(
                topicname in self.subscriber_if_pool.subscribers.keys())

            # up to here possible sequences should have been already tested by previous tests
            # Now comes our actual disappearance / withholding test
            nonexistent_pub.unregister()

            # every added topic should be in the list of args
            self.assertTrue(
                topicname in self.subscriber_if_pool.subscribers_args)
            # the backend should STILL be there
            self.assertTrue(
                topicname in self.subscriber_if_pool.subscribers.keys())
            # Note the Topic implementation should take care of possible errors in this case

            with Timeout(5) as t:
                dt = DiffTuple([], [])
                while not t.timed_out and topicname not in dt.removed:
                    subscribers, topic_types = self.get_system_state()
                    dt = self.subscriber_if_pool.update(
                        subscribers, topic_types)
                    self.assertEqual(dt.added, [])  # nothing added
                    time.sleep(0.1)  # to avoid spinning out of control

            self.assertTrue(not t.timed_out)
            self.assertTrue(topicname in dt.removed)  # detected lost
            # every exposed topic should remain in the list of args (in case a regex matches another topic)
            self.assertTrue(
                topicname in self.subscriber_if_pool.subscribers_args)
            # make sure the topic backend should NOT be there any longer
            self.assertTrue(
                topicname not in self.subscriber_if_pool.subscribers.keys())
    def test_service_update_disappear_withhold(self):
        """
        Test service exposing functionality for a service which appears in
        the ROS environment after being exposed. Simple normal use case.
        Sequence : UPDATE -> DISAPPEAR -> WITHHOLD
        :return:
        """

        servicename = '/test/absentsrv1'
        # the service has not been exposed yet, so it should not be in the list of args
        self.assertTrue(servicename not in self.service_if_pool.services_args)
        # the backend should not have been created
        self.assertTrue(servicename not in self.service_if_pool.services.keys())

        self.service_if_pool.expose_services([servicename])
        # every added service should be in the list of args
        self.assertTrue(servicename in self.service_if_pool.services_args)
        # service backend has not been created
        self.assertTrue(servicename not in self.service_if_pool.services.keys())

        dt = DiffTuple([], [])
        # create the service and then try updating again, simulating
        # it coming online after expose call.
        nonexistent_srv = rospy.Service(servicename, EmptySrv, srv_cb)
        try:
            # wait here until service actually appear in cache proxy
            with Timeout(5) as t:
                while not t.timed_out and nonexistent_srv.resolved_name not in dt.added:
                    services, service_types = self.get_system_state()
                    dt = self.service_if_pool.update(services, service_types)
                    self.assertEqual(dt.removed, [])  # nothing removed
                    time.sleep(0.1)  # to avoid spinning out of control

            self.assertTrue(not t.timed_out)
            self.assertTrue(nonexistent_srv.resolved_name in dt.added)  # nonexistent_srv added
            self.assertEqual(dt.removed, [])  # nothing removed

            # every added service should be in the list of args
            self.assertTrue(servicename in self.service_if_pool.services_args)
            # service backend has been created
            self.assertTrue(servicename in self.service_if_pool.services.keys())

        # up to here possible sequences should have been already tested by previous tests
        # Now comes our actual disappearance / withholding test
        finally:
            nonexistent_srv.shutdown('testing disappearing service')

        # every added service should be in the list of args
        self.assertTrue(servicename in self.service_if_pool.services_args)
        # the backend should STILL be there
        self.assertTrue(servicename in self.service_if_pool.services.keys())
        # Note the service implementation should take care of possible errors in this case

        self.service_if_pool.expose_services([])
        # every withheld service should NOT be in the list of args
        self.assertTrue(servicename not in self.service_if_pool.services_args)
        # service backend should NOT be there any longer
        self.assertTrue(servicename not in self.service_if_pool.services.keys())
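
The service tests register rospy.Service(servicename, EmptySrv, srv_cb) without showing the callback; a minimal placeholder fixture, assuming EmptySrv is std_srvs/Empty (a hypothetical sketch, the tests only need the service to exist and answer):

from std_srvs.srv import Empty as EmptySrv, EmptyResponse

def srv_cb(req):
    # The tests only need the service to be reachable; the reply content is irrelevant.
    return EmptyResponse()

# Used by the tests roughly as:
#   nonexistent_srv = rospy.Service('/test/absentsrv1', EmptySrv, srv_cb)
#   ...
#   nonexistent_srv.shutdown('testing disappearing service')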
    def test_service_expose_appear_update(self):
        """
        Test basic service adding functionality for a service which does not yet exist
        in the ros environment ( + corner cases )
        Sequence : (UPDATE? ->) -> EXPOSE -> (UPDATE? ->) APPEAR -> UPDATE
        :return:
        """
        servicename = '/test/absentsrv1'
        # the service has not been exposed yet, so it should not be in the list of args
        self.assertTrue(servicename not in self.service_if_pool.services_args)
        # the backend should not have been created
        self.assertTrue(servicename not in self.service_if_pool.services.keys())
        # First update should not change state
        services, service_types = self.get_system_state()
        dt = self.service_if_pool.update(services, service_types)
        self.assertEqual(dt.added, [])  # nothing added
        self.assertEqual(dt.removed, [])  # nothing removed
        # the service is still not exposed, so it should not be in the list of args
        self.assertTrue(servicename not in self.service_if_pool.services_args)
        # the backend should not have been created
        self.assertTrue(servicename not in self.service_if_pool.services.keys())

        self.service_if_pool.expose_services([servicename])
        # every added service should be in the list of args
        self.assertTrue(servicename in self.service_if_pool.services_args)
        # the backend should not have been created
        self.assertTrue(servicename not in self.service_if_pool.services.keys())
        services, service_types = self.get_system_state()
        dt = self.service_if_pool.update(services, service_types)
        self.assertEqual(dt.added, [])  # nothing added
        self.assertEqual(dt.removed, [])  # nothing removed
        # make sure the service is STILL in the list of args
        self.assertTrue(servicename in self.service_if_pool.services_args)
        # make sure the service backend has STILL not been created
        self.assertTrue(servicename not in self.service_if_pool.services.keys())

        # create the service and then try updating again, simulating
        # it coming online after expose call.
        nonexistent_srv = rospy.Service(servicename, EmptySrv, srv_cb)
        try:
            with Timeout(5) as t:
                dt = DiffTuple([], [])
                while not t.timed_out and nonexistent_srv.resolved_name not in dt.added:
                    services, service_types = self.get_system_state()
                    dt = self.service_if_pool.update(services, service_types)
                    self.assertEqual(dt.removed, [])  # nothing removed
                    time.sleep(0.1)  # to avoid spinning out of control

            self.assertTrue(not t.timed_out)
            self.assertTrue(nonexistent_srv.resolved_name in dt.added)  # nonexistent_srv added
            # every exposed service should remain in the list of args (in case a regex matches another service)
            self.assertTrue(servicename in self.service_if_pool.services_args)
            # make sure the service backend has been created
            self.assertTrue(servicename in self.service_if_pool.services.keys())
        finally:
            nonexistent_srv.shutdown('testing complete')
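
The tests' with Timeout(5) as t: blocks rely on a small helper exposing timed_out; a minimal sketch of one possible implementation (an assumption, the real helper in the test support code may differ):

import time

class Timeout(object):
    """Context manager reporting whether a wall-clock deadline has passed (illustrative sketch)."""
    def __init__(self, seconds):
        self.seconds = seconds
        self._deadline = None

    def __enter__(self):
        self._deadline = time.time() + self.seconds
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        return False  # do not swallow exceptions

    @property
    def timed_out(self):
        # Still usable after the with-block, as the tests assert on it afterwards.
        return time.time() > self._deadline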
Example 6
    def test_param_expose_appear_update(self):
        """
        Test basic param adding functionality for a param which does not yet exist
        in the ros environment ( + corner cases )
        Sequence : (UPDATE? ->) -> EXPOSE -> (UPDATE? ->) APPEAR -> UPDATE
        :return:
        """
        paramname = '/test/absentparam1'
        # the param has not been exposed yet, so it should not be in the list of args
        self.assertTrue(paramname not in self.param_if_pool.params_args)
        # the backend should not have been created
        self.assertTrue(paramname not in self.param_if_pool.params.keys())
        # First update should not change state
        params = self.get_system_state()
        dt = self.param_if_pool.update(params)
        self.assertEqual(dt.added, [])  # nothing added
        self.assertEqual(dt.removed, [])  # nothing removed
        # the param is still not exposed, so it should not be in the list of args
        self.assertTrue(paramname not in self.param_if_pool.params_args)
        # the backend should not have been created
        self.assertTrue(paramname not in self.param_if_pool.params.keys())

        self.param_if_pool.expose_params([paramname])
        # every added param should be in the list of args
        self.assertTrue(paramname in self.param_if_pool.params_args)
        # the backend should not have been created
        self.assertTrue(paramname not in self.param_if_pool.params.keys())
        params = self.get_system_state()
        dt = self.param_if_pool.update(params)
        self.assertEqual(dt.added, [])  # nothing added
        self.assertEqual(dt.removed, [])  # nothing removed
        # make sure the param is STILL in the list of args
        self.assertTrue(paramname in self.param_if_pool.params_args)
        # make sure the param backend has STILL not been created
        self.assertTrue(paramname not in self.param_if_pool.params.keys())

        # create the param and then try updating again, simulating
        # it coming online after expose call.
        rospy.set_param(paramname, 'param_value')
        try:
            with Timeout(5) as t:
                dt = DiffTuple([], [])
                while not t.timed_out and paramname not in dt.added:
                    params = self.get_system_state()
                    dt = self.param_if_pool.update(params)
                    self.assertEqual(dt.removed, [])  # nothing removed
                    time.sleep(0.1)  # to avoid spinning out of control

            self.assertTrue(not t.timed_out)
            self.assertTrue(paramname in dt.added)  # paramname added
            # every exposed param should remain in the list of args (in case a regex matches another param)
            self.assertTrue(paramname in self.param_if_pool.params_args)
            # make sure the param backend has been created
            self.assertTrue(paramname in self.param_if_pool.params.keys())
        finally:
            rospy.delete_param(paramname)
Example 7
    def update_fullstate(self, publishers, subscribers, services, params,
                         topic_types, service_types):
        # NORMAL full update
        self._debug_logger.debug("""SYSTEM STATE :
                    - publishers : {publishers}
                    - subscribers : {subscribers}
                    - services : {services}
                    - params : {params}
                    - topic_types : {topic_types}
                    - service_types : {service_types}
                """.format(**locals()))

        # TODO : unify with the reset behavior in case of cache...

        # Needs to be done first, since topic algorithm depends on it
        # print("PARAMS : {params}".format(**locals()))
        params_if_dt = self.params_if_pool.update(params=params)
        # print("PARAM IF DT : {params_if_dt}".format(**locals()))

        # print("SERVICES : {services}".format(**locals()))
        services_if_dt = self.services_if_pool.update(services, service_types)
        # print("SERVICE IF DT : {services_if_dt}".format(**locals()))

        # print("SUBSCRIBERS : {subscribers}".format(**locals()))
        subscribers_if_dt = self.subscribers_if_pool.update(
            subscribers, topic_types)
        # print("SUBSCRIBER IF DT : {subscribers_if_dt}".format(**locals()))

        # print("PUBLISHERS : {publishers}".format(**locals()))
        publishers_if_dt = self.publishers_if_pool.update(
            publishers, topic_types)
        # print("PUBLISHER IF DT : {publishers_if_dt}".format(**locals()))

        dt = DiffTuple(added=params_if_dt.added + services_if_dt.added +
                       subscribers_if_dt.added + publishers_if_dt.added,
                       removed=params_if_dt.removed + services_if_dt.removed +
                       subscribers_if_dt.removed + publishers_if_dt.removed)

        self._debug_logger.debug("""
                    ROS INTERFACE ADDED : {dt.added}
                    ROS INTERFACE REMOVED : {dt.removed}
                """.format(**locals()))

        return dt
    def test_subscriber_appear_expose_update(self):
        """
        Test topic exposing functionality for a topic which already exists in
        the ROS environment. Simple normal use case.
        Sequence : APPEAR -> EXPOSE -> UPDATE
        :return:
        """
        topicname = '/test/string'
        self.subscriber_if_pool.expose_subscribers([topicname])
        # every added topic should be in the list of args
        self.assertTrue(topicname in self.subscriber_if_pool.subscribers_args)
        # topic backend has not been created since the update didn't run yet
        self.assertTrue(
            topicname not in self.subscriber_if_pool.subscribers.keys())

        dt = DiffTuple([], [])
        # NOTE : We need to wait to make sure the tests nodes are started...
        with Timeout(5) as t:
            while not t.timed_out and topicname not in dt.added:
                subscribers, topic_types = self.get_system_state()
                dt = self.subscriber_if_pool.update(subscribers, topic_types)
                time.sleep(0.1)  # to avoid spinning out of control

        self.assertTrue(not t.timed_out)
        self.assertTrue(topicname in dt.added)  # has been detected

        # every exposed topic should remain in the list of args (in case a regex matches another topic)
        self.assertTrue(topicname in self.subscriber_if_pool.subscribers_args)
        # make sure the topic backend has been created
        self.assertTrue(
            topicname in self.subscriber_if_pool.subscribers.keys())

        # cleaning up
        self.subscriber_if_pool.expose_subscribers([])
        # the withheld topic should no longer be in the list of args
        self.assertTrue(
            topicname not in self.subscriber_if_pool.subscribers_args)
        # the topic backend should be gone since the topic is no longer exposed
        self.assertTrue(
            topicname not in self.subscriber_if_pool.subscribers.keys())
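
The subscriber tests call self.get_system_state() and expect a (subscribers, topic_types) pair; a plausible stand-in built directly on the ROS master API (an assumption — the real fixture may pull this from the connection cache instead, and the service/param tests return different tuples):

import rosgraph

def get_system_state():
    # Query the ROS master directly (illustrative only).
    master = rosgraph.Master('/pyros_test')
    publishers, subscribers, services = master.getSystemState()
    topic_types = master.getTopicTypes()
    # subscribers : [[topic_name, [node_name, ...]], ...]
    # topic_types : [[topic_name, topic_type], ...]
    return subscribers, topic_types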
Example 9
    def compute_state(self, params_dt):
        """
        called to update params from rospy.
        CAREFUL : this can be called from another thread (subscriber callback)
        """

        computed_params_dt = DiffTuple([], [])
        for p in params_dt.added:
            pt = ParamTuple(name=p, type=None)
            if pt.name in self.available:
                if self.available[pt.name].type is None or pt.type is not None:
                    self.available[pt.name].type = pt.type
            else:
                self.available[pt.name] = pt
                computed_params_dt.added.append(pt.name)

        for p in params_dt.removed:
            pt = ParamTuple(name=p, type=None)
            if pt.name in self.available:
                self.available.pop(pt.name, None)
                computed_params_dt.removed.append(pt.name)

        return computed_params_dt
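
compute_state mutates .type on ParamTuple entries, and the subscriber version mutates .endpoints on TopicTuple entries, so both containers need mutable fields; a minimal sketch of one possible shape (an assumption, the real classes may carry more):

class ParamTuple(object):
    # Mutable on purpose: compute_state may fill in the type later.
    def __init__(self, name, type=None):
        self.name = name
        self.type = type

class TopicTuple(object):
    # endpoints is the set of node names currently providing the topic.
    def __init__(self, name, type=None, endpoints=None):
        self.name = name
        self.type = type
        self.endpoints = set(endpoints or [])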
Example 10
    def test_publisher_update_disappear_withhold(self):
        """
        Test topic exposing functionality for a topic which appears in
        the ROS environment after being exposed. Simple normal use case.
        Sequence : UPDATE -> DISAPPEAR -> WITHHOLD
        :return:
        """

        topicname = '/test/nonexistent5'
        # the topic has not been exposed yet, so it should not be in the list of args
        self.assertTrue(
            topicname not in self.publisher_if_pool.publishers_args)
        # the backend should not have been created
        self.assertTrue(
            topicname not in self.publisher_if_pool.publishers.keys())
        # First update should not change state
        publishers, topic_types = self.get_system_state()
        dt = self.publisher_if_pool.update(publishers, topic_types)
        self.assertEqual(dt.added, [])  # nothing added
        self.assertEqual(dt.removed, [])  # nothing removed
        # the topic is still not exposed, so it should not be in the list of args
        self.assertTrue(
            topicname not in self.publisher_if_pool.publishers_args)
        # the backend should not have been created
        self.assertTrue(
            topicname not in self.publisher_if_pool.publishers.keys())

        dt = self.publisher_if_pool.expose_publishers([topicname])
        self.assertEqual(dt.added, [])  # nothing added yet ( not existing )
        self.assertEqual(dt.removed, [])  # nothing removed
        # every added topic should be in the list of args
        self.assertTrue(topicname in self.publisher_if_pool.publishers_args)
        # topic backend has not been created
        self.assertTrue(
            topicname not in self.publisher_if_pool.publishers.keys())

        # create the publisher and then try exposing the topic again, simulating
        # it coming online before expose call.
        nonexistent_pub = rospy.Publisher(topicname, Empty, queue_size=1)

        with Timeout(5) as t:
            dt = DiffTuple([], [])
            while not t.timed_out and nonexistent_pub.resolved_name not in dt.added:
                publishers, topic_types = self.get_system_state()
                dt = self.publisher_if_pool.update(publishers, topic_types)
                self.assertEqual(dt.removed, [])  # nothing removed
                time.sleep(0.1)  # to avoid spinning out of control

        self.assertTrue(not t.timed_out)
        self.assertTrue(nonexistent_pub.resolved_name
                        in dt.added)  # added now because it just appeared
        self.assertEqual(dt.removed, [])  # nothing removed
        # TODO : do we need a test with subscriber ?

        # every added topic should be in the list of args
        self.assertTrue(topicname in self.publisher_if_pool.publishers_args)
        # topic backend has been created
        self.assertTrue(topicname in self.publisher_if_pool.publishers.keys())

        # up to here possible sequences should have been already tested by previous tests
        # Now comes our actual disappearance / withholding test

        nonexistent_pub.unregister()  # https://github.com/ros/ros_comm/issues/111 ( topic is still registered on master... )
        # TODO : test disappear ( how ? )

        # every added topic should be in the list of args
        self.assertTrue(topicname in self.publisher_if_pool.publishers_args)
        # the backend should STILL be there
        self.assertTrue(topicname in self.publisher_if_pool.publishers.keys())
        # Note the Topic implementation should take care of possible errors in this case

        self.publisher_if_pool.expose_publishers([])
        # every withheld topic should NOT be in the list of args
        self.assertTrue(
            topicname not in self.publisher_if_pool.publishers_args)
        # topic backend should NOT be there any longer
        self.assertTrue(
            topicname not in self.publisher_if_pool.publishers.keys())
    def test_service_withhold_update_disappear(self):
        """
        Test service withholding functionality for a service which no longer exists in
        the ROS environment. Normal use case.
        Sequence : (-> UPDATE ?) -> WITHHOLD -> UPDATE -> DISAPPEAR (-> UPDATE ?)
        :return:
        """
        servicename = '/test/absentsrv1'
        # the service has not been exposed yet, so it should not be in the list of args
        self.assertTrue(servicename not in self.service_if_pool.services_args)
        # the backend should not have been created
        self.assertTrue(servicename not in self.service_if_pool.services.keys())

        self.service_if_pool.expose_services([servicename])
        # every added service should be in the list of args
        self.assertTrue(servicename in self.service_if_pool.services_args)
        # service backend has NOT been created yet
        self.assertTrue(servicename not in self.service_if_pool.services.keys())

        dt = DiffTuple([], [])
        # create the service and then try updating again, simulating
        # it coming online after expose call.
        nonexistent_srv = rospy.Service(servicename, EmptySrv, srv_cb)
        try:
            # wait here until service actually appear in cache proxy
            with Timeout(5) as t:
                while not t.timed_out and nonexistent_srv.resolved_name not in dt.added:
                    services, service_types = self.get_system_state()
                    dt = self.service_if_pool.update(services, service_types)
                    self.assertEqual(dt.removed, [])  # nothing removed
                    time.sleep(0.1)  # to avoid spinning out of control

            self.assertTrue(not t.timed_out)
            self.assertTrue(nonexistent_srv.resolved_name in dt.added)  # nonexistent_srv added
            self.assertEqual(dt.removed, [])  # nothing removed

            # the exposed service (to be withheld later) should STILL be in the list of args
            self.assertTrue(servicename in self.service_if_pool.services_args)
            # service backend has been created
            self.assertTrue(servicename in self.service_if_pool.services.keys())

            dt = self.service_if_pool.expose_services([])
            self.assertEqual(dt.added, [])  # nothing added
            self.assertTrue(nonexistent_srv.resolved_name in dt.removed)  # nonexistent_srv removed
            # every withheld service should NOT be in the list of args
            self.assertTrue(servicename not in self.service_if_pool.services_args)
            # service backend should be GONE
            self.assertTrue(servicename not in self.service_if_pool.services.keys())

            services, service_types = self.get_system_state()
            dt = self.service_if_pool.update(services, service_types)
            self.assertEqual(dt.added, [])  # nothing added
            self.assertEqual(dt.removed, [])  # nothing removed
            # every withheld service should STILL NOT be in the list of args
            self.assertTrue(servicename not in self.service_if_pool.services_args)
            # service backend should be GONE
            self.assertTrue(servicename not in self.service_if_pool.services.keys())
        finally:
            nonexistent_srv.shutdown('testing disappearing service')

        services, service_types = self.get_system_state()
        dt = self.service_if_pool.update(services, service_types)
        self.assertEqual(dt.added, [])  # nothing added
        self.assertEqual(dt.removed, [])  # nonexistent_srv already removed
        # every withheld service should STILL NOT be in the list of args
        self.assertTrue(servicename not in self.service_if_pool.services_args)
        # service backend should be GONE
        self.assertTrue(servicename not in self.service_if_pool.services.keys())
Example 12
    def update_statedelta(self, added_publishers, added_subscribers,
                          added_services, added_params, added_topic_types,
                          added_service_types, removed_publishers,
                          removed_subscribers, removed_services,
                          removed_params, removed_topic_types,
                          removed_service_types):
        params_dt = DiffTuple(added=added_params, removed=removed_params)

        # Needs to be done first, since topic algorithm depends on it
        params_if_dt = self.params_if_pool.update_delta(params_dt=params_dt)

        # here we need to get only the nodes' names to match the ROS master API format
        services_dt = DiffTuple(
            added=[[k, [n[0] for n in nset]]
                   for k, nset in added_services.iteritems()],
            removed=[[k, [n[0] for n in nset]]
                     for k, nset in removed_services.iteritems()])

        service_types_dt = DiffTuple(added=added_service_types,
                                     removed=removed_service_types)

        services_if_dt = self.services_if_pool.update_delta(
            services_dt, service_types_dt)

        # here we need to get only the nodes' names to match the ROS master API format
        # CAREFUL about unicity here, a Pub|Sub can have multiple endpoints within the same node...
        # We do not care about that information. If we have to care, then we should pass also the uri...
        publishers_dt = DiffTuple(
            added=[[k, [n[0] for n in set(nset)]]
                   for k, nset in added_publishers.iteritems()],
            removed=[[k, [n[0] for n in set(nset)]]
                     for k, nset in removed_publishers.iteritems()])
        subscribers_dt = DiffTuple(
            added=[[k, [n[0] for n in set(nset)]]
                   for k, nset in added_subscribers.iteritems()],
            removed=[[k, [n[0] for n in set(nset)]]
                     for k, nset in removed_subscribers.iteritems()])

        # NOW DONE IN update_delta
        # # CAREFUL, topic interface by itself also makes the topic detected on system
        # # Check if there are any pyros interface with it and ignore them
        # topics_to_drop = TopicBack.get_interface_only_topics()
        # # TODO : simplify : same as early_topics_to_drop ?
        #
        # # filtering the topic dict
        # for td, tnode in added_topics.iteritems():
        #     tnode = [n for n in tnode if n not in topics_to_drop.get(td, [])]
        #
        # added_topics_list = [[td, added_topics[td]] for td in added_topics if added_topics[td]]  # filtering out topics with no endpoints
        #
        # # we also need to simulate topic removal here (only names), to trigger a cleanup of interface if it s last one
        # removed_topics_list = [[td, removed_topics[td]] for td in removed_topics] + early_topics_to_drop

        topic_types_dt = DiffTuple(added=added_topic_types,
                                   removed=removed_topic_types)

        subscribers_if_dt = self.subscribers_if_pool.update_delta(
            subscribers_dt, topic_types_dt)
        publishers_if_dt = self.publishers_if_pool.update_delta(
            publishers_dt, topic_types_dt)

        if publishers_if_dt.added or publishers_if_dt.removed:
            self._debug_logger.debug(
                rospy.get_name() +
                " Pyros.ros : Publishers Delta {publishers_if_dt}".format(
                    **locals()))
        if subscribers_if_dt.added or subscribers_if_dt.removed:
            self._debug_logger.debug(
                rospy.get_name() +
                " Pyros.ros : Subscribers Delta {subscribers_if_dt}".format(
                    **locals()))
        if services_if_dt.added or services_if_dt.removed:
            self._debug_logger.debug(
                rospy.get_name() +
                " Pyros.ros : Services Delta {services_if_dt}".format(
                    **locals()))

        # TODO : put that in debug log and show based on python logger configuration
        # print("Pyros ROS interface UPDATE")
        # print("Params ADDED : {0}".format([p for p in params_dt.added]))
        # print("Params GONE : {0}".format([p for p in params_dt.removed]))
        # print("Topics ADDED : {0}".format([t[0] for t in topics_dt.added] + early_topics_dt.added))
        # print("Topics GONE : {0}".format([t[0] for t in topics_dt.removed] + early_topics_dt.removed))
        # print("Srvs ADDED: {0}".format([s[0] for s in services_dt.added]))
        # print("Srvs GONE: {0}".format([s[0] for s in services_dt.removed]))

        # update_on_diff wants only names
        # dt = super(RosInterface, self).update_on_diff(
        #         DiffTuple([s[0] for s in services_dt.added], [s[0] for s in services_dt.removed]),
        #         DiffTuple([t[0] for t in topics_dt.added] + early_topics_dt.added, [t[0] for t in topics_dt.removed] + early_topics_dt.removed),
        #         # Careful params_dt has a different content than service and topics, due to different ROS API
        #         # TODO : make this as uniform as possible
        #         DiffTuple([p for p in params_dt.added], [p for p in params_dt.removed])
        # )

        # we return here so that the normal full update is not performed
        dt = DiffTuple(added=params_if_dt.added + services_if_dt.added +
                       subscribers_if_dt.added + publishers_if_dt.added,
                       removed=params_if_dt.removed + services_if_dt.removed +
                       subscribers_if_dt.removed + publishers_if_dt.removed)

        self._debug_logger.debug("""
                                        ROS INTERFACE DIFF ADDED : {dt.added}
                                        ROS INTERFACE DIFF REMOVED : {dt.removed}
                                    """.format(**locals()))

        return dt
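
The conversions in update_statedelta reshape the cache's name-to-node-set mapping into the ROS master API list format; a small worked example with hypothetical data:

# Cache-style delta: topic/service name -> set of (node_name, node_uri) pairs.
added_publishers = {
    '/test/string': {('/talker', 'http://localhost:45678/')},
}

# Master-API-style list: [[name, [node_name, ...]], ...], keeping only node names
# and de-duplicating endpoints within the same node.
publishers_dt_added = [[k, [n[0] for n in set(nset)]]
                       for k, nset in added_publishers.items()]
print(publishers_dt_added)  # [['/test/string', ['/talker']]]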
Example 13
    def update(self):

        backedup_complete_cb_ss = None

        # update will retrieve the system state here
        publishers = []
        subscribers = []
        services = []
        params = []
        topic_types = []
        service_types = []

        # and populate these to represent the changes done on the interface
        params_if_dt = DiffTuple([], [])
        services_if_dt = DiffTuple([], [])
        subscribers_if_dt = DiffTuple([], [])
        publishers_if_dt = DiffTuple([], [])

        # Destroying connection cache proxy if needed
        if self.connection_cache is not None and not self.enable_cache:
            # removing existing connection cache proxy to force a reinit of everything
            # to make sure we don't get a messed up system state with wrong list/diff from
            # dynamically switching the cache on and off.
            self.connection_cache = None

        # TODO Instead of one or the other, we should have "two layer behaviors" with different frequencies
        # Fast loop checking only diff
        # Slow loop checking full state
        # It will allow recovering from any mistakes because of wrong diffs (update speed/race conditions/etc.)
        if self.enable_cache:
            if self.connection_cache is None:  # Building Connection Cache Proxy if needed
                self.connection_cache = connection_cache_proxy_create(
                    self._proxy_cb)
                if self.connection_cache:
                    self.enable_cache = True
                else:
                    self.enable_cache = False

        # TMP until it's implemented in the connection cache
        # because the cache doesn't currently do it
        params = set(rospy.get_param_names())
        # determining params diff despite lack of API
        params_dt = DiffTuple(
            added=[p for p in params if p not in self.params_available],
            removed=[p for p in self.params_available if p not in params])

        # If we have the connection_cache and a callback setup we process the diff (and maybe param changes)
        if self.connection_cache and (params_dt.added or params_dt.removed
                                      or backedup_complete_cb_ss is not None
                                      or self.cb_ss.qsize() > 0):
            try:
                if backedup_complete_cb_ss is not None:
                    # using the message backed up after latest diff process
                    cb_ss = backedup_complete_cb_ss
                    backedup_complete_cb_ss = None  #CAREFUL : untested...
                else:
                    cb_ss = self.cb_ss.get_nowait()
                # print("CC MSG !")  # if we didn't except on empty queue, so we got a message

            except Queue.Empty:

                return self.update_nodelta(params_dt)

            else:
                # if there is no diff content,
                # it means it's the first (complete) message and we need to initialize with the full list
                if cb_ss.added is None and cb_ss.removed is None:
                    #print("CC COMPLETE !")
                    publishers = cb_ss.complete.get('publishers', {})
                    subscribers = cb_ss.complete.get('subscribers', {})
                    services = cb_ss.complete.get('services', {})
                    # NOT YET...
                    #params = cb_ss.complete.get('params', params)
                    topic_types = cb_ss.complete.get('topic_types', [])
                    service_types = cb_ss.complete.get('service_types', [])

                    # To convert to ROS master API format :
                    publishers = [[p, [n[0] for n in nset]]
                                  for p, nset in publishers.iteritems()]
                    subscribers = [[s, [n[0] for n in nset]]
                                   for s, nset in subscribers.iteritems()]
                    services = [[s, [n[0] for n in nset]]
                                for s, nset in services.iteritems()]

                    #print("CC COMPLETEDDDD !")

                    # we go back to normal flow
                    #print("UPDATE FULLSTATE ON CACHE LIST")
                    return self.update_fullstate(publishers, subscribers,
                                                 services, params, topic_types,
                                                 service_types)

                else:  # we have a delta, we can use it directly and skip the rest

                    #print("CC DELTA !")
                    next_cb_ss = cb_ss
                    while self.cb_ss.qsize() > 0 and not (
                            next_cb_ss.added is None and next_cb_ss is None):
                        # merging multiple diff messages as fast as possible, until the next complete status
                        next_cb_ss = self.cb_ss.get_nowait()
                        if not (next_cb_ss.added is None
                                and next_cb_ss is None):
                            cb_ss = CacheTuple(
                                complete=None,  # this is ignored here
                                # CAREFUL one of added or remove can still be None here...
                                added=connection_cache_merge_marshalled(
                                    next_cb_ss.added or {}, cb_ss.added),
                                removed=connection_cache_merge_marshalled(
                                    next_cb_ss.removed or {}, cb_ss.removed))
                        else:  # we need to pass next_cb_ss to handle complete list again...
                            backedup_complete_cb_ss = next_cb_ss

                    added_publishers = cb_ss.added.get('publishers', {})
                    added_subscribers = cb_ss.added.get('subscribers', {})
                    added_services = cb_ss.added.get('services', {})
                    # NOT YET
                    #added_params = cb_ss.added.get('params', params_dt.added)
                    added_params = params_dt.added
                    added_topic_types = cb_ss.added.get('topic_types', [])
                    added_service_types = cb_ss.added.get('service_types', [])

                    removed_publishers = cb_ss.removed.get('publishers', {})
                    removed_subscribers = cb_ss.removed.get('subscribers', {})
                    removed_services = cb_ss.removed.get('services', {})
                    # NOT YET
                    #removed_params = cb_ss.removed.get('params', params_dt.removed)
                    removed_params = params_dt.removed
                    removed_topic_types = cb_ss.removed.get('topic_types', [])
                    removed_service_types = cb_ss.removed.get(
                        'service_types', [])

                    #print("UPDATE STATEDELTA ON CACHE DIFF")
                    return self.update_statedelta(
                        added_publishers, added_subscribers, added_services,
                        added_params, added_topic_types, added_service_types,
                        removed_publishers, removed_subscribers,
                        removed_services, removed_params, removed_topic_types,
                        removed_service_types)
        elif not self.connection_cache:  # make sure we are not using connection cache (otherwise state representations might be out of sync !!)
            #print("GETTING STATE FROM MASTER")
            publishers, subscribers, services, params, topic_types, service_types = self.retrieve_system_state(
            )  # This will call the master

            #print("UPDATE FULLSTATE")
            # NOTE : we want to be certain here that we do not mix full state representation from master with representation from cache (out of sync !!!)
            return self.update_fullstate(publishers, subscribers, services,
                                         params, topic_types, service_types)
        else:
            # This happens when the update is triggered, we are supposed to use the cache, but we got no message from the connection cache proxy.
            # WE DO NOT WANT TO GET FULLSTATE FROM CACHE, since it is out of date (ex: removed publishers are not removed until next message...)
            # print("GETTING STATE FROM CACHE")
            # publishers, subscribers, services, params, topic_types, service_types = self.retrieve_system_state()  # This will call the cache
            #
            # print("UPDATE FULLSTATE")
            # return self.update_fullstate(publishers, subscribers, services, params, topic_types, service_types)
            # ==> We need to wait for next message...
            return self.update_nodelta(params_dt)
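
update() wraps the cache callback messages in CacheTuple(complete=..., added=..., removed=...); a minimal sketch of that container, assuming a plain namedtuple with exactly the fields used above:

from collections import namedtuple

# Assumed shape of the connection-cache callback message wrapper:
#   complete : full marshalled system state (None for a diff message)
#   added / removed : marshalled partial state (both None for a complete message)
CacheTuple = namedtuple("CacheTuple", ["complete", "added", "removed"])

# A "complete" message and a "diff" message, as update() distinguishes them:
full_msg = CacheTuple(complete={'publishers': {}, 'services': {}}, added=None, removed=None)
diff_msg = CacheTuple(complete=None,
                      added={'publishers': {'/chatter': {('/talker', '')}}},
                      removed={})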
    def update_delta(self, subscribers_dt, topic_types_dt=None):

        # FILTERING OUT TOPICS CREATED BY INTERFACE :

        # First we get all pubs/subs interfaces only nodes
        subs_if_nodes_on, subs_if_nodes_off = self.get_sub_interfaces_only_nodes(
        )

        # print(" SUB ADDED DETECTED :")
        # print(subscribers_dt.added)

        # Second we filter out ON interface topics from received ADDED topics list
        subscribers_dt_added = [[
            t[0],
            [n for n in t[1] if n not in subs_if_nodes_on.get(t[0], set())]
        ] for t in subscribers_dt.added]

        # filtering out topics with no endpoints
        subscribers_dt_added = [[tl[0], tl[1]] for tl in subscribers_dt_added
                                if tl[1]]

        # print(" SUB ADDED FILTERED :")
        # print(subscribers_dt_added)
        #
        # print(" SUB REMOVED DETECTED :")
        # print(subscribers_dt.removed)

        # Then we filter out OFF interface topics from the received REMOVED topics list
        subscribers_dt_removed = [
            [
                t[0],
                [
                    n for n in t[1]
                    #if n not in topics_if_nodes_off.get(t[0], set())
                    # NOT DOABLE CURRENTLY : we would also prevent re-interfacing a node that came back up...
                    # Probably better to fix flow between direct update and callback first...
                    # BUT compute_state() should take care of this...
                ]
            ] for t in subscribers_dt.removed
        ]

        # # Second we merge in ON interface topics into received REMOVED topics list
        # # This is useful to drop topics interfaces that are satisfying themselves...
        # for t, nodeset in subs_if_nodes_on.iteritems():
        #     # note manipulating dictionaries will allow us to get rid of this mess
        #     found = False
        #     for td in subscribers_dt_removed:
        #         if td[0] == t:
        #             td[1] += nodeset
        #             found = True
        #             break
        #     if not found:
        #         subscribers_dt_removed.append([t, list(nodeset)])

        # filtering out topics with no endpoints
        subscribers_dt_removed = [[tl[0], tl[1]]
                                  for tl in subscribers_dt_removed if tl[1]]

        # print(" SUB REMOVED FILTERED :")
        # print(subscribers_dt_removed)

        # computing state representation
        subscribers_namelist_dt = self.compute_state(
            DiffTuple(added=subscribers_dt_added,
                      removed=subscribers_dt_removed), topic_types_dt or [])

        if subscribers_namelist_dt.added or subscribers_namelist_dt.removed:
            _logger.debug(rospy.get_name() +
                          " Subscribers Delta {subscribers_namelist_dt}".
                          format(**locals()))

        # TODO : put that in debug log and show based on python logger configuration

        # print("SUBSCRIBER APPEARED: {subscribers_namelist_dt.added}".format(**locals()))
        # print("SUBSCRIBER GONE : {subscribers_namelist_dt.removed}".format(**locals()))

        # update_services wants only names
        dt = self.transient_change_diff(
            transient_appeared=subscribers_namelist_dt.added,
            transient_gone=subscribers_namelist_dt.removed  # we want only the names here
            # add_names=regexes_match_sublist(self.transients_args, [s[0] for s in topics_dt.added]),
            # remove_names=[s[0] for s in topics_dt.removed if s[0] not in self.get_transients_available()]
        )

        if dt.added or dt.removed:
            _logger.debug(rospy.get_name() +
                          " Update Delta {dt}".format(**locals()))
            # print(" UPDATE DELTA:")
            # print(dt)
        return dt
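
To make the interface-node filtering in update_delta concrete, a small worked example with hypothetical data: nodes owned by our own interfaces are dropped from the added list, then topics left without endpoints are discarded:

# Hypothetical detected delta and interface-owned nodes:
subscribers_dt_added = [['/test/string', ['/listener', '/pyros_if_node']],
                        ['/test/empty', ['/pyros_if_node']]]
subs_if_nodes_on = {'/test/string': {'/pyros_if_node'},
                    '/test/empty': {'/pyros_if_node'}}

# Drop our own interface nodes from each topic's endpoint list...
filtered = [[t[0], [n for n in t[1] if n not in subs_if_nodes_on.get(t[0], set())]]
            for t in subscribers_dt_added]
# ...then drop topics that no longer have any endpoint.
filtered = [[name, nodes] for name, nodes in filtered if nodes]
print(filtered)  # [['/test/string', ['/listener']]]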