def test_remote_goal(self):
    """Register a remote GoalBase at the manager and verify it becomes satisfied."""
    method_prefix = self.__message_prefix + "TestRemoteGoal"
    planner_prefix = method_prefix + "Manager"
    manager = Manager(activationThreshold=7, prefix=planner_prefix)

    topic_name = method_prefix + '/Topic'
    sensor = TopicSensor(topic=topic_name, message_type=Bool, initial_value=False)
    condition = Condition(sensor, BooleanActivator())

    # behaviour that fulfils the goal condition by publishing True on the topic
    SetTrueBehavior(effect_name=sensor.name, topic_name=topic_name,
                    name=method_prefix + "SetTrue", planner_prefix=planner_prefix)

    goal = GoalBase(method_prefix + 'CentralGoal', planner_prefix=planner_prefix)
    goal.add_condition(condition)

    # let the manager decide and the behaviour act for a few steps
    for _ in range(3):
        manager.step()
        rospy.sleep(0.1)

    goal_proxy = manager.goals[0]
    goal_proxy.fetchStatus(3)
    self.assertTrue(goal_proxy.satisfied, 'Goal is not satisfied')
def test_guarantee_decision(self):
    """ Testing guarantee_decision parameter of manager.step() """
    method_prefix = self.__message_prefix + "test_guarantee_decision"
    planner_prefix = method_prefix + "Manager"
    m = Manager(activationThreshold=7.0, prefix=planner_prefix)

    sensor_3 = Sensor(name="Sensor3", initial_value=False)
    behaviour_1 = BehaviourBase(name="Behaviour1", planner_prefix=planner_prefix)
    # goal registration is the side effect we need; the reference itself is unused
    goal1 = GoalBase(name="Test_Goal1",
                     conditions=[Condition(sensor_3, BooleanActivator())],
                     planner_prefix=planner_prefix)

    # a plain step does not accumulate enough activation for execution
    m.step()
    self.assertFalse(
        behaviour_1._isExecuting,
        "Behaviour 1 is executed even though it should not have enough activation"
    )

    # forcing a decision must start the behaviour regardless of the activation level
    m.step(guarantee_decision=True)
    self.assertTrue(
        behaviour_1._isExecuting,
        "Behaviour 1 is not executed even though a decision was forced")
def test_offline_goal(self):
    """Attach an OfflineGoal directly to the manager and verify satisfaction."""
    method_prefix = self.__message_prefix + "TestOfflineGoal"
    planner_prefix = method_prefix + "/Manager"
    manager = Manager(activationThreshold=7, prefix=planner_prefix)

    topic_name = method_prefix + '/Topic'
    sensor = SimpleTopicSensor(topic=topic_name, message_type=Bool, initial_value=False)
    condition = Condition(sensor, BooleanActivator())

    # the behaviour's effect targets the PDDL function backing the goal condition
    pddl_function_name = condition.getFunctionNames()[0]
    SetTrueBehavior(effect_name=pddl_function_name, topic_name=topic_name,
                    name=method_prefix + "SetTrue", plannerPrefix=planner_prefix)

    goal = OfflineGoal('CentralGoal', planner_prefix=planner_prefix)
    goal.add_condition(condition)
    manager.add_goal(goal)

    for _ in range(3):
        manager.step()
        rospy.sleep(0.1)

    goal.fetchStatus(3)
    self.assertTrue(goal.satisfied, 'Goal is not satisfied')
    goal.unregister()
def test_activator(self):
    """GreedyActivator drives the IncreaserBehavior as long as the manager runs.

    Fixes over the previous revision:
    - method_prefix said "TestOfflineGoal" (copy-paste from another test); it now
      matches this test so prefixes and log files are attributable.
    - deprecated assertEquals replaced by assertEqual.
    """
    method_prefix = self.__message_prefix + "TestActivator"
    planner_prefix = method_prefix + "Manager"
    m = Manager(activationThreshold=7, prefix=planner_prefix)

    topic_name = 'IncreaseTopicTest/Topic'
    sensor = TopicSensor(topic=topic_name, name='IncreaseTopicTestSensor',
                         message_type=Int32, initial_value=0)
    activator = GreedyActivator()
    condition = Condition(sensor=sensor, activator=activator)

    IncreaserBehavior(topic_name=topic_name, effect_name=sensor.name,
                      createLogFiles=True, planner_prefix=planner_prefix)

    goal = OfflineGoal(name=self.__message_prefix + 'CentralGoal',
                       planner_prefix=planner_prefix)
    goal.add_condition(condition)
    m.add_goal(goal)

    number_of_steps = 15
    for _ in range(number_of_steps + 1):
        m.step()
        rospy.sleep(0.1)

    # it takes 2 steps until the activation has increased enough for execution
    expected_behaviour_steps = number_of_steps - 2
    self.assertEqual(expected_behaviour_steps, sensor.latestValue)
def test_rl_basic(self):
    """Smoke test for the reinforcement-learning activation algorithm."""
    method_prefix = self.__message_prefix + "test_rl_basic"
    planner_prefix = method_prefix + "Manager"
    register_in_factory(ActivationAlgorithmFactory)
    manager = Manager(activationThreshold=7, prefix=planner_prefix,
                      activation_algorithm="reinforcement")

    behaviour_a = BehaviourA(name=method_prefix + "A", planner_prefix=planner_prefix)
    behaviour_b = BehaviourB(name=method_prefix + "B", planner_prefix=planner_prefix)

    sensor = Sensor(name="bool_sensor", initial_value=False)

    # same effect so planner cannot distinguish the two behaviours
    behaviour_a.add_effect(Effect(sensor_name=sensor.name, indicator=1.0))
    behaviour_b.add_effect(Effect(sensor_name=sensor.name, indicator=1.0))

    goal = GoalBase(method_prefix + '_goal', planner_prefix=planner_prefix)
    goal.add_condition(Condition(sensor, BooleanActivator()))

    # just run a number of steps; the RL algorithm must not crash the manager
    for _ in range(10):
        manager.step()
        rospy.sleep(0.1)
def test_independent_behaviour(self):
    """Test behaviour property independentFromPlanner.

    Covers three combinations of independentFromPlanner and effects; the final
    assertion message previously referred to independent_behaviour2 by mistake
    and now names independent_behaviour3.
    """
    method_prefix = self.__message_prefix + "test_independent_behaviour"
    planner_prefix = method_prefix + "Manager"
    m = Manager(activationThreshold=7, prefix=planner_prefix)

    topic_name_1 = method_prefix + '/sensor_1'
    sensor = TopicSensor(topic=topic_name_1, message_type=Bool, initial_value=False)
    condition = Condition(sensor, BooleanActivator())

    # independentFromPlanner and effects
    independent_behaviour = SetTrueBehavior(effect_name=sensor.name, topic_name=topic_name_1,
                                            name=method_prefix + "SetTrue",
                                            planner_prefix=planner_prefix,
                                            independentFromPlanner=True)
    # independentFromPlanner and no effects
    independent_behaviour2 = SetTrueBehavior(effect_name=None, topic_name=topic_name_1,
                                             name=method_prefix + "SetTrue2",
                                             planner_prefix=planner_prefix,
                                             independentFromPlanner=True)
    # not independentFromPlanner and no effects
    independent_behaviour3 = SetTrueBehavior(effect_name=None, topic_name=topic_name_1,
                                             name=method_prefix + "SetTrue3",
                                             planner_prefix=planner_prefix,
                                             independentFromPlanner=False)

    goal = GoalBase(method_prefix + 'CentralGoal', planner_prefix=planner_prefix)
    goal.add_condition(condition)

    for _ in range(3):
        m.step()
        rospy.sleep(0.1)

    goal_proxy = m.goals[0]
    goal_proxy.fetchStatus(3)
    self.assertTrue(goal_proxy.satisfied, 'Goal is not satisfied')

    self.assertTrue(independent_behaviour._isExecuting,
                    "independent_behaviour is not executed")
    self.assertFalse(independent_behaviour2.was_executed,
                     "independent_behaviour2 was executed")
    self.assertFalse(independent_behaviour3.was_executed,
                     "independent_behaviour3 was executed")
def test_conditions_in_multiple_levels(self):
    """ Testing conditions that are used as well on the highest manager hierarchy level as well as in a sub manager
        of a NetworkBehaviour. In particular one conditions is used as precondition, the other one as goal condition.
    """
    method_prefix = self.__message_prefix + "/test_conditions_in_multiple_levels"

    # precondition shared by the network behaviour and the nested increaser behaviour
    pre_con_sensor = Sensor(name="Precon_sensor", initial_value=False)
    pre_con = Condition(pre_con_sensor, BooleanActivator(desiredValue=True))

    topic_name = method_prefix + '/Topic'
    sensor = TopicSensor(topic=topic_name, message_type=Int32, initial_value=0)
    # goal condition used on both hierarchy levels
    condition = Condition(sensor, ThresholdActivator(thresholdValue=3))

    planner_prefix = method_prefix + "/Manager"
    m = Manager(activationThreshold=7, prefix=planner_prefix)

    goal = OfflineGoal('CentralGoal', planner_prefix=planner_prefix)
    goal.add_condition(condition)
    m.add_goal(goal)

    effect = Effect(sensor_name=sensor.name, indicator=1, sensor_type=int,
                    activator_name=condition.activator.name)

    first_level_network = NetworkBehaviour(name=method_prefix + '/FirstLevel',
                                           planner_prefix=planner_prefix, createLogFiles=True)
    first_level_network.add_effects([effect])
    first_level_network.add_precondition(pre_con)

    # second goal reusing the very same condition object inside the network
    goal_with_same_cond = OfflineGoal('CentralGoal2', planner_prefix=planner_prefix)
    goal_with_same_cond.add_condition(condition)
    first_level_network.add_goal(goal_with_same_cond)

    # nested behaviour registered at the sub manager of the network behaviour
    increaser_behavior = IncreaserBehavior(effect_name=sensor.name, topic_name=topic_name,
                                           name=method_prefix + "TopicIncreaser",
                                           planner_prefix=first_level_network.get_manager_prefix())
    increaser_behavior.add_precondition(pre_con)

    # activate the first_level_network increaser_Behavior
    for x in range(0, 3, 1):
        # network must not run before the precondition is satisfied and activation built up
        self.assertFalse(first_level_network._isExecuting)
        m.step()
        pre_con_sensor.update(True)
        rospy.sleep(0.1)

    self.assertTrue(first_level_network._isExecuting)

    # a few more steps let the nested manager start the increaser behaviour
    for x in range(0, 4, 1):
        m.step()
        rospy.sleep(0.1)

    self.assertTrue(increaser_behavior._isExecuting)
def test_register_unregister(self):
    """Goal registration/unregistration cycles at the manager.

    Exercises unregister() with and without terminate_services and re-registration
    afterwards. Uses assertEqual instead of the deprecated assertEquals alias.
    """
    method_prefix = self.__message_prefix + "TestRegisterUnregisterGoal"
    planner_prefix = method_prefix + "Manager"
    m = Manager(activationThreshold=7, prefix=planner_prefix)

    topic_name = method_prefix + '/Topic'
    sensor = TopicSensor(topic=topic_name, message_type=Bool, initial_value=False)
    condition = Condition(sensor, BooleanActivator())

    goal = GoalBase(method_prefix + 'Goal', planner_prefix=planner_prefix)
    goal.add_condition(condition)
    rospy.sleep(0.1)
    m.step()
    self.assertEqual(len(m.goals), 1, 'goal not registered properly')

    goal.unregister()
    rospy.sleep(0.1)
    m.step()
    self.assertEqual(len(m.goals), 0, 'goal not unregistered properly')

    # try to register again
    goal.register()
    rospy.sleep(0.1)
    m.step()
    self.assertEqual(len(m.goals), 1, 'goal not registered properly')

    # unregister but keep the services alive this time
    goal.unregister(terminate_services=False)
    rospy.sleep(0.1)
    m.step()
    self.assertEqual(len(m.goals), 0, 'goal not unregistered properly')

    # try to register again
    goal.register()
    rospy.sleep(0.1)
    m.step()
    self.assertEqual(len(m.goals), 1, 'goal not registered properly')
def test_publisher_goal(self):
    """PublisherGoal disables itself once satisfied; its derived condition reflects that.

    Uses assertEqual instead of the deprecated assertEquals alias.
    """
    method_prefix = self.__message_prefix + "TestPublisherGoal"
    planner_prefix = method_prefix + "Manager"
    m = Manager(activationThreshold=7, prefix=planner_prefix)

    topic_name = method_prefix + '/Topic'
    sensor = TopicSensor(topic=topic_name, message_type=Bool, initial_value=False)
    condition = Condition(sensor, BooleanActivator())

    SetTrueBehavior(effect_name=sensor.name, topic_name=topic_name,
                    name=method_prefix + "SetTrue", planner_prefix=planner_prefix)

    goal = PublisherGoal(method_prefix + 'PublisherGoal', planner_prefix=planner_prefix)
    goal.add_condition(condition)

    # condition derived from the goal's enabled-state publisher
    goal_activated_condition = goal.create_condition()

    m.step()
    rospy.sleep(0.1)
    sensor.update(True)  # satisfy the goal condition

    m.step()
    rospy.sleep(0.1)

    goal_proxy = m.goals[0]
    goal_proxy.fetchStatus(2)
    self.assertFalse(goal_proxy.enabled, 'Goal still enabled')

    # manually updating this condition because it is not registered anywhere
    goal_activated_condition.sync()
    goal_activated_condition.updateComputation()

    # Satisfaction should become 0.0 if the goal is deactivated,
    # this also tests the publishing part indirectly
    self.assertEqual(goal_activated_condition.satisfaction, 0.0,
                     'goal_condition not properly updated')
def test_interruptable_behaviour(self):
    """ Test behaviour interruptable property """
    method_prefix = self.__message_prefix + "test_interruptable_behaviour"
    planner_prefix = method_prefix + "Manager"
    m = Manager(activationThreshold=7, prefix=planner_prefix)

    # behaviour that must keep running once started
    topic_name_1 = method_prefix + '/sensor_1'
    non_interruptable_sensor = SimpleTopicSensor(topic=topic_name_1, message_type=Int32, initial_value=False)
    non_interruptable_condition = Condition(non_interruptable_sensor, GreedyActivator())
    condition_function_name = non_interruptable_condition.getFunctionNames()[0]
    non_interruptable_behaviour = IncreaserBehavior(effect_name=condition_function_name, topic_name=topic_name_1,
                                                    name=method_prefix + "TopicIncreaser",
                                                    plannerPrefix=planner_prefix, interruptable=False)

    # behaviour that may be stopped by the manager at any time
    topic_name_2 = method_prefix + '/sensor_2'
    interruptable_sensor = SimpleTopicSensor(topic=topic_name_2, message_type=Int32, initial_value=False)
    interruptable_condition = Condition(interruptable_sensor, GreedyActivator())
    condition_function_name2 = interruptable_condition.getFunctionNames()[0]
    interruptable_behaviour = IncreaserBehavior(effect_name=condition_function_name2, topic_name=topic_name_2,
                                                name=method_prefix + "TopicIncreaser2",
                                                plannerPrefix=planner_prefix, interruptable=True)

    # precondition used to switch the behaviours off later on
    enable_sensor = Sensor(name='enable_sensor', initial_value=True)
    enable_cond = Condition(enable_sensor, BooleanActivator())
    non_interruptable_behaviour.addPrecondition(enable_cond)

    goal = GoalBase(method_prefix + 'CentralGoal', plannerPrefix=planner_prefix, permanent=True)
    goal.addCondition(non_interruptable_condition)
    goal.addCondition(interruptable_condition)

    # first normal operation: every behaviour runs as expected
    for x in range(0, 4, 1):
        m.step()
        rospy.sleep(0.1)
    self.assertTrue(non_interruptable_behaviour._isExecuting, "Non-Interruptable Behaviour is not executed")
    self.assertTrue(interruptable_behaviour._isExecuting, "Interruptable Behaviour is not executed")

    # disable non_interruptable_behaviour precondition and check if it is affected
    enable_sensor.update(False)
    for x in range(0, 1, 1):
        m.step()
        rospy.sleep(0.1)
    # non-interruptable behaviour must keep running despite the unmet precondition
    self.assertTrue(non_interruptable_behaviour._isExecuting, "Non-Interruptable Behaviour is not executed")
    self.assertTrue(interruptable_behaviour._isExecuting, "Interruptable Behaviour is not executed")

    # disable precondition of interruptable behaviour and check if it is disabled as well
    interruptable_behaviour.addPrecondition(enable_cond)
    for x in range(0, 1, 1):
        m.step()
        rospy.sleep(0.1)
    self.assertTrue(non_interruptable_behaviour._isExecuting, "Non-Interruptable Behaviour is not executed")
    self.assertFalse(interruptable_behaviour._isExecuting, "Interruptable Behaviour is executed")
class NetworkBehavior(BehaviourBase):
    """
    Behavior, which encapsulates an additional manager and behaviors.
    This allows to build hierarchies of hybrid behaviour planners.

    Fixes over the previous revision:
    - updateComputation was defined twice; the second definition silently
      shadowed the first, so send_discovery() was never called. Both bodies
      are merged into a single method.
    - _create_goal raised RuntimeError(msg=...), but RuntimeError accepts no
      'msg' keyword, so a TypeError was raised instead of the intended error.
    """

    MANAGER_POSTFIX = "Manager"

    def __init__(self, name, requires_execution_steps=True,
                 only_running_for_deciding_interruptible=Manager.USE_ONLY_RUNNING_BEHAVIOURS_FOR_INTERRUPTIBLE_DEFAULT_VALUE,
                 correlations=None, always_update_activation=False, **kwargs):
        """
        :param correlations: tuple <Effect>
        :param name: name of the behaviour that is also used to create the sub manager name together with the
                     NetworkBehavior.MANAGER_POSTFIX
        :param requires_execution_steps: whether the execution steps should be caused from the parent manager or not.
                                         If not, the step method must be called manually
        :param always_update_activation: if set to True the entire activation calculation of the sub manager is
                                         updated on each behaviour computation update
        :param kwargs: args for the manager, except the prefix arg
        """
        super(NetworkBehavior, self).__init__(name=name, requires_execution_steps=requires_execution_steps, **kwargs)
        self.requires_execution_steps = requires_execution_steps
        self.always_update_activation = always_update_activation
        manager_args = {}
        manager_args.update(kwargs)
        manager_args['prefix'] = self.get_manager_prefix()
        # the nested manager starts deactivated; it is activated in start()
        self.__manager = Manager(activated=False,
                                 use_only_running_behaviors_for_interRuptible=only_running_for_deciding_interruptible,
                                 **manager_args)
        self.__goal_name_prefix = name + "/Goals/"
        self.__goal_counter = 0
        if correlations is not None:
            self.add_effects(correlations)

    def updateComputation(self, manager_step):
        """
        Hook of the parent manager's computation update: advertises the nested
        manager while it is not executing and optionally refreshes its
        activation calculation.
        """
        super(NetworkBehavior, self).updateComputation(manager_step)
        if not self._isExecuting:
            self.__manager.send_discovery()
        # only trigger the update if not already activated because then it would be executed anyhow
        if self.always_update_activation and not self.__manager.activated:
            self.__manager.update_activation(plan_if_necessary=False)

    def _restore_condition_name_from_pddl_function_name(self, pddl_function_name, sensor_name):
        return Activator.restore_condition_name_from_pddl_function_name(pddl_function_name=pddl_function_name,
                                                                        sensor_name=sensor_name)

    def get_manager_prefix(self):
        """
        Return the manager prefix generated by the behaviour name and the MANAGER_POSTFIX
        :return: the manager prefix str
        """
        return self._name + '/' + NetworkBehavior.MANAGER_POSTFIX

    def __generate_goal_name(self, effect):
        """
        :param effect: instance of type Effect
        :return: unique name for goal
        """
        # X as separator between counter and sensor names, to prevent conflict caused by unusual names
        name = self.__goal_name_prefix + str(self.__goal_counter) + 'X' + effect.sensor_name
        self.__goal_counter += 1
        return name

    def _create_goal(self, sensor, effect, goal_name, activator_name):
        """
        Generate goals, which made the manager trying to work infinitely on the given effect,
        until the network is stopped. Therefore the goal shouldn't be reachable (except the
        goal for boolean effects)
        :param sensor: instance of type Sensor
        :param effect: instance of type Effect
        :param goal_name: unique name for the goal
        :return: a goal, which causes the manager to work on the effect during the whole time
        :raises RuntimeError: if the effect sensor type has no default goal mapping
        """
        if effect.sensor_type == str(bool):
            desired_value = True if effect.indicator > 0 else False
            activator = BooleanActivator(name=activator_name, desiredValue=desired_value)
            condition = Condition(activator=activator, sensor=sensor)
            return OfflineGoal(name=goal_name, planner_prefix=self.get_manager_prefix(), permanent=True,
                               conditions={condition})
        if effect.sensor_type == str(int) or effect.sensor_type == str(float):
            activator = GreedyActivator(maximize=effect.indicator > 0, step_size=abs(effect.indicator),
                                        name=activator_name)
            condition = Condition(activator=activator, sensor=sensor)
            return OfflineGoal(goal_name, planner_prefix=self.get_manager_prefix(), permanent=True,
                               conditions={condition})
        # bugfix: RuntimeError takes positional args only; msg= raised TypeError before
        raise RuntimeError('Cant create goal for effect type \'' + effect.sensor_type +
                           '\'. Overwrite the method _create_goal to handle the type')

    @deprecated
    def add_correlations(self, correlations):
        """
        Adds the given effects to the correlations of this Behavior.
        DEPRECATED: Use *add_effects* instead
        :param correlations: list of Effects
        """
        self.add_effects(correlations)

    @deprecated
    def add_correlations_and_goals(self, sensor_correlations):
        """
        Adds the given effects to the correlations of this Behavior.
        Furthermore creates a goal for each Effect and registers it at the nested Manager
        DEPRECATED: Use *add_effects_and_goals* instead
        :param sensor_correlations: list of tuples of (Sensor, Effect)
        """
        self.add_effects_and_goals(sensor_correlations)

    def add_effects(self, effects):
        """
        Adds the given effects to this Behavior.
        :param effects: list of Effects
        """
        self._correlations.extend(effects)

    def add_effects_and_goals(self, sensor_effect):
        """
        Adds the given effects to the correlations of this Behavior.
        Furthermore creates a goal for each Effect and registers it at the nested Manager
        :param sensor_effect: list of tuples of (Sensor, Effect)
        """
        # TODO this might have to be revised
        for sensor, effect in sensor_effect:
            goal_name = self.__generate_goal_name(effect)
            if not effect.activator_name:
                # NOTE(review): positional parameters are (pddl_function_name, sensor_name);
                # effect.sensor_name is passed as the pddl function name -- confirm this mapping
                activator_name = self._restore_condition_name_from_pddl_function_name(effect.sensor_name, sensor.name)
            else:
                activator_name = effect.activator_name
            goal = self._create_goal(sensor=sensor, effect=effect, goal_name=goal_name,
                                     activator_name=activator_name)
            self.__manager.add_goal(goal)
            self._correlations.append(effect)

    def add_goal(self, goal):
        """
        Adds the given goal to nested manager
        :param goal: AbstractGoalRepresentation
        """
        self.__manager.add_goal(goal)

    def do_step(self):
        # forward an execution step to the nested manager
        self.__manager.step()

    def start(self):
        self.__manager.activate()

    def stop(self):
        self.__manager.deactivate()

    def _is_interruptible(self):
        # delegate interruptibility to the nested manager's current state
        return self.__manager.is_interruptible()
def test_inverted_activation(self):
    """Compare wishes and PDDL output of mirrored (increasing vs decreasing) LinearActivators
    on the same sensor, plus an AverageSensorSatisfactionCondition over two sensors."""
    planner_prefix = "condition_elements_test"
    m = Manager(activationThreshold=7, prefix=planner_prefix, createLogFiles=True)

    satisfaction_threshold = 0.8

    sensor1 = Sensor()

    # mirrored activators over the same value range
    activator_increasing = LinearActivator(zeroActivationValue=0, fullActivationValue=1)
    activator_decreasing = LinearActivator(zeroActivationValue=1, fullActivationValue=0)
    condition_increasing = Condition(sensor1, activator_increasing)
    condition_decreasing = Condition(sensor1, activator_decreasing)

    sensor1.update(newValue=0.8)

    condition_increasing.sync()
    condition_increasing.updateComputation()
    condition_decreasing.sync()
    condition_decreasing.updateComputation()

    wish_increasing = condition_increasing.getWishes()[0]
    wish_decreasing = condition_decreasing.getWishes()[0]

    # at 0.8 the increasing condition wishes +0.2, the decreasing one -0.8
    self.assertAlmostEqual(0.2, wish_increasing.indicator, delta=0.001)
    self.assertAlmostEqual(-0.8, wish_decreasing.indicator, delta=0.001)

    increasing_precondition_precon_pddl = condition_increasing.getPreconditionPDDL(
        satisfaction_threshold=satisfaction_threshold)
    decreasing_precondition_precon_pddl = condition_decreasing.getPreconditionPDDL(
        satisfaction_threshold=satisfaction_threshold)

    increasing_precondition_state_pddl = condition_increasing.getStatePDDL()[0]
    decreasing_precondition_state_pddl = condition_decreasing.getStatePDDL()[0]

    # averaged condition across two sensors with the decreasing activator
    sensor2 = Sensor()
    sensor2.update(newValue=0.4)
    average_condition = AverageSensorSatisfactionCondition(sensors=[sensor1, sensor2],
                                                           activator=activator_decreasing)
    average_condition.sync()
    average_condition.updateComputation()

    wish_average = average_condition.getWishes()

    average_precon_pddl = average_condition.getPreconditionPDDL(
        satisfaction_threshold=satisfaction_threshold)
    average_state = average_condition.getStatePDDL()

    behaviour1 = BehaviourBase("behaviour_1", plannerPrefix=planner_prefix)
    behaviour1.add_effect(Effect(sensor_name=sensor1.name, indicator=-0.1, sensor_type=float))
    behaviour1.addPrecondition(condition_increasing)

    behaviour2 = BehaviourBase("behaviour_2", plannerPrefix=planner_prefix)
    behaviour2.add_effect(Effect(sensor_name=sensor1.name, indicator=0.1, sensor_type=float))
    behaviour2.addPrecondition(condition_decreasing)
    behaviour2.addPrecondition(average_condition)

    m.step()
    m.step()

    # fetch the PDDL representations generated by the manager stepping
    b1_state_pddl = behaviour1.getStatePDDL()
    b2_state_pddl = behaviour2.getStatePDDL()
    b1_action_pddl = behaviour1.getActionPDDL()
    b2_action_pddl = behaviour2.getActionPDDL()

    for x in range(0, 3, 1):
        m.step()
        rospy.sleep(0.1)
class ManagerNode(object):
    """
    ROS node wrapper for the rhbp manager/planner
    """

    def __init__(self):
        rospy.init_node('behaviourPlannerManager', log_level=rospy.WARN)
        prefix = rospy.get_param("~prefix", "")
        self._manager = Manager(prefix=prefix)
        self.rate = rospy.Rate(rospy.get_param("~frequency", 1))
        self.automatic_stepping = rospy.get_param("~automatic_stepping", True)
        if not self.automatic_stepping:
            rospy.logwarn("Started in manual stepping mode")
        self._init_services(prefix)

    def _init_services(self, prefix):
        """
        init all services handlers
        :param prefix: manager prefix
        """
        self._set_automatic_stepping_service = rospy.Service(prefix + '/' + 'set_automatic_stepping',
                                                             SetStepping, self._set_stepping_callback)
        self._get_automatic_stepping_service = rospy.Service(prefix + '/' + 'get_automatic_stepping',
                                                             GetStepping, self._get_stepping_callback)
        self._stepping_service = rospy.Service(prefix + '/' + 'step', Empty, self._step_callback)

    def _set_stepping_callback(self, request):
        """
        Callback service for enabling or disabling automatic stepping in the given frequency
        :param request:
        """
        self.automatic_stepping = request.automatic_stepping
        rospy.loginfo("Automatic Stepping changed to " + str(self.automatic_stepping))
        return SetSteppingResponse()

    def _get_stepping_callback(self, request):
        """
        Callback service for getting the current automatic stepping setting
        :param request:
        """
        return GetSteppingResponse(self.automatic_stepping)

    def _step_callback(self, request):
        """
        Service callback for manual planning/manager steps
        :param request:
        """
        if self.automatic_stepping:
            rospy.logwarn("No manual stepping if automatic stepping is enabled")
        else:
            self._manager.step()
        return EmptyResponse()

    def run(self):
        """ Executing the node after initialization """
        while not rospy.is_shutdown():
            if self.automatic_stepping:
                self._manager.step()
            else:
                self._manager.send_discovery()
            self.rate.sleep()
class RhbpAgent(object):
    """
    Main class of an agent, taking care of the main interaction with the mapc_ros_bridge
    """

    def __init__(self):
        rospy.logdebug("RhbpAgent::init")

        rospy.init_node('agent_node', anonymous=True, log_level=rospy.DEBUG)

        self._agent_name = rospy.get_param('~agent_name', 'agentA1')  # default for debugging 'agentA1'

        self._agent_topic_prefix = get_bridge_topic_prefix(agent_name=self._agent_name)

        # ensure also max_parallel_behaviours during debugging
        self._manager = Manager(prefix=self._agent_name, max_parallel_behaviours=1)

        # static things (terrain)
        self.behaviours = []
        self.goals = []

        self.perception_provider = PerceptionProvider()

        self.local_map = GridMap(agent_name=self._agent_name, live_plotting=True)

        # auction structure
        self.bids = {}
        self.number_of_agents = 2  # TODO: check if there's a way to get it automatically

        self._sim_started = False

        # subscribe to MAPC bridge core simulation topics
        rospy.Subscriber(self._agent_topic_prefix + "request_action", RequestAction, self._action_request_callback)
        rospy.Subscriber(self._agent_topic_prefix + "start", SimStart, self._sim_start_callback)
        rospy.Subscriber(self._agent_topic_prefix + "end", SimEnd, self._sim_end_callback)
        rospy.Subscriber(self._agent_topic_prefix + "bye", Bye, self._bye_callback)
        rospy.Subscriber(self._agent_topic_prefix + "generic_action", GenericAction, self._callback_generic_action)

        # start communication class
        self._communication = Communication(self._agent_name)
        # Map topic
        self._pub_map = self._communication.start_map(self._callback_map)
        # Personal message topic
        self._pub_agents = self._communication.start_agents(self._callback_agents)
        # Auction topic
        self._pub_auction = self._communication.start_auction(self._callback_auction)

        self.time_to_bid = True  # only test debug puposes
        self.task_subdivision = {"task1": {"agents_needed": 3,
                                           "agents_assigned": []
                                           }
                                 }

        self._received_action_response = False

    def _sim_start_callback(self, msg):
        """
        here we could also evaluate the msg in order to initialize depending on the role etc.
        :param msg: the message
        :type msg: SimStart
        """
        if not self._sim_started:  # init only once here
            rospy.loginfo(self._agent_name + " started")

            # creating the actual RHBP model
            self._initialize_behaviour_model()

        self._sim_started = True

    def _callback_generic_action(self, msg):
        """
        ROS callback for generic actions
        :param msg: ros message
        :type msg: GenericAction
        """
        # marks the current action request as answered; checked in _action_request_callback
        self._received_action_response = True

    def _sim_end_callback(self, msg):
        """
        :param msg: the message
        :type msg: SimEnd
        """
        rospy.loginfo("SimEnd:" + str(msg))
        for g in self.goals:
            g.unregister()
        for b in self.behaviours:
            b.unregister()
        self._sim_started = False

    def _bye_callback(self, msg):
        """
        :param msg: the message
        :type msg: Bye
        """
        rospy.loginfo("Simulation finished")
        rospy.signal_shutdown('Shutting down {} - Simulation server closed'.format(self._agent_name))

    def _action_request_callback(self, msg):
        """
        here we just trigger the decision-making and planning
        while tracking the available time and behaviour responses
        :param msg: the message
        :type msg: RequestAction
        """
        # calculate deadline for the current simulation step
        start_time = rospy.get_rostime()
        safety_offset = rospy.Duration.from_sec(0.2)  # Safety offset in seconds
        deadline_msg = rospy.Time.from_sec(msg.deadline / 1000.0)
        current_msg = rospy.Time.from_sec(msg.time / 1000.0)
        deadline = start_time + (deadline_msg - current_msg) - safety_offset

        self.perception_provider.update_perception(request_action_msg=msg)

        # Print current perception for debugging purposes
        rospy.logdebug(('Simulationstep: {}'.format(msg.simulation_step)))
        rospy.logdebug('Obstacles: {}'.format(self.perception_provider.obstacles))
        rospy.logdebug('Goals: {}'.format(self.perception_provider.goals))
        rospy.logdebug('Dispensers: {}'.format(self.perception_provider.dispensers))
        rospy.logdebug('Entities: {}'.format(self.perception_provider.entities))
        rospy.logdebug('Agent: {}'.format(msg.agent))
        # rospy.logdebug('Whole Perception: \n {}'.format(self.perception_provider))

        # send the map if perceive the goal
        if self.local_map.goal_area_fully_discovered:
            map = self.local_map._representation
            top_left_corner = self.local_map._from_relative_to_matrix(self.local_map.goal_top_left)
            self._communication.send_map(self._pub_map, str(map), top_left_corner[0], top_left_corner[1])  # lm_x and lm_y to get

        # NOTE(review): the only reset of self._received_action_response sits inside the
        # commented-out block below, so the step loop further down is skipped on every
        # request after the first response -- confirm whether the reset should be live code
        '''
        # send personal message test
        if self._agent_name == "agentA1":
            self._communication.send_message(self._pub_agents, "agentA2", "task", "[5,5]")

        self._received_action_response = False

        # send bid
        if self.time_to_bid:
            task_to_bid = "task1"
            if (self._agent_name == "agentA1" or self._agent_name == "agentA2"):
                self._communication.send_bid(self._pub_auction, task_to_bid, 10)
            else:
                self._communication.send_bid(self._pub_auction, task_to_bid, random.randint(1, 100))
            self.time_to_bid = False
            rospy.loginfo(self._agent_name + " ha biddato")
        '''

        # update map
        self.local_map.update_map(perception=self.perception_provider)
        rospy.logdebug('Updated Map')

        # self._received_action_response is set to True if a generic action response was received(send by any behaviour)
        while not self._received_action_response and rospy.get_rostime() < deadline:
            # wait until this agent is completely initialised
            if self._sim_started:  # we at least wait our max time to get our agent initialised
                # action send is finally triggered by a selected behaviour
                self._manager.step(guarantee_decision=True)
            else:
                rospy.sleep(0.1)

        if self._received_action_response:  # One behaviour replied with a decision
            duration = rospy.get_rostime() - start_time
            rospy.logdebug("%s: Decision-making duration %f", self._agent_name, duration.to_sec())
        elif not self._sim_started:  # Agent was not initialised in time
            rospy.logwarn("%s idle_action(): sim not yet started", self._agent_name)
        else:  # Our decision-making has taken too long
            rospy.logwarn("%s: Decision-making timeout", self._agent_name)

    def _callback_map(self, msg):
        # message fields of the shared map topic
        msg_id = msg.message_id
        map_from = msg.agent_id
        map_value = msg.map
        map_lm_x = msg.lm_x
        map_lm_y = msg.lm_y

        if map_from != self._agent_name:
            rospy.loginfo(self._agent_name + " received map from " + map_from + " | map value: " + map_value)
            map = np.array(map_value)
            # do map merging

    def _callback_agents(self, msg):
        # direct agent-to-agent message; acknowledge reception back to the sender
        msg_id = msg.message_id
        msg_from = msg.agent_id_from
        msg_type = msg.message_type
        msg_param = msg.params

        if msg.agent_id_to == self._agent_name:
            rospy.loginfo(
                self._agent_name + " received message from " + msg_from + " | id: " + msg_id + " | type: " +
                msg_type + " | params: " + msg_param)
            self._communication.send_message(self._pub_agents, msg_from, "received", msg_id)

    def _callback_auction(self, msg):
        # collect bids per task; once all agents (plus the "done" marker) have bid,
        # assign the lowest distinct bidders to the task
        msg_id = msg.message_id
        msg_from = msg.agent_id
        task_id = msg.task_id
        task_bid_value = msg.bid_value

        if (not task_id in self.bids):
            self.bids[task_id] = OrderedDict()
            self.bids[task_id]["done"] = False

        if (self.bids[task_id]["done"] == False):
            if (not msg_from in self.bids[task_id]):
                self.bids[task_id][msg_from] = task_bid_value

                if len(self.bids[task_id]) == self.number_of_agents + 1:  # count the done
                    # sort by bid value, ties broken by agent name
                    ordered_task = OrderedDict(sorted(self.bids[task_id].items(), key=lambda x: (x[1], x[0])))
                    # rospy.loginfo(self._agent_name + " | " + str(ordered_task))
                    duplicate = -999
                    i = 0
                    for key, value in ordered_task.items():
                        if (i > 0):  # skip done
                            if (i == self.task_subdivision[task_id]["agents_needed"] + 1):
                                break
                            available = (len(ordered_task) - 1) - len(self.task_subdivision[task_id]["agents_assigned"]) - i
                            # rospy.loginfo(self._agent_name + " |1: " + str(len(ordered_task) - 1) + " | 2: " + str(len(self.task_subdivision[task_id]["agents_assigned"])) + "i: " + str(i) + " | current:" + key)
                            if (value != duplicate or available <= 0):
                                self.task_subdivision[task_id]["agents_assigned"].append(key)
                            duplicate = value
                        i += 1
                    self.bids[task_id]["done"] = True
                    rospy.loginfo("DONE: " + str(self.task_subdivision[task_id]["agents_assigned"]))

    def _initialize_behaviour_model(self):
        """
        This function initialises the RHBP behaviour/goal model.
        """
        # Manual Player Move/Exploration
        manual_move = ManualMove(name="manual_move", perception_provider=self.perception_provider,
                                 agent_name=self._agent_name)
        self.behaviours.append(manual_move)
        '''
        # Communication test
        comm_test = CommunicationTest(name="comm_test", perception_provider=self.perception_provider,
                                      agent_name=self._agent_name)
        self.behaviours.append(comm_test)
        '''
        # NOTE(review): the triple quote below opens another commented-out section
        # that continues beyond the visible part of this file
        '''
def test_plan_monitoring_float_sensors(self):
    """
    Testing plan monitoring and replanning management.

    Uses float sensors whose values are updated manually to fake partial
    and full behaviour effects, then asserts on the manager's internal
    plan-monitoring helpers (sensor-change attribution, expected influence,
    realised planned effects).
    """
    method_prefix = self.__message_prefix + "test_plan_monitoring_float_sensors"
    planner_prefix = method_prefix + "Manager"
    # max_parallel_behaviours=1 -> exactly one behaviour is selected per step
    manager = Manager(activationThreshold=7.0, prefix=planner_prefix, max_parallel_behaviours=1)
    sensor_1 = Sensor(name="Sensor1", initial_value=0)
    behaviour_1 = BehaviourBase(name="Behaviour1", planner_prefix=planner_prefix)
    behaviour_1.add_effect(Effect(sensor_1.name, 1, sensor_type=float))
    sensor_2 = Sensor(name="Sensor2", initial_value=0)
    behaviour_2 = BehaviourBase(name="Behaviour2", planner_prefix=planner_prefix)
    behaviour_2.add_effect(Effect(sensor_2.name, 1, sensor_type=float))
    behaviour_3 = BehaviourBase(name="Behaviour3", planner_prefix=planner_prefix)
    # adding precondition to get a reference to the sensor in the manager
    behaviour_3.add_precondition(Condition(sensor_2, BooleanActivator()))
    behaviour_3.add_effect(Effect(sensor_2.name, 1, sensor_type=float))
    goal1 = GoalBase(name="Test_Goal1",
                     conditions=[Conjunction(Condition(sensor_1, ThresholdActivator(1)),
                                             Condition(sensor_2, ThresholdActivator(0), ))],
                     planner_prefix=planner_prefix, permanent=True)
    manager.step(guarantee_decision=True)
    self.assertTrue(behaviour_1._isExecuting,
                    "Behaviour 1 is not executed even though a decision was forced and it would fulfill the plan")
    sensor_1.update(0.5)  # faking partial effect of behaviour1, all changes are induced by behaviour, no replanning
    manager.step(guarantee_decision=True)
    self.assertTrue(behaviour_1._isExecuting,
                    "Behaviour 1 is not executed even though a decision was forced and it would fulfill the plan")
    self.assertTrue(manager._are_all_sensor_changes_from_executed_behaviours())
    self.assertFalse(manager._are_effects_of_planned_behaviour_realised())  # we did not yet reach the full effect
    self.assertTrue(manager._executed_behaviours_influenced_as_expected())
    sensor_1.update(1.0)  # faking the full effect of behaviour1, all changes are induced by behaviour, no replanning
    manager.step(guarantee_decision=True)
    self.assertTrue(behaviour_1._isExecuting,
                    "Behaviour 1 is not executed even though a decision was forced and it would fulfill the plan")
    self.assertTrue(manager._are_all_sensor_changes_from_executed_behaviours())
    self.assertTrue(manager._executed_behaviours_influenced_as_expected())
    manager._planExecutionIndex -= 1  # we have to manipulate here because it was incremented
    self.assertTrue(manager._are_effects_of_planned_behaviour_realised())
    manager._planExecutionIndex += 1  # restore the index after the check
def test_handle_interfering_correlations(self):
    """ Test manager interference resolving capabilities """
    method_prefix = self.__message_prefix + "test_handle_interfering_correlations"
    planner_prefix = method_prefix + "Manager"
    manager = Manager(activationThreshold=7.0, prefix=planner_prefix)

    # First greedy sensor/condition pair driven by an increaser behaviour.
    first_topic = method_prefix + '/sensor_1'
    first_sensor = TopicSensor(topic=first_topic, message_type=Int32, initial_value=False)
    first_condition = Condition(first_sensor, GreedyActivator())
    first_behaviour = IncreaserBehavior(effect_name=first_sensor.name, topic_name=first_topic,
                                        name=method_prefix + "TopicIncreaser",
                                        planner_prefix=planner_prefix, interruptable=True)

    # Second, independent sensor/condition pair with its own behaviour.
    second_topic = method_prefix + '/sensor_2'
    second_sensor = TopicSensor(topic=second_topic, message_type=Int32, initial_value=False)
    second_condition = Condition(second_sensor, GreedyActivator())
    second_behaviour = IncreaserBehavior(effect_name=second_sensor.name, topic_name=second_topic,
                                         name=method_prefix + "TopicIncreaser2",
                                         planner_prefix=planner_prefix, interruptable=True)

    # Introduce a conflict ("-1"): the first behaviour also decreases the second sensor.
    first_behaviour.add_effect(Effect(sensor_name=second_sensor.name, indicator=-1, sensor_type=int))

    goal = GoalBase(method_prefix + 'CentralGoal', planner_prefix=planner_prefix, permanent=True)
    goal.add_condition(first_condition)
    goal.add_condition(second_condition)

    # Because of the conflict only one of the two behaviours may run.
    for _ in range(4):
        manager.step()
        rospy.sleep(0.1)
    # The non-conflicting behaviour is the one that gets executed.
    self.assertFalse(first_behaviour._isExecuting, "Behaviour 1 is executed in spite of a conflict")
    self.assertTrue(second_behaviour._isExecuting, "Behaviour 2 is not executed")

    first_behaviour.priority = 1  # raise priority so the conflicting behaviour wins the next decision
    manager.step()
    rospy.sleep(0.1)
    self.assertTrue(first_behaviour._isExecuting, "Behaviour 1 is not executed in spite of higher priority")
    self.assertFalse(second_behaviour._isExecuting, "Behaviour 2 is executed in spite of lower priority")
    first_behaviour.priority = 0  # reset priority

    # Boost behaviour 2's activation with an extra goal whose wish positively influences it.
    extra_goal = GoalBase(method_prefix + 'ExtraGoal', planner_prefix=planner_prefix, permanent=True)
    extra_goal.add_condition(second_condition)  # this condition is targeted by behaviour 2
    rospy.sleep(0.1)
    # The configured decay factor needs several steps to integrate the changed activation.
    for _ in range(5):
        manager.step()
        rospy.sleep(0.1)
    self.assertFalse(first_behaviour._isExecuting, "Behaviour 1 is executed")
    self.assertTrue(second_behaviour._isExecuting, "Behaviour 2 is not executed")
class NetworkBehaviour(BehaviourBase):
    """
    Behavior, which encapsulates an additional manager and behaviors.
    This allows to build hierarchies of hybrid behaviour planners.
    """

    # appended to the behaviour name to form the sub-manager prefix
    MANAGER_POSTFIX = "Manager"
    TYPE_STRING = "Network"

    def __init__(self, name, requires_execution_steps=True,
                 only_running_for_deciding_interruptible=Manager.USE_ONLY_RUNNING_BEHAVIOURS_FOR_INTERRUPTIBLE_DEFAULT_VALUE,
                 correlations=None, always_update_activation=False, guarantee_decision=False, **kwargs):
        """
        :param name: name of the behaviour that is also used to create the sub manager name together with the
               NetworkBehaviour.MANAGER_POSTFIX
        :param requires_execution_steps: whether the execution steps should be caused from the parent manager or not.
               If not, the step method must be called manually
        :param only_running_for_deciding_interruptible: forwarded to the nested Manager; controls whether only
               running behaviours are considered when deciding interruptibility
        :param correlations: tuple <Effect> registered as effects of this network behaviour
        :param always_update_activation: if set to True the entire activation calculation of the sub manager is
               updated on each behaviour computation update
        :param guarantee_decision: if there are executable behaviours in the local network, adjust the thresholds
               until at least one behaviour is selected
        :param kwargs: args for the manager, except the prefix arg
        """
        super(NetworkBehaviour, self).__init__(name=name, requires_execution_steps=requires_execution_steps, **kwargs)
        if "interruptable" in kwargs:
            rhbplog.logwarn("Interruptable parameter will be ignored in a NetworkBehaviour. Interruptable attribute is "
                            "evaluated based on the running or registered parameters, "
                            "see 'only_running_for_deciding_interruptible'")
        self.requires_execution_steps = requires_execution_steps
        self.always_update_activation = always_update_activation
        self.guarantee_decision = guarantee_decision
        manager_args = {}
        manager_args.update(kwargs)
        manager_args['prefix'] = self.get_manager_prefix()
        # sub manager starts disabled; it is enabled/disabled via start()/stop()
        # NOTE(review): the unusual capital 'R' in the keyword below mirrors the Manager parameter name — confirm
        # against the Manager definition before renaming.
        self.__manager = Manager(enabled=False,
                                 use_only_running_behaviors_for_interRuptible=only_running_for_deciding_interruptible,
                                 **manager_args)
        self.__goal_name_prefix = name + "/Goals/"
        self.__goal_counter = 0
        if correlations is not None:
            self.add_effects(correlations)

    def get_manager_prefix(self):
        """
        Return the manager prefix generated by the behaviour name and the MANAGER_POSTFIX
        :return: the manager prefix str
        """
        return self._name + '/' + NetworkBehaviour.MANAGER_POSTFIX

    def __generate_goal_name(self, effect):
        """
        Build a unique goal name for the given effect.
        :param effect: instance of type Effect
        :return: unique name for goal
        """
        # X as separator between counter and sensor name, to prevent conflicts caused by unusual names
        name = self.__goal_name_prefix + str(self.__goal_counter) + 'X' + effect.sensor_name
        self.__goal_counter += 1
        return name

    def _create_goal(self, sensor, effect, goal_name):
        """
        Generate goals, which made the manager trying to work infinitely on the given effect,
        until the network is stopped. Therefore the goal shouldn't be reachable
        (except the goal for boolean effects).
        :param sensor: instance of type Sensor
        :param effect: instance of type Effect
        :param goal_name: unique name for the goal
        :return: a goal, which causes the manager to work on the effect during the whole time
        :raises RuntimeError: if the creation of a goal for an effect of this type is not possible
        """
        try:
            condition = create_condition_from_effect(effect=effect, sensor=sensor)
            return OfflineGoal(name=goal_name, planner_prefix=self.get_manager_prefix(), permanent=True,
                               conditions={condition})
        except RuntimeError:
            # BUGFIX: RuntimeError has no 'msg' keyword argument — the old `RuntimeError(msg=...)` call raised a
            # TypeError instead of the intended error. str() guards against non-string sensor types.
            raise RuntimeError("Can't create goal for effect type '" + str(effect.sensor_type) +
                               "'. Overwrite the method _create_goal to handle the type")

    @deprecated
    def add_correlations(self, correlations):
        """
        Adds the given effects to the correlations of this Behavior.
        DEPRECATED: Use *add_effects* instead
        :param correlations: list of Effects
        """
        self.add_effects(correlations)

    @deprecated
    def add_correlations_and_goals(self, sensor_correlations):
        """
        Adds the given effects to the correlations of this Behavior.
        Furthermore creates a goal for each Effect and registers it at the nested Manager
        DEPRECATED: Use *add_effects_and_goals* instead
        :param sensor_correlations: list of tuples of (Sensor, Effect)
        """
        self.add_effects_and_goals(sensor_correlations)

    def add_effects(self, effects):
        """
        Adds the given effects to this Behavior.
        :param effects: list of Effects
        """
        self._correlations.extend(effects)

    def add_effects_and_goals(self, sensor_effect):
        """
        Adds the given effects to the correlations of this Behavior.
        Furthermore creates a goal for each Effect and registers it at the nested Manager
        :param sensor_effect: list of tuples of (Sensor, Effect)
        """
        for sensor, effect in sensor_effect:
            goal_name = self.__generate_goal_name(effect)
            goal = self._create_goal(sensor=sensor, effect=effect, goal_name=goal_name)
            self.__manager.add_goal(goal)
            self._correlations.append(effect)

    def add_goal(self, goal):
        """
        Adds the given goal to nested manager
        :param goal: AbstractGoalRepresentation
        """
        self.__manager.add_goal(goal)

    def updateComputation(self, manager_step):
        """
        Propagate the computation update to the nested manager.
        :param manager_step: current step counter of the parent manager
        """
        super(NetworkBehaviour, self).updateComputation(manager_step)
        # only trigger the update if not already activated because then it would be executed anyhow
        if self.always_update_activation and not self.__manager.enabled:
            self.__manager.update_activation(plan_if_necessary=False)
        if not self._isExecuting:
            self.__manager.send_discovery()

    def do_step(self):
        """Execute one decision-making step of the nested manager."""
        self.__manager.step(guarantee_decision=self.guarantee_decision)

    def start(self):
        """Enable the nested manager when this network behaviour is started."""
        self.__manager.enable()

    def stop(self):
        """Disable the nested manager when this network behaviour is stopped."""
        self.__manager.disable()

    def _is_interruptible(self):
        # interruptibility is delegated to the nested manager (see __init__ docstring)
        return self.__manager.is_interruptible()
class RhbpAgent(object):
    """
    Main class of an agent, taking care of the main interaction with the mapc_ros_bridge
    """

    def __init__(self):
        rospy.logdebug("RhbpAgent::init")
        rospy.init_node('agent_node', anonymous=True, log_level=rospy.INFO)
        self._agent_name = rospy.get_param('~agent_name', 'agentA1')  # default for debugging 'agentA1'
        self._agent_topic_prefix = get_bridge_topic_prefix(agent_name=self._agent_name)
        # ensure also max_parallel_behaviours during debugging
        self._manager = Manager(prefix=self._agent_name, max_parallel_behaviours=1)
        self.behaviours = []  # registered RHBP behaviours, unregistered again in _sim_end_callback
        self.goals = []  # registered RHBP goals, unregistered again in _sim_end_callback
        self.perception_provider = PerceptionProvider()
        self._sim_started = False  # set True once the first SimStart message was processed
        # subscribe to MAPC bridge core simulation topics
        rospy.Subscriber(self._agent_topic_prefix + "request_action", RequestAction, self._action_request_callback)
        rospy.Subscriber(self._agent_topic_prefix + "start", SimStart, self._sim_start_callback)
        rospy.Subscriber(self._agent_topic_prefix + "end", SimEnd, self._sim_end_callback)
        rospy.Subscriber(self._agent_topic_prefix + "bye", Bye, self._bye_callback)
        rospy.Subscriber(self._agent_topic_prefix + "generic_action", GenericAction, self._callback_generic_action)
        self._received_action_response = False  # set True by _callback_generic_action

    def _sim_start_callback(self, msg):
        """
        here we could also evaluate the msg in order to initialize depending on the role etc.
        :param msg: the message
        :type msg: SimStart
        """
        if not self._sim_started:  # init only once here
            rospy.loginfo(self._agent_name + " started")
            # creating the actual RHBP model
            self._initialize_behaviour_model()
        self._sim_started = True

    def _callback_generic_action(self, msg):
        """
        ROS callback for generic actions
        :param msg: ros message
        :type msg: GenericAction
        """
        # signals _action_request_callback that a behaviour has answered the current request
        self._received_action_response = True

    def _sim_end_callback(self, msg):
        """
        Tear down the RHBP model when the simulation ends.
        :param msg: the message
        :type msg: SimEnd
        """
        rospy.loginfo("SimEnd:" + str(msg))
        for g in self.goals:
            g.unregister()
        for b in self.behaviours:
            b.unregister()
        self._sim_started = False

    def _bye_callback(self, msg):
        """
        Shut down the node when the simulation server says goodbye.
        :param msg: the message
        :type msg: Bye
        """
        rospy.loginfo("Simulation finished")
        rospy.signal_shutdown('Shutting down {} - Simulation server closed'.format(self._agent_name))

    def _action_request_callback(self, msg):
        """
        here we just trigger the decision-making and planning
        while tracking the available time and behaviour responses
        :param msg: the message
        :type msg: RequestAction
        """
        # calculate deadline for the current simulation step
        start_time = rospy.get_rostime()
        safety_offset = rospy.Duration.from_sec(0.2)  # Safety offset in seconds
        deadline_msg = rospy.Time.from_sec(msg.deadline / 1000.0)
        current_msg = rospy.Time.from_sec(msg.time / 1000.0)
        deadline = start_time + (deadline_msg - current_msg) - safety_offset
        self.perception_provider.update_perception(request_action_msg=msg)
        self._received_action_response = False
        # self._received_action_response is set to True if a generic action response was
        # received (send by any behaviour)
        while not self._received_action_response and rospy.get_rostime() < deadline:
            # wait until this agent is completely initialised
            if self._sim_started:  # we at least wait our max time to get our agent initialised
                # action send is finally triggered by a selected behaviour
                self._manager.step(guarantee_decision=True)
            else:
                rospy.sleep(0.1)
        if self._received_action_response:  # One behaviour replied with a decision
            duration = rospy.get_rostime() - start_time
            rospy.logdebug("%s: Decision-making duration %f", self._agent_name, duration.to_sec())
        elif not self._sim_started:  # Agent was not initialised in time
            rospy.logwarn("%s idle_action(): sim not yet started", self._agent_name)
        else:  # Our decision-making has taken too long
            rospy.logwarn("%s: Decision-making timeout", self._agent_name)

    def _initialize_behaviour_model(self):
        """
        This function initialises the RHBP behaviour/goal model.
        """
        # Random Move/Exploration
        random_move = RandomMove(name="random_move", agent_name=self._agent_name)
        self.behaviours.append(random_move)
        random_move.add_effect(Effect(self.perception_provider.dispenser_visible_sensor.name, indicator=True))

        # Moving to a dispenser if in vision range
        move_to_dispenser = MoveToDispenser(name="move_to_dispense",
                                            perception_provider=self.perception_provider,
                                            agent_name=self._agent_name)
        self.behaviours.append(move_to_dispenser)
        move_to_dispenser.add_effect(
            Effect(self.perception_provider.closest_dispenser_distance_sensor.name, indicator=-1, sensor_type=float))
        move_to_dispenser.add_precondition(
            Condition(self.perception_provider.dispenser_visible_sensor, BooleanActivator(desiredValue=True)))
        move_to_dispenser.add_precondition(
            Condition(self.perception_provider.closest_dispenser_distance_sensor,
                      ThresholdActivator(isMinimum=True, thresholdValue=2)))

        # Dispense a block if close enough
        dispense = Dispense(name="dispense", perception_provider=self.perception_provider,
                            agent_name=self._agent_name)
        self.behaviours.append(dispense)
        dispense.add_effect(
            Effect(self.perception_provider.number_of_blocks_sensor.name, indicator=+1, sensor_type=float))
        dispense.add_precondition(
            Condition(self.perception_provider.closest_dispenser_distance_sensor,
                      ThresholdActivator(isMinimum=False, thresholdValue=1)))

        # Our simple goal is to create more and more blocks
        dispense_goal = GoalBase("dispensing", permanent=True,
                                 conditions=[Condition(self.perception_provider.number_of_blocks_sensor,
                                                       GreedyActivator())],
                                 planner_prefix=self._agent_name)
        self.goals.append(dispense_goal)
def test_multiple_embedded_network_behaviors(self):
    """
    Tests the case, that one network behavior is embedded into another network behavior.
    The goal requires to receive an int (3) in a topic.
    """
    method_prefix = self.__message_prefix + "/test_multiple_embedded_network_behaviors"
    topic_name = method_prefix + '/Topic'
    sensor = SimpleTopicSensor(topic=topic_name, message_type=Int32, initial_value=0)
    condition = Condition(sensor, ThresholdActivator(thresholdValue=3))

    planner_prefix = method_prefix + "/Manager"
    manager = Manager(activationThreshold=7, prefix=planner_prefix)
    goal = OfflineGoal('CentralGoal', planner_prefix=planner_prefix)
    goal.add_condition(condition)
    manager.add_goal(goal)

    effect = Effect(sensor_name=sensor.name, indicator=1, sensor_type=int,
                    activator_name=condition.activator.name)

    # Two nested networks: the inner one is registered at the outer one's manager.
    # Effects could be passed via constructor or via the add method; both networks use the add method.
    outer_network = NetworkBehavior(name=method_prefix + '/FirstLevel', plannerPrefix=planner_prefix,
                                    createLogFiles=True)
    outer_network.add_effects_and_goals([(sensor, effect)])
    inner_network = NetworkBehavior(name=method_prefix + '/SecondLevel',
                                    plannerPrefix=outer_network.get_manager_prefix(),
                                    createLogFiles=True)
    inner_network.add_effects_and_goals([(sensor, effect)])

    pddl_function_name = condition.getFunctionNames()[0]
    increaser_behavior = IncreaserBehavior(effect_name=pddl_function_name, topic_name=topic_name,
                                           name=method_prefix + "TopicIncreaser",
                                           plannerPrefix=inner_network.get_manager_prefix())

    # Each hierarchy level needs a few steps until its child starts executing:
    # outer network first, then the inner network, finally the increaser behaviour.
    for _ in range(3):
        self.assertFalse(outer_network._isExecuting)
        manager.step()
        rospy.sleep(0.1)
    self.assertTrue(outer_network._isExecuting)

    for _ in range(3):
        self.assertFalse(inner_network._isExecuting)
        outer_network.do_step()
        rospy.sleep(0.1)
    self.assertTrue(inner_network._isExecuting)

    for _ in range(3):
        self.assertFalse(increaser_behavior._isExecuting)
        inner_network.do_step()
        rospy.sleep(0.1)
    self.assertTrue(increaser_behavior._isExecuting)

    # Drive the increaser until the goal threshold (3) is reached.
    for expected_value in range(3):
        sensor.sync()
        self.assertEqual(expected_value, sensor.value)
        inner_network.do_step()
        rospy.sleep(0.1)

    goal.fetchStatus(3)
    self.assertTrue(goal.satisfied, 'Goal is not satisfied')
class RhbpAgent(object):
    """
    Main class of an agent, taking care of the main interaction with the mapc_ros_bridge
    """

    def __init__(self):
        rospy.logdebug("RhbpAgent::init")
        rospy.init_node('agent_node', anonymous=True, log_level=rospy.INFO)
        self._agent_name = rospy.get_param('~agent_name', 'agentA1')  # default for debugging 'agentA1'
        self._agent_topic_prefix = get_bridge_topic_prefix(agent_name=self._agent_name)
        # ensure also max_parallel_behaviours during debugging
        self._manager = Manager(prefix=self._agent_name, max_parallel_behaviours=1)
        self.behaviours = []  # registered RHBP behaviours, unregistered again in _sim_end_callback
        self.goals = []  # registered RHBP goals, unregistered again in _sim_end_callback
        self.perception_provider = PerceptionProvider()
        self._sim_started = False  # set True once the first SimStart message was processed
        # if manual player is called, do not call other behaviours
        self.manual_player = False
        # subscribe to MAPC bridge core simulation topics
        rospy.Subscriber(self._agent_topic_prefix + "request_action", RequestAction, self._action_request_callback)
        rospy.Subscriber(self._agent_topic_prefix + "start", SimStart, self._sim_start_callback)
        rospy.Subscriber(self._agent_topic_prefix + "end", SimEnd, self._sim_end_callback)
        rospy.Subscriber(self._agent_topic_prefix + "bye", Bye, self._bye_callback)
        rospy.Subscriber(self._agent_topic_prefix + "generic_action", GenericAction, self._callback_generic_action)
        self._received_action_response = False  # set True by _callback_generic_action

    def _sim_start_callback(self, msg):
        """
        here we could also evaluate the msg in order to initialize depending on the role etc.
        :param msg: the message
        :type msg: SimStart
        """
        if not self._sim_started:  # init only once here
            rospy.loginfo(self._agent_name + " started")
            # creating the actual RHBP model
            self._initialize_behaviour_model()
        self._sim_started = True

    def _callback_generic_action(self, msg):
        """
        ROS callback for generic actions
        :param msg: ros message
        :type msg: GenericAction
        """
        # signals _action_request_callback that a behaviour has answered the current request
        self._received_action_response = True

    def _sim_end_callback(self, msg):
        """
        Tear down the RHBP model when the simulation ends.
        :param msg: the message
        :type msg: SimEnd
        """
        rospy.loginfo("SimEnd:" + str(msg))
        for g in self.goals:
            g.unregister()
        for b in self.behaviours:
            b.unregister()
        self._sim_started = False

    def _bye_callback(self, msg):
        """
        Shut down the node when the simulation server says goodbye.
        :param msg: the message
        :type msg: Bye
        """
        rospy.loginfo("Simulation finished")
        rospy.signal_shutdown('Shutting down {} - Simulation server closed'.format(self._agent_name))

    def _action_request_callback(self, msg):
        """
        here we just trigger the decision-making and planning
        while tracking the available time and behaviour responses
        :param msg: the message
        :type msg: RequestAction
        """
        # calculate deadline for the current simulation step
        start_time = rospy.get_rostime()
        safety_offset = rospy.Duration.from_sec(0.2)  # Safety offset in seconds
        deadline_msg = rospy.Time.from_sec(msg.deadline / 1000.0)
        current_msg = rospy.Time.from_sec(msg.time / 1000.0)
        deadline = start_time + (deadline_msg - current_msg) - safety_offset
        self.perception_provider.update_perception(request_action_msg=msg)
        self._received_action_response = False
        # self._received_action_response is set to True if a generic action response was
        # received (send by any behaviour)
        while not self._received_action_response and rospy.get_rostime() < deadline:
            # wait until this agent is completely initialised
            if self._sim_started:  # we at least wait our max time to get our agent initialised
                # action send is finally triggered by a selected behaviour
                self._manager.step(guarantee_decision=True)
            else:
                rospy.sleep(0.1)
        if self._received_action_response:  # One behaviour replied with a decision
            duration = rospy.get_rostime() - start_time
            rospy.logdebug("%s: Decision-making duration %f", self._agent_name, duration.to_sec())
        elif not self._sim_started:  # Agent was not initialised in time
            rospy.logwarn("%s idle_action(): sim not yet started", self._agent_name)
        else:  # Our decision-making has taken too long
            rospy.logwarn("%s: Decision-making timeout", self._agent_name)

    def _initialize_behaviour_model(self):
        """
        This function initialises the RHBP behaviour/goal model.
        """
        if self.manual_player:
            # manual control replaces the autonomous behaviour model entirely
            control_player()
            control = AgentControl(name="manual_control", perception_provider=self.perception_provider,
                                   agent_name=self._agent_name)
            self.behaviours.append(control)
        else:
            # Exploration targeted at locating goal
            explorer = Explore_astar(name="explore", perception_provider=self.perception_provider,
                                     agent_name=self._agent_name, priority=1)
            self.behaviours.append(explorer)
            explorer.add_effect(
                Effect(self.perception_provider.count_goal_cells.name, indicator=+1, sensor_type=float))

            # Reach dispenser once origin has been obtained
            reach_dispenser = MoveToDispenser(name="reach_dispenser", perception_provider=self.perception_provider,
                                              agent_name=self._agent_name, priority=2)
            self.behaviours.append(reach_dispenser)
            reach_dispenser.add_effect(
                Effect(self.perception_provider.closest_dispenser_distance_sensor.name, indicator=-1,
                       sensor_type=float))
            pre_cond2 = Condition(self.perception_provider.target_dispenser_selected_sensor,
                                  BooleanActivator(desiredValue=True))
            reach_dispenser.add_precondition(pre_cond2)

            # Dispense from dispenser if near
            dispense = Dispense(name="dispense", perception_provider=self.perception_provider,
                                agent_name=self._agent_name, priority=3)
            self.behaviours.append(dispense)
            dispense.add_effect(
                Effect(self.perception_provider.sensor_dispensed_blocks.name, indicator=+1, sensor_type=float))
            pre_cond3 = Condition(self.perception_provider.closest_dispenser_distance_sensor,
                                  ThresholdActivator(isMinimum=False, thresholdValue=1))
            dispense.add_precondition(Conjunction(pre_cond2, pre_cond3))

            # Attach a block if close enough
            attach = Attach(name="attach", perception_provider=self.perception_provider,
                            agent_name=self._agent_name, priority=4)
            self.behaviours.append(attach)
            pre_cond4 = Condition(self.perception_provider.sensor_dispensed_blocks,
                                  ThresholdActivator(isMinimum=True, thresholdValue=1))
            pre_cond5 = Condition(self.perception_provider.closest_block_distance_sensor,
                                  ThresholdActivator(isMinimum=False, thresholdValue=1))
            attach.add_effect(
                Effect(self.perception_provider.sensor_attached_blocks.name, indicator=+1, sensor_type=float))
            attach.add_precondition(Conjunction(pre_cond4, pre_cond5))

            # Move to goal once attached
            reach_goal = MoveToGoal(name="reach_goal", perception_provider=self.perception_provider,
                                    agent_name=self._agent_name, priority=10)
            self.behaviours.append(reach_goal)
            reach_goal.add_effect(
                Effect(self.perception_provider.submit_count.name, indicator=+1, sensor_type=float))
            pre_cond6 = Condition(self.perception_provider.sensor_attached_blocks,
                                  ThresholdActivator(isMinimum=True, thresholdValue=1))
            reach_goal.add_precondition(pre_cond6)

            # Submit Task if agent has reached location
            submit_task = Submit(name="submit_task", perception_provider=self.perception_provider,
                                 agent_name=self._agent_name, priority=20)
            self.behaviours.append(submit_task)
            submit_task.add_effect(
                Effect(self.perception_provider.score_sensor.name, indicator=+1, sensor_type=float))
            pre_cond7 = Condition(self.perception_provider.submit_sensor,
                                  ThresholdActivator(isMinimum=True, thresholdValue=1))
            submit_task.add_precondition(pre_cond7)

            # Keep dispensing if possible
            dispense_goal = GoalBase("dispense_goal", permanent=True,
                                     conditions=[Condition(self.perception_provider.sensor_dispensed_blocks,
                                                           GreedyActivator())],
                                     planner_prefix=self._agent_name, priority=1)
            self.goals.append(dispense_goal)

            # Attach to dispensed blocks
            attach_goal = GoalBase("attach_goal", permanent=True,
                                   conditions=[Condition(self.perception_provider.sensor_attached_blocks,
                                                         GreedyActivator())],
                                   planner_prefix=self._agent_name, priority=2)
            self.goals.append(attach_goal)

            # Primary goal to get more points
            submit_goal = GoalBase("submit_goal", permanent=True,
                                   conditions=[Condition(self.perception_provider.score_sensor,
                                                         GreedyActivator())],
                                   planner_prefix=self._agent_name, priority=4)
            # BUGFIX: submit_goal was never appended to self.goals, so it was not tracked and
            # never unregistered in _sim_end_callback.
            self.goals.append(submit_goal)
class RhbpAgent(object):
    """
    Main class of an agent, taking care of the main interaction with the mapc_ros_bridge.

    Responsibilities:
    - initialise the ROS node and subscribe to the MAPC bridge simulation topics
    - build the RHBP behaviour/goal model once the simulation starts
    - on every action request: update map/tasks/sensors, then run the RHBP
      decision-making loop until a behaviour answers or the deadline expires
    """

    def __init__(self):
        log_level = rospy.DEBUG if config.DEBUG_MODE else rospy.INFO

        rospy.logdebug("RhbpAgent::init")

        rospy.init_node('agent_node', anonymous=True, log_level=log_level)

        self._agent_name = rospy.get_param(
            '~agent_name', 'agentA1')  # default for debugging 'agentA1'

        self._agent_topic_prefix = get_bridge_topic_prefix(
            agent_name=self._agent_name)

        # ensure also max_parallel_behaviours during debugging
        self._manager = Manager(prefix=self._agent_name,
                                max_parallel_behaviours=1)

        self.behaviours = []
        self.goals = []

        self.perception_provider = PerceptionProvider()

        self.sensor_manager = SensorManager(agent=self)

        self.map = Map(agent=self, agent_name=self._agent_name)

        # inter-agent map exchange: we publish our map and collect the maps
        # other agents publish (see _map_callback)
        self.communication = Communication(agent_name=self._agent_name)
        self.map_pub = self.communication.start_map_pub_sub(
            callback=self._map_callback)
        # maps received from other agents, pending merge; appended by the
        # ROS callback, drained in _action_request_callback
        self.received_map_messages = []

        self.tasks = TaskCollection()

        self._sim_started = False

        # subscribe to MAPC bridge core simulation topics
        rospy.Subscriber(self._agent_topic_prefix + "request_action",
                         RequestAction, self._action_request_callback)
        rospy.Subscriber(self._agent_topic_prefix + "start", SimStart,
                         self._sim_start_callback)
        rospy.Subscriber(self._agent_topic_prefix + "end", SimEnd,
                         self._sim_end_callback)
        rospy.Subscriber(self._agent_topic_prefix + "bye", Bye,
                         self._bye_callback)
        rospy.Subscriber(self._agent_topic_prefix + "generic_action",
                         GenericAction, self._callback_generic_action)

        self._received_action_response = False

    def _sim_start_callback(self, msg):
        """
        here we could also evaluate the msg in order to initialize depending on the role etc.
        :param msg: the message
        :type msg: SimStart
        """
        if not self._sim_started:  # init only once here
            rospy.loginfo(self._agent_name + " started")

            # creating the actual RHBP model
            self._initialize_behaviour_model()

        self._sim_started = True

    def _callback_generic_action(self, msg):
        """
        ROS callback for generic actions
        :param msg: ros message
        :type msg: GenericAction
        """
        # any behaviour reply counts as a decision for the current step
        self._received_action_response = True

    def _sim_end_callback(self, msg):
        """
        :param msg: the message
        :type msg: SimEnd
        """
        rospy.loginfo("SimEnd:" + str(msg))
        # tear down the RHBP model so a following simulation starts clean
        for g in self.goals:
            g.unregister()
        for b in self.behaviours:
            b.unregister()
        self._sim_started = False

    def _bye_callback(self, msg):
        """
        :param msg: the message
        :type msg: Bye
        """
        rospy.loginfo("Simulation finished")
        rospy.signal_shutdown(
            'Shutting down {} - Simulation server closed'.format(
                self._agent_name))

    def _map_callback(self, msg):
        # collect other agents' maps, but only once our own goal area is
        # known (merging needs the common reference) and never our own echo
        if self.map.goal_area_discovered and msg.agent_name != self._agent_name:
            self.received_map_messages.append(msg)

    def _action_request_callback(self, msg):
        """
        here we just trigger the decision-making and planning
        while tracking the available time and behaviour responses
        :param msg: the message
        :type msg: RequestAction
        """

        # calculate deadline for the current simulation step
        start_time = rospy.get_rostime()
        safety_offset = rospy.Duration.from_sec(
            0.2)  # Safety offset in seconds
        deadline_msg = rospy.Time.from_sec(msg.deadline / 1000.0)
        current_msg = rospy.Time.from_sec(msg.time / 1000.0)
        deadline = start_time + (deadline_msg - current_msg) - safety_offset

        self.perception_provider.update_perception(request_action_msg=msg)

        self._received_action_response = False

        # Our code here
        # Move agent in its current map and expand map accordingly if agent moves in simulation
        if msg.agent.last_action_result == "success" and msg.agent.last_action == "move":
            self.map.move_agent(direction=msg.agent.last_action_params[0])

        # TODO: Update attached/connected block sensors or maybe representation in map?
        if msg.agent.last_action_result == "success" and msg.agent.last_action == "attach":
            pass

        # TODO: Account for rotation?

        # Update agent's map with current perception
        self.map.update(obstacles=self.perception_provider.obstacles,
                        goals=self.perception_provider.goals,
                        dispensers=self.perception_provider.dispensers,
                        blocks=self.perception_provider.blocks,
                        entities=self.perception_provider.entities)

        # Measure merge duration
        with MeasureDuration() as m:
            if self.map.goal_area_discovered:
                # Send own map
                self.communication.send_map(publisher=self.map_pub,
                                            obstacles=self.map.obstacles,
                                            goals=self.map.goals,
                                            blocks=self.map.blocks,
                                            dispensers=self.map.dispensers,
                                            width=self.map.width,
                                            height=self.map.height)

                # Process received maps.
                # FIX: iterate over a snapshot ([:]) — the original iterated the
                # live list while removing from it, which skips every other
                # pending message. The remove() is kept (instead of clearing)
                # because the ROS callback may append new messages meanwhile.
                for map_message in self.received_map_messages[:]:
                    other_map = Map(msg=map_message)
                    self.map.merge_with(other_map=other_map)
                    self.map._update_complete_representation()
                    self.received_map_messages.remove(map_message)

        # Tasks
        # TODO: Update attached/connected block sensors or maybe representation in map?
        if msg.agent.last_action_result == "success" and msg.agent.last_action == "submit":
            # task done: clear task-related map markers and sensors
            self.tasks.assigned_task.completed = True
            self.map.task_block_position = None
            self.map.task_dispenser_position = None
            self.sensor_manager.reset_task_sensors()

        self.tasks.update_tasks(
            tasks_percept=self.perception_provider.tasks,
            simulation_step=self.perception_provider.simulation_step)

        if self.tasks.assigned_task is None or self.tasks.assigned_task.completed:
            # pick a new task and point the map helpers at its dispenser/block
            self.tasks.update_assigned_task()
            self.map.update_task_dispenser_position()
            self.map.update_task_block_position()

        # Update sensors
        self.sensor_manager.update_sensors(request_action_message=msg)

        # /Our code here

        # self._received_action_response is set to True if a generic action
        # response was received (send by any behaviour)
        while not self._received_action_response and rospy.get_rostime() < deadline:
            # wait until this agent is completely initialised
            if self._sim_started:  # we at least wait our max time to get our agent initialised
                # action send is finally triggered by a selected behaviour
                self._manager.step(guarantee_decision=True)
            else:
                rospy.sleep(0.1)

        if self._received_action_response:  # One behaviour replied with a decision
            duration = rospy.get_rostime() - start_time
            rospy.logdebug("%s: Decision-making duration %f",
                           self._agent_name, duration.to_sec())
        elif not self._sim_started:  # Agent was not initialised in time
            rospy.logwarn("%s idle_action(): sim not yet started",
                          self._agent_name)
        else:  # Our decision-making has taken too long
            rospy.logwarn("%s: Decision-making timeout", self._agent_name)

    def _initialize_behaviour_model(self):
        """
        This function initialises the RHBP behaviour/goal model.
        """

        # RANDOM MOVE: wander until a dispenser shows up in the map
        random_move = RandomMove(name="random_move",
                                 agent_name=self._agent_name)
        self.behaviours.append(random_move)
        random_move.add_effect(
            Effect(self.sensor_manager.dispenser_distance_sensor.name,
                   indicator=True))
        random_move.add_precondition(
            Condition(self.sensor_manager.dispenser_in_map_sensor,
                      BooleanActivator(desiredValue=False)))

        # Exploration
        # explore_move = ExploreMove(name="explore_move", agent_name=self._agent_name, agent_map=self.map)
        # self.behaviours.append(explore_move)
        # explore_move.add_effect(Effect(self.perception_provider.dispenser_visible_sensor.name, indicator=True))

        # MOVE TO DISPENSER: approach a known dispenser
        move_to_dispenser = MoveToDispenser(
            name="move_to_dispenser",
            perception_provider=self.perception_provider,
            agent_name=self._agent_name,
            agent_map=self.map)
        self.behaviours.append(move_to_dispenser)
        move_to_dispenser.add_effect(
            Effect(self.sensor_manager.next_to_dispenser_sensor.name,
                   indicator=True))
        # move_to_dispenser.add_effect(
        #     Effect(self.sensor_manager.pseudo_sensor.name, indicator=1, sensor_type=float))
        move_to_dispenser.add_precondition(
            Condition(self.sensor_manager.dispenser_in_map_sensor,
                      BooleanActivator(desiredValue=True)))
        move_to_dispenser.add_precondition(
            Condition(self.sensor_manager.next_to_dispenser_sensor,
                      BooleanActivator(desiredValue=False)))

        # # Our simple goal is to move to the dispenser
        # move_to_dispenser_goal = GoalBase("move_to_dispenser_goal", permanent=True,
        #                                   conditions=[Condition(self.sensor_manager.pseudo_sensor, GreedyActivator())],
        #                                   planner_prefix=self._agent_name)
        # self.goals.append(move_to_dispenser_goal)

        # Request a block if close enough
        request = Request(name="request",
                          perception_provider=self.perception_provider,
                          agent_name=self._agent_name,
                          agent_map=self.map)
        self.behaviours.append(request)
        # request.add_effect(
        #     Effect(self.sensor_manager.pseudo_sensor.name, indicator=+1, sensor_type=float))
        request.add_effect(
            Effect(self.sensor_manager.next_to_block_sensor.name,
                   indicator=True))
        request.add_precondition(
            Condition(self.sensor_manager.next_to_dispenser_sensor,
                      BooleanActivator(desiredValue=True)))

        # # Our simple goal is to request a block
        # request_goal = GoalBase("request_goal", permanent=True,
        #                         conditions=[Condition(self.sensor_manager.pseudo_sensor, GreedyActivator())],
        #                         planner_prefix=self._agent_name)
        # self.goals.append(request_goal)

        # # Move to nearby block
        # move_to_block = MoveToBlock(name="move_to_block", perception_provider=self.perception_provider,
        #                             agent_name=self._agent_name, agent_map=self.map)
        # self.behaviours.append(move_to_block)
        # move_to_block.add_effect(
        #     Effect(self.sensor_manager.block_distance_sensor.name, indicator=-1, sensor_type=float))
        # # move_to_block.add_effect(
        # #     Effect(self.sensor_manager.pseudo_sensor.name, indicator=1, sensor_type=float))
        # move_to_block.add_precondition(
        #     Condition(self.sensor_manager.block_in_map_sensor, BooleanActivator(desiredValue=True)))
        # move_to_block.add_precondition(Condition(self.sensor_manager.block_distance_sensor,
        #                                          ThresholdActivator(isMinimum=True, thresholdValue=2)))

        # # Our simple goal is to move to a block
        # move_to_block_goal = GoalBase("move_to_block_goal", permanent=True,
        #                               conditions=[Condition(self.sensor_manager.pseudo_sensor, GreedyActivator())],
        #                               planner_prefix=self._agent_name)
        # self.goals.append(move_to_block_goal)

        # ATTACH: grab the block we are standing next to
        attach = Attach(name="attach",
                        perception_provider=self.perception_provider,
                        agent_name=self._agent_name,
                        agent_map=self.map)
        self.behaviours.append(attach)
        # attach.add_effect(Effect(self.sensor_manager.pseudo_sensor.name, indicator=+1, sensor_type=float))
        attach.add_effect(
            Effect(self.sensor_manager.block_attached_sensor.name,
                   indicator=True))
        attach.add_precondition(
            Condition(self.sensor_manager.next_to_block_sensor,
                      BooleanActivator(desiredValue=True)))

        # # Our simple goal is to attach to a block
        # attach_goal = GoalBase("attach_goal", permanent=True,
        #                        conditions=[Condition(self.sensor_manager.pseudo_sensor, GreedyActivator())],
        #                        planner_prefix=self._agent_name)
        # self.goals.append(attach_goal)

        # MOVE TO GOAL: carry the attached block to the goal area
        move_to_goal = MoveToGoal(name="move_to_goal",
                                  perception_provider=self.perception_provider,
                                  agent_name=self._agent_name,
                                  agent_map=self.map)
        self.behaviours.append(move_to_goal)
        # move_to_goal.add_effect(Effect(self.sensor_manager.pseudo_sensor.name, indicator=+1, sensor_type=float))
        move_to_goal.add_effect(
            Effect(self.sensor_manager.on_goal_sensor.name, indicator=True))
        move_to_goal.add_precondition(
            Condition(self.sensor_manager.block_attached_sensor,
                      BooleanActivator(desiredValue=True)))

        # # Our simple goal is to attach to a block
        # move_to_goal_goal = GoalBase("move_to_goal_goal", permanent=True,
        #                              conditions=[Condition(self.sensor_manager.pseudo_sensor, GreedyActivator())],
        #                              planner_prefix=self._agent_name)
        # self.goals.append(move_to_goal_goal)

        # PREPARE SUBMISSION: rotate the pattern into the correct position
        prepare_submission = PrepareSubmission(
            name="prepare_submission",
            perception_provider=self.perception_provider,
            agent_name=self._agent_name)
        self.behaviours.append(prepare_submission)
        prepare_submission.add_effect(
            Effect(self.sensor_manager.correct_pattern_position_sensor.name,
                   indicator=True))
        # prepare_submission.add_effect(Effect(self.sensor_manager.pseudo_sensor.name, indicator=+1, sensor_type=float))
        prepare_submission.add_precondition(
            Condition(self.sensor_manager.on_goal_sensor,
                      BooleanActivator(desiredValue=True)))
        prepare_submission.add_precondition(
            Condition(self.sensor_manager.block_attached_sensor,
                      BooleanActivator(desiredValue=True)))

        # # Our simple goal is to rotate the pattern into the correct position
        # prepare_submission_goal = GoalBase("prepare_submission_goal", permanent=True,
        #                                    conditions=[Condition(self.sensor_manager.pseudo_sensor, GreedyActivator())],
        #                                    planner_prefix=self._agent_name)
        # self.goals.append(prepare_submission_goal)

        # SUBMIT: hand in the completed task
        submit = Submit(name="submit",
                        perception_provider=self.perception_provider,
                        agent_name=self._agent_name,
                        agent=self)
        self.behaviours.append(submit)
        submit.add_effect(
            Effect(self.sensor_manager.pseudo_sensor.name,
                   indicator=+1,
                   sensor_type=float))
        submit.add_precondition(
            Condition(self.sensor_manager.on_goal_sensor,
                      BooleanActivator(desiredValue=True)))
        submit.add_precondition(
            Condition(self.sensor_manager.correct_pattern_position_sensor,
                      BooleanActivator(desiredValue=True)))

        # Our simple goal is to submit a task
        submit_goal = GoalBase("submit_goal",
                               permanent=True,
                               conditions=[
                                   Condition(self.sensor_manager.pseudo_sensor,
                                             GreedyActivator())
                               ],
                               planner_prefix=self._agent_name)
        self.goals.append(submit_goal)
class RhbpAgent(object):
    """
    Main class of an agent, taking care of the main interaction with the mapc_ros_bridge.

    Responsibilities:
    - initialise the ROS node, communication (subtask updates, auction, map) and
      the local grid map
    - build the RHBP behaviour/goal model once the simulation starts
    - on every action request: synchronise tasks/subtasks, merge maps, update
      sensors, then run the RHBP reasoning loop until the step deadline
    """

    def __init__(self):
        ###DEBUG MODE###
        log_level = rospy.DEBUG if global_variables.DEBUG_MODE else rospy.INFO
        ################

        rospy.logdebug("RhbpAgent::init")

        rospy.init_node('agent_node', anonymous=True, log_level=log_level)

        self._agent_name = rospy.get_param('~agent_name', 'agentA2')  # default for debugging 'agentA1'

        self._agent_topic_prefix = get_bridge_topic_prefix(agent_name=self._agent_name)

        # ensure also max_parallel_behaviours during debugging
        self._manager = Manager(prefix=self._agent_name, max_parallel_behaviours=1)

        self.behaviours = []
        self.goals = []

        self.perception_provider = PerceptionProvider()

        # start communication class
        self._communication = Communication(self._agent_name)
        # Task update topic
        self._pub_subtask_update = self._communication.start_subtask_update(self._callback_subtask_update)

        # auction structure
        self.auction = Auction(self)
        self.number_of_agents = 2  # number_of_agents needs to match amount of launched agents

        self.map_communication = MapCommunication(self)

        self._sim_started = False

        # agent attributes
        self.local_map = GridMap(agent_name=self._agent_name, agent_vision=5)

        # instantiate the sensor manager passing a reference to this agent
        self.sensor_manager = SensorManager(self)

        # representation of tasks
        self.tasks = {}
        # personal for the agent. the task at index 0 is the task the agent
        # is currently executing
        self.assigned_subtasks = []

        # subscribe to MAPC bridge core simulation topics
        rospy.Subscriber(self._agent_topic_prefix + "request_action", RequestAction, self._action_request_callback)
        rospy.Subscriber(self._agent_topic_prefix + "start", SimStart, self._sim_start_callback)
        rospy.Subscriber(self._agent_topic_prefix + "end", SimEnd, self._sim_end_callback)
        rospy.Subscriber(self._agent_topic_prefix + "bye", Bye, self._bye_callback)
        rospy.Subscriber(self._agent_topic_prefix + "generic_action", GenericAction, self._callback_generic_action)

        self._received_action_response = False

    def start_rhbp_reasoning(self, start_time, deadline):
        """
        Run the RHBP decision-making loop until a behaviour replies or the
        step deadline is reached.

        :param start_time: rostime when the action request arrived
        :param deadline: rostime by which an action must be sent
        """
        self._received_action_response = False

        # self._received_action_response is set to True if a generic action
        # response was received (send by any behaviour)
        while not self._received_action_response and rospy.get_rostime() < deadline:
            # wait until this agent is completely initialised
            if self._sim_started:  # we at least wait our max time to get our agent initialised
                # action send is finally triggered by a selected behaviour
                self._manager.step(guarantee_decision=True)
            else:
                rospy.sleep(0.1)

        if self._received_action_response:  # One behaviour replied with a decision
            duration = rospy.get_rostime() - start_time
            rospy.logdebug("%s: Decision-making duration %f", self._agent_name, duration.to_sec())
        elif not self._sim_started:  # Agent was not initialised in time
            rospy.logwarn("%s idle_action(): sim not yet started", self._agent_name)
        else:  # Our decision-making has taken too long
            rospy.logwarn("%s: Decision-making timeout", self._agent_name)

    def _sim_start_callback(self, msg):
        """
        here we could also evaluate the msg in order to initialize depending on the role etc.
        :param msg: the message
        :type msg: SimStart
        """
        if not self._sim_started:  # init only once here
            rospy.loginfo(self._agent_name + " started")

            # creating the actual RHBP model
            self._initialize_behaviour_model()

        self._sim_started = True

    def _callback_generic_action(self, msg):
        """
        ROS callback for generic actions
        :param msg: ros message
        :type msg: GenericAction
        """
        # any behaviour reply counts as a decision for the current step
        self._received_action_response = True

    def _sim_end_callback(self, msg):
        """
        :param msg: the message
        :type msg: SimEnd
        """
        #rospy.loginfo("SimEnd:" + str(msg))
        # tear down the RHBP model so a following simulation starts clean
        for g in self.goals:
            g.unregister()
        for b in self.behaviours:
            b.unregister()
        self._sim_started = False

    def _bye_callback(self, msg):
        """
        :param msg: the message
        :type msg: Bye
        """
        #rospy.loginfo("Simulation finished")
        rospy.signal_shutdown('Shutting down {} - Simulation server closed'.format(self._agent_name))

    def _action_request_callback(self, msg):
        """
        here we just trigger the decision-making and planning
        while tracking the available time and behaviour responses
        :param msg: the message
        :type msg: RequestAction
        """

        # calculate deadline for the current simulation step
        start_time = rospy.get_rostime()
        safety_offset = rospy.Duration.from_sec(0.2)  # Safety offset in seconds
        deadline_msg = rospy.Time.from_sec(msg.deadline / 1000.0)
        current_msg = rospy.Time.from_sec(msg.time / 1000.0)
        deadline = start_time + (deadline_msg - current_msg) - safety_offset

        self.perception_provider.update_perception(request_action_msg=msg)

        ###### UPDATE AND SYNCHRONIZATION ######

        # update tasks from perception
        self.tasks = update_tasks(current_tasks=self.tasks,
                                  tasks_percept=self.perception_provider.tasks,
                                  simulation_step=self.perception_provider.simulation_step)

        # remove assigned subtasks if task is deleted
        # (iterate a snapshot [:] because we remove while looping)
        for assigned_subtask in self.assigned_subtasks[:]:
            if assigned_subtask.parent_task_name not in self.tasks:
                self.assigned_subtasks.remove(assigned_subtask)
        #rospy.loginfo("{} updated tasks. New amount of tasks: {}".format(self._agent_name, len(self.tasks)))

        # task auctioning
        self.auction.task_auctioning()

        # map update
        self.local_map.update_map(perception=self.perception_provider)

        # map merging
        self.map_communication.map_merge()

        # TODO understand a better order for this functions
        # map update after merging
        self.local_map._update_path_planner_representation(perception=self.perception_provider)
        self.local_map._update_distances()

        # send the map if perceive the goal
        if self.local_map.goal_area_fully_discovered:
            self.map_communication.publish_map()

        # if last action was `dispense` and result = 'success' then can attach
        if self.perception_provider.agent.last_action == "request" and self.perception_provider.agent.last_action_result == "success":
            #TODO check if the subtask block type is the same of the block just dispensed
            self.assigned_subtasks[0].is_dispensed = True

        # if last action was `connect` and result = 'success' then save the attached block
        if self.perception_provider.agent.last_action == "connect" and self.perception_provider.agent.last_action_result == "success":
            other_agent_name = self.perception_provider.agent.last_action_params[0]
            task_name = self.assigned_subtasks[0].parent_task_name
            current_task = self.tasks.get(task_name, None)

            # For submitting agent is_connected only when all the others blocks are connected
            if self.assigned_subtasks[0].submit_behaviour:
                is_connected_counter = 0
                number_of_subtasks = len(current_task.sub_tasks)
                for sub_task in current_task.sub_tasks:
                    # Check for other agents
                    if sub_task.assigned_agent != self._agent_name:
                        if sub_task.is_connected:
                            is_connected_counter += 1
                # Check for all the other subtaks connected
                if is_connected_counter == number_of_subtasks - 1:
                    self.assigned_subtasks[0].is_connected = True
            else:
                self.assigned_subtasks[0].is_connected = True

            # make the other guy's subtask completed and connected
            for sub_task in current_task.sub_tasks:
                # TODO check which is the completed sub_task if an agent can have more sub_tasks
                # TODO because there is no communication, other agents updating your is_connected it shouldn't affect
                if sub_task.assigned_agent == other_agent_name:
                    self.local_map._attached_blocks.append(Block(block_type=sub_task.type, position=sub_task.position))
                    sub_task.is_connected = True
                    sub_task.complete = True

        # if last action was detach, detach the blocks
        if self.perception_provider.agent.last_action == "detach" and \
                self.perception_provider.agent.last_action_result == "success":
            # TODO detach only the block in the direction of the detach
            self.local_map._attached_blocks = []
            # FIX: iterate a snapshot ([:]) — the original removed from the
            # list while iterating it directly, which skips elements; the
            # same method already uses the [:] idiom above for the same reason.
            for assigned_subtask in self.assigned_subtasks[:]:
                if assigned_subtask.is_connected == True:
                    # DO NOT KNOW WHY THE PREVIOUS SUBTASK WAS DELETED
                    assigned_subtask.is_complete = True
                    self.assigned_subtasks.remove(assigned_subtask)

        # if last action was submit, detach the blocks
        if self.perception_provider.agent.last_action == "submit" and \
                self.perception_provider.agent.last_action_result == "success":
            # TODO detach only the block in the direction of the task
            self.local_map._attached_blocks = []
            # this shouldn't be necessary because the task is not in the percept,
            # therefore the subtask is removed
            # self.assigned_subtasks.pop()

        '''
        # send personal message test
        if self._agent_name == "agentA1":
            self._communication.send_message(self._pub_agents, "agentA2", "task", "[5,5]")
        '''

        # update the sensors before starting the rhbp reasoning
        self.sensor_manager.update_sensors()

        # dumped class
        if global_variables.DUMP_CLASS:
            # TODO use a relative path and save everything in the test folder
            test_case_number = 'fixed'
            file_name = 'task_assignment_for_' + str(self.number_of_agents) + '_' + test_case_number
            file_object = open("/home/alvaro/Desktop/AAIP/mapc_workspace/src/group5/strategy_1/src/" \
                               + file_name + '.dat', "wb")
            pickle.dump(self.tasks, file_object)
            file_object.close()

        self.start_rhbp_reasoning(start_time, deadline)

    def _callback_agents(self, msg):
        """
        ROS callback for direct agent-to-agent messages; acknowledges
        messages addressed to this agent.
        """
        msg_id = msg.message_id
        msg_from = msg.agent_id_from
        msg_type = msg.message_type
        msg_param = msg.params

        if msg.agent_id_to == self._agent_name:
            rospy.loginfo(
                self._agent_name + " received message from " + msg_from +
                " | id: " + msg_id + " | type: " + msg_type + " | params: " +
                msg_param)
            self._communication.send_message(self._pub_agents, msg_from, "received", msg_id)

    def _callback_subtask_update(self, msg):
        """
        ROS callback for subtask state updates from other agents.

        :param msg: subtask update message (message_id, agent_id, command, task_id)
        """
        msg_id = msg.message_id
        msg_from = msg.agent_id
        command = msg.command
        message_subtask_id = msg.task_id

        # if gets a "done" command, then cycle all the subtasks when the task
        # passed in the message is found then is set as complete
        if command == "done":
            for task_name, task_object in self.tasks.iteritems():
                for sub in task_object.sub_tasks:
                    current_subtask_id = sub.sub_task_name
                    if current_subtask_id == message_subtask_id:
                        sub.complete = True
                        break

    def _initialize_behaviour_model(self):
        """
        This function initialises the RHBP behaviour/goal model.
        """

        ### Exploration ##
        exploration_move = ExplorationBehaviour(name="exploration_move", agent_name=self._agent_name, rhbp_agent=self)
        self.behaviours.append(exploration_move)
        # assigned to a task
        exploration_move.add_precondition(Condition(sensor=self.sensor_manager.assigned_task_list_empty,
                                                    activator=BooleanActivator(desiredValue=True)))
        exploration_move.add_effect(Effect(self.perception_provider.dispenser_visible_sensor.name, indicator=True))
        exploration_move.add_effect(Effect(self.sensor_manager.assigned_task_list_empty.name, indicator=True))

        ### Move to Dispenser ###
        move_to_dispenser = MoveToDispenserBehaviour(name="move_to_dispenser", agent_name=self._agent_name,
                                                     rhbp_agent=self)
        self.behaviours.append(move_to_dispenser)
        # assigned to a task precondition
        move_to_dispenser.add_precondition(
            Condition(self.sensor_manager.assigned_task_list_empty, BooleanActivator(desiredValue=False))
        )
        # block not attached precondition
        move_to_dispenser.add_precondition(
            Condition(self.sensor_manager.attached_to_block, BooleanActivator(desiredValue=False))
        )
        # not at the dispenser precondition
        move_to_dispenser.add_precondition(
            Condition(self.sensor_manager.at_the_dispenser, BooleanActivator(desiredValue=False))
        )
        move_to_dispenser.add_effect(Effect(self.sensor_manager.at_the_dispenser.name, indicator=True))

        # # Our simple goal is to create more and more blocks
        # dispense_goal = GoalBase("dispensing", permanent=True,
        #                          conditions=[
        #                              Condition(self.sensor_manager.at_the_dispenser, GreedyActivator())],
        #                          planner_prefix=self._agent_name)
        # self.goals.append(dispense_goal)

        ### Requeste block - Dispense ###
        dispense = DispenseBehaviour(name="dispense", agent_name=self._agent_name,
                                     rhbp_agent=self)
        self.behaviours.append(dispense)
        # assigned to a task precondition
        dispense.add_precondition(
            Condition(self.sensor_manager.assigned_task_list_empty, BooleanActivator(desiredValue=False))
        )
        # block not attached precondition
        dispense.add_precondition(
            Condition(self.sensor_manager.attached_to_block, BooleanActivator(desiredValue=False))
        )
        # not at the dispenser precondition
        dispense.add_precondition(
            Condition(self.sensor_manager.at_the_dispenser, BooleanActivator(desiredValue=True))
        )
        # not next to block
        dispense.add_precondition(Condition(self.sensor_manager.next_to_block, BooleanActivator(desiredValue=False)))

        # effect of dispense is that agent is next to block
        dispense.add_effect(Effect(self.sensor_manager.next_to_block.name, indicator=True))

        # Our simple goal is to create more and more blocks
        # dispense_goal = GoalBase("dispensing", permanent=True,
        #                          conditions=[
        #                              Condition(self.sensor_manager.attached_to_block, GreedyActivator())],
        #                          planner_prefix=self._agent_name)
        # self.goals.append(dispense_goal)

        #### Attach to Block ###
        attach = AttachBehaviour(name="attach", agent_name=self._agent_name, rhbp_agent=self)
        self.behaviours.append(attach)

        # Preconditions
        # assigned to a task
        attach.add_precondition(Condition(sensor=self.sensor_manager.assigned_task_list_empty,
                                          activator=BooleanActivator(desiredValue=False)))
        # is not yet attached to a block of type of the current task
        attach.add_precondition(Condition(sensor=self.sensor_manager.attached_to_block,
                                          activator=BooleanActivator(desiredValue=False)))
        # has already dispensed the block, temporary solution for lack of communication
        attach.add_precondition(
            Condition(sensor=self.sensor_manager.is_dispensed, activator=BooleanActivator(desiredValue=True)))
        # is next to a block
        attach.add_precondition(
            Condition(sensor=self.sensor_manager.next_to_block, activator=BooleanActivator(desiredValue=True)))
        # has free capacity to attach
        attach.add_precondition(Condition(sensor=self.sensor_manager.fully_attached,
                                          activator=BooleanActivator(desiredValue=False)))

        # effect of attach is that agent is attached to a block
        attach.add_effect(Effect(self.sensor_manager.attached_to_block.name, indicator=True))

        # attach_goal = GoalBase("attaching", permanent=True,
        #                        conditions=[
        #                            Condition(self.sensor_manager.attached_to_block, GreedyActivator())],
        #                        planner_prefix=self._agent_name)
        # self.goals.append(attach_goal)

        #### Reach meeting point ###
        reach_meeting_point = ReachMeetingPointBehaviour(name="reach_meeting_point", agent_name=self._agent_name,
                                                         rhbp_agent=self)
        self.behaviours.append(reach_meeting_point)
        # assigned to a task
        reach_meeting_point.add_precondition(Condition(sensor=self.sensor_manager.assigned_task_list_empty,
                                                       activator=BooleanActivator(desiredValue=False)))
        # have the block attached
        reach_meeting_point.add_precondition(Condition(sensor=self.sensor_manager.attached_to_block,
                                                       activator=BooleanActivator(desiredValue=True)))
        # the shape is not complete
        reach_meeting_point.add_precondition(Condition(sensor=self.sensor_manager.shape_complete,
                                                       activator=BooleanActivator(desiredValue=False)))
        # has not reached the meeting point already
        reach_meeting_point.add_precondition(Condition(sensor=self.sensor_manager.at_meeting_point,
                                                       activator=BooleanActivator(desiredValue=False)))

        # effect is moving till the agent reaches the meeting point
        reach_meeting_point.add_effect(Effect(self.sensor_manager.at_meeting_point.name, indicator=True))

        # at_meeting_point_goal = GoalBase("reach_meeting_point_goal", permanent=True,
        #                                  conditions=[
        #                                      Condition(self.sensor_manager.at_meeting_point, GreedyActivator())],
        #                                  planner_prefix=self._agent_name)
        # self.goals.append(at_meeting_point_goal)

        #### Connect ###
        connect = ConnectBehaviour(name="connect", agent_name=self._agent_name, rhbp_agent=self)
        self.behaviours.append(connect)
        # assigned to a task
        connect.add_precondition(Condition(sensor=self.sensor_manager.assigned_task_list_empty,
                                           activator=BooleanActivator(desiredValue=False)))
        # have the block attached
        connect.add_precondition(Condition(sensor=self.sensor_manager.attached_to_block,
                                           activator=BooleanActivator(desiredValue=True)))
        # connection not completed
        connect.add_precondition(Condition(sensor=self.sensor_manager.connect_successful,
                                           activator=BooleanActivator(desiredValue=False)))
        # has reached the meeting point
        connect.add_precondition(Condition(sensor=self.sensor_manager.at_meeting_point,
                                           activator=BooleanActivator(desiredValue=True)))

        # effect is creating the shape
        connect.add_effect(Effect(self.sensor_manager.connect_successful.name, indicator=True))
        connect.add_effect(Effect(self.sensor_manager.shape_complete.name, indicator=True))

        # connect_successful_goal = GoalBase("connect_successful_goal", permanent=True,
        #                                    conditions=[
        #                                        Condition(self.sensor_manager.connect_successful, GreedyActivator())],
        #                                    planner_prefix=self._agent_name)
        # self.goals.append(connect_successful_goal)

        #### Detach to Block ###
        detach = DetachBehaviour(name="detach", agent_name=self._agent_name, rhbp_agent=self)
        self.behaviours.append(detach)

        # Preconditions
        # assigned to a task
        detach.add_precondition(Condition(sensor=self.sensor_manager.assigned_task_list_empty,
                                          activator=BooleanActivator(desiredValue=False)))
        # connect action was succesful
        detach.add_precondition(
            Condition(sensor=self.sensor_manager.connect_successful, activator=BooleanActivator(desiredValue=True)))
        # is NOT the agent assigned to submit
        detach.add_precondition(
            Condition(sensor=self.sensor_manager.can_submit, activator=BooleanActivator(desiredValue=False)))

        detach.add_effect(Effect(self.sensor_manager.points.name, indicator=True))

        #### Go to goal area ###
        go_to_goal_area = ReachGoalAreaBehaviour(name="go_to_goal_area", agent_name=self._agent_name, rhbp_agent=self)
        self.behaviours.append(go_to_goal_area)

        # Preconditions
        # assigned to a task
        go_to_goal_area.add_precondition(Condition(sensor=self.sensor_manager.assigned_task_list_empty,
                                                   activator=BooleanActivator(desiredValue=False)))
        # connect action was succesful
        go_to_goal_area.add_precondition(
            Condition(sensor=self.sensor_manager.shape_complete, activator=BooleanActivator(desiredValue=True)))
        # is the agent assigned to submit
        go_to_goal_area.add_precondition(
            Condition(sensor=self.sensor_manager.can_submit, activator=BooleanActivator(desiredValue=True)))
        # is not at the goal area yet
        go_to_goal_area.add_precondition(
            Condition(sensor=self.sensor_manager.at_goal_area, activator=BooleanActivator(desiredValue=False)))

        # effect is that agent is at goal area
        go_to_goal_area.add_effect(Effect(self.sensor_manager.at_goal_area.name, indicator=True))

        #### Submit ###
        submit = SubmitBehaviour(name="submit", agent_name=self._agent_name, rhbp_agent=self)
        self.behaviours.append(submit)

        # Preconditions
        # assigned to a task
        submit.add_precondition(Condition(sensor=self.sensor_manager.assigned_task_list_empty,
                                          activator=BooleanActivator(desiredValue=False)))
        # have the block attached
        # FIX: this precondition was added to `connect` instead of `submit`
        # (copy-paste error) — the submit behaviour was missing its
        # attached-to-block requirement while connect got a duplicate.
        submit.add_precondition(Condition(sensor=self.sensor_manager.attached_to_block,
                                          activator=BooleanActivator(desiredValue=True)))
        # connect action was succesful
        submit.add_precondition(
            Condition(sensor=self.sensor_manager.shape_complete, activator=BooleanActivator(desiredValue=True)))
        # is the agent assigned to submit
        submit.add_precondition(
            Condition(sensor=self.sensor_manager.can_submit, activator=BooleanActivator(desiredValue=True)))
        # is the agent at the goal area
        submit.add_precondition(
            Condition(sensor=self.sensor_manager.at_goal_area, activator=BooleanActivator(desiredValue=True)))

        # effect of is that points are earned
        submit.add_effect(Effect(self.sensor_manager.points.name, indicator=True))

        # our unique goal is to make points
        make_points_goal = GoalBase("make_points_goal", permanent=True,
                                    conditions=[
                                        Condition(self.sensor_manager.points, GreedyActivator())],
                                    planner_prefix=self._agent_name)
        self.goals.append(make_points_goal)