Example #1
class StateMachine:
    def __init__(self, states: Actions):
        """
            Don't need initial and end state since the transition
            will be a ordered list.
        :param states:
        """
        self._states = states
        self._planner = Planner(actions=states)
        self._transitions = []
        self._current_state = None
        self.start_state = None
        self.end_state = None

    def set_transitions(self, init_state, end_state):
        """ Receives an ordered list and set it to self._transitions

        :param planner:
        :param init_state:
        :param end_state:
        :param obj:
        :param obj: obj capable of order the list
        :return: None
        """
        transitions = []
        if init_state in self._planner.actions.all_possible_states():
            plan = self._planner.plan(init_state, end_state)
            print(plan)
            for src, dst, obj in plan:
                transitions.append(obj['object'])
        self._transitions = transitions

    def get_transitions(self):
        return self._transitions

    def stop(self):
        self._current_state = None

    def start(self, init_state: dict, end_state: dict):
        result = []

        self.set_transitions(init_state=init_state, end_state=end_state)
        for state in self._transitions:
            self._current_state = state
            result.append(self._current_state.do())

        self.stop()
        return result
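A minimal usage sketch for this class, assuming the Actions API shown in Example #4 below; the action names, the state dicts, and the do() behaviour of the resulting transition objects are assumptions, not part of the original example:

# Hypothetical wiring; Actions.add() signature taken from Example #4.
actions = Actions()
actions.add(name='CreateTmpDir',
            pre_conditions={'tmp_dir_state': False, 'tmp_dir_content': False},
            effects={'tmp_dir_state': True, 'tmp_dir_content': False},
            shell='mkdir -p /tmp/goap_tmp')
actions.add(name='CreateToken',
            pre_conditions={'tmp_dir_state': True, 'tmp_dir_content': False},
            effects={'tmp_dir_state': True, 'tmp_dir_content': True},
            shell='touch /tmp/goap_tmp/.token')

sm = StateMachine(states=actions)
# start() plans a route from init_state to end_state and calls do() on each transition
results = sm.start(init_state={'tmp_dir_state': False, 'tmp_dir_content': False},
                   end_state={'tmp_dir_state': True, 'tmp_dir_content': True})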
Example #2
class StateMachine:
    def __init__(self, states: Actions):
        """
            Don't need initial and end state since the transition
            will be a ordered list.
        :param states:
        """
        self._states = states
        self._planner = Plan(actions=states)
        self._transitions = []
        self._current_state = None
        self.start_state = None
        self.end_state = None

    def set_transitions(self, init_state: dict, end_state: dict):
        """ Receives an ordered list and set it to self._transitions

        :param init_state: A dict representing the initial state of the StateMachine
        :param end_state: A dict representing the final state of the StateMachine
        :return: None
        """
        transitions = []
        if init_state in self._planner.actions.all_possible_states():
            plan = self._planner.plan(init_state, end_state)
            print(plan)
            for src, dst, obj in plan:
                transitions.append(obj['object'])
        self._transitions = transitions

    def get_transitions(self):
        return self._transitions

    def stop(self):
        self._current_state = None
        # sys.exit(1)

    def start(self, init_state: dict, end_state: dict):
        result = []
        self.set_transitions(init_state=init_state, end_state=end_state)
        for state in self._transitions:
            self._current_state = state
            result.append(self._current_state.do())

        self.stop()
        return result
Example #3
class Automaton:
    """ A 3 State Machine Automaton: observing (aka monitor or patrol), planning and acting """

    machine = MethodicalMachine()

    def __init__(self,
                 name: str = 'Automaton',
                 sensors: Sensors = [],
                 actions: Actions = [],
                 world_state: dict = DEFAULT_WORLD_STATE):
        # setup
        self.world_state = WorldState(world_state)
        self.working_memory = []
        self.name = name
        self.sensors = sensors
        self.actions = actions
        self.planner = Planner(actions=actions)
        #
        self.action_plan = []
        self.action_plan_response = None
        self.sensors_responses = {}
        self.actions_response = []
        self.goal = {}

    def __sense_environment(self):
        # run every sensor and store each reading as a Fact in working memory
        Observable.from_(self.sensors).subscribe(
            lambda sensor: self.working_memory.append(
                Fact(sensor=sensor.name, data=sensor.exec(), binding=sensor.binding))
        )
        # project every Fact's output onto the world state via its binding
        Observable.from_(self.working_memory).subscribe(
            lambda fact: setattr(self.world_state, fact.binding, fact.data.output)
        )

    def __set_action_plan(self):
        self.action_plan = self.planner.plan(self.world_state, self.goal)
        return self.action_plan

    def __execute_action_plan(self):
        # [print(action[2]['object']) for action in self.action_plan]
        # run every planned action; each plan edge carries its Action under the 'object' key
        self.actions_response = [action[2]['object'].exec() for action in self.action_plan]
        return 'Action planning execution results: {}'.format(self.actions_response)

    @machine.state(initial=True)
    def waiting_orders(self):
        """ Waiting goal / orders """

    @machine.state()
    def sensing(self):
        """ Running sensors and assimilating sensor's responses """

    @machine.state()
    def planning(self):
        """ Generating action plan to change actual world state to achieve goal """

    @machine.state()
    def acting(self):
        """ Executing action plan"""

    @machine.input()
    def sense(self):
        """ Input sense state """

    @machine.output()
    def __sense(self):
        """ Execute sensors """
        self.__sense_environment()

    @machine.input()
    def plan(self):
        """ Input for planning state """

    @machine.output()
    def __plan(self):
        """ Generate action plan """
        self.__set_action_plan()

    @machine.input()
    def act(self):
        """ Input for acting state"""

    @machine.output()
    def __act(self):
        """ Execute action plan """
        self.__execute_action_plan()

    @machine.input()
    def input_goal(self, goal):
        """ Change / Set AI goal """

    @machine.output()
    def __input_goal(self, goal):
        """ Actually sets goal """
        self.goal = goal

    # cyclical main states
    waiting_orders.upon(sense, enter=sensing, outputs=[__sense])
    sensing.upon(plan, enter=planning, outputs=[__plan])
    planning.upon(act, enter=acting, outputs=[__act])
    acting.upon(sense, enter=sensing, outputs=[__sense])
    # change orders
    waiting_orders.upon(input_goal, enter=waiting_orders, outputs=[__input_goal])
    planning.upon(input_goal, enter=waiting_orders, outputs=[__input_goal])
    acting.upon(input_goal, enter=waiting_orders, outputs=[__input_goal])
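A hedged driver sketch for this automaton; my_sensors and my_actions are hypothetical Sensors/Actions instances, and the goal dict reuses the world-state keys seen elsewhere on this page. Only the inputs declared above (input_goal, sense, plan, act) are invoked:

# Hypothetical driver for the sense -> plan -> act cycle.
ai = Automaton(name='dir_watcher',
               sensors=my_sensors,
               actions=my_actions,
               world_state={'tmp_dir_state': False, 'tmp_dir_content': False})

ai.input_goal({'tmp_dir_state': True, 'tmp_dir_content': True})  # set the goal
ai.sense()   # waiting_orders -> sensing: run sensors, update world_state
ai.plan()    # sensing -> planning: ask the Planner for an action plan
ai.act()     # planning -> acting: execute the plan
ai.sense()   # acting -> sensing: re-observe the world and repeat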
Example #4
import subprocess
import unittest

from os import path
from pprint import PrettyPrinter

import networkx as nx

# Project imports; module paths assumed from the Goap package used in this example.
from Goap.Action import Actions
from Goap.Planner import Planner
from Goap.WorldState import WorldState


class PlannerTest(unittest.TestCase):
    @staticmethod
    def __reset_environment():
        if path.isdir('/tmp/goap_tmp'):
            subprocess.call(['rm', '-rf', '/tmp/goap_tmp'])

    def __print(self):
        self.print.pprint('Nodes: {}, Edges: {}'.format(
            self.planner.graph.nodes, self.planner.graph.edges))

    def setUp(self):
        self.print = PrettyPrinter(indent=4)
        self.setUpDirHandlerCMD()
        self.setUpLvmCMD()
        self.init_ws = WorldState({
            "tmp_dir_state": False,
            "tmp_dir_content": False,
        })
        self.gs = WorldState({
            "tmp_dir_state": True,
            "tmp_dir_content": True,
        })
        self.planner = Planner(actions=self.dir_handler_cmd)

    def setUpDirHandlerCMD(self):
        self.dir_handler_cmd = Actions()
        self.dir_handler_cmd.add(name='CreateTmpDir',
                                 pre_conditions={
                                     'tmp_dir_state': False,
                                     'tmp_dir_content': False
                                 },
                                 effects={
                                     'tmp_dir_state': True,
                                     'tmp_dir_content': False
                                 },
                                 shell='mkdir -p /tmp/goap_tmp')
        self.dir_handler_cmd.add(name='CreateToken',
                                 pre_conditions={
                                     'tmp_dir_state': True,
                                     'tmp_dir_content': False
                                 },
                                 effects={
                                     'tmp_dir_state': True,
                                     'tmp_dir_content': True
                                 },
                                 shell='touch /tmp/goap_tmp/.token')

    def setUpLvmCMD(self):
        self.lv_act = Actions()
        self.lv_act.add(name='ExpandLV',
                        pre_conditions={
                            'lv_need_expansion': True,
                            'vg_need_expansion': False,
                        },
                        effects={
                            'lv_need_expansion': False,
                        },
                        shell='echo expand_lv',
                        cost=1.0)
        self.lv_act.add(name='ExpandVG',
                        pre_conditions={
                            'vg_need_expansion': True,
                            'pv_need_expansion': False,
                        },
                        effects={
                            'vg_need_expansion': False,
                        },
                        shell='echo expand_vg')
        self.lv_act.add(
            name='ExpandPV',
            pre_conditions={
                'pv_need_expansion': True,
            },
            effects={
                'pv_need_expansion': False,
            },
            shell='echo purge_old_files',
            cost=1.5,
        )

    def test_set_goal(self):
        self.planner.goal = self.gs
        assert self.gs == self.planner.goal

    def test_graph_isomorphic(self):
        from Goap.Planner import Node
        from Goap.Planner import Edge

        acts = Actions()
        acts.add(name='CreateTmpDir',
                 pre_conditions={
                     'tmp_dir_state': False,
                     'tmp_dir_content': False
                 },
                 effects={
                     'tmp_dir_state': True,
                     'tmp_dir_content': False
                 },
                 shell='mkdir -p /tmp/goap_tmp')
        acts.add(name='CreateToken',
                 pre_conditions={
                     'tmp_dir_state': True,
                     'tmp_dir_content': False
                 },
                 effects={
                     'tmp_dir_state': True,
                     'tmp_dir_content': True
                 },
                 shell='touch /tmp/goap_tmp/.token')
        node1 = Node(attributes={
            'tmp_dir_state': False,
            'tmp_dir_content': False
        })
        node2 = Node(attributes={
            'tmp_dir_state': True,
            'tmp_dir_content': False
        })
        node3 = Node(attributes={
            'tmp_dir_state': True,
            'tmp_dir_content': True
        })
        edge1 = Edge(name='CreateTmpDir',
                     predecessor=node1,
                     successor=node2,
                     obj=acts.get('CreateTmpDir'))
        edge2 = Edge(name='CreateToken',
                     predecessor=node2,
                     successor=node3,
                     obj=acts.get('CreateToken'))
        # nx.DiGraph() keyword arguments only set graph attributes, so add the
        # nodes and edges explicitly to build the expected graph
        g1 = nx.DiGraph()
        g1.add_nodes_from([node1, node2, node3])
        g1.add_edges_from([(node1, node2, {'object': edge1}),
                           (node2, node3, {'object': edge2})])
        g2 = self.planner.graph.directed
        assert nx.is_isomorphic(g1, g2) is True

    def test_plan(self):
        create_tmp_dir = self.planner.actions.get('CreateTmpDir')
        create_token = self.planner.actions.get('CreateToken')
        plan = self.planner.plan(state=self.init_ws, goal=self.gs)
        action_plan = [action[2]['object'] for action in plan]
        assert action_plan == [create_tmp_dir, create_token]
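For completeness, a hedged sketch of consuming a plan like the one built in test_plan; the (src, dst, data) edge layout with the action stored under the 'object' key follows the test above, while run_plan itself is a hypothetical helper:

# Hypothetical helper: walk the plan's edges and run each action in order.
def run_plan(plan):
    results = []
    for src, dst, data in plan:        # networkx-style edge tuples
        action = data['object']        # the Action attached to this edge
        results.append(action.exec())  # runs the configured shell command
    return results

# e.g. run_plan(planner.plan(state=init_ws, goal=gs)) would create
# /tmp/goap_tmp and then touch /tmp/goap_tmp/.token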
Example #5
class Automaton:
    """ A 3 State Machine Automaton: observing (aka monitor or patrol), planning and acting """

    machine = MethodicalMachine()

    def __init__(self, name: str, sensors: Sensors, actions: Actions,
                 world_state: dict):
        # setup
        self.world_state = WorldState(world_state)
        self.working_memory = []
        self.name = name
        self.sensors = sensors
        self.actions = actions
        self.planner = Planner(actions=actions)
        #
        self.action_plan = []
        self.action_plan_response = None
        self.sensors_responses = {}
        self.actions_response = []
        self.goal = {}

    def __sense_environment(self):
        # run every sensor and store each reading as a Fact in working memory
        Observable.from_(self.sensors).subscribe(
            lambda sensor: self.working_memory.append(
                Fact(sensor=sensor.name, data=sensor.exec(), binding=sensor.binding))
        )
        # project every Fact's response onto the world state via its binding
        Observable.from_(self.working_memory).subscribe(
            lambda fact: setattr(self.world_state, fact.binding, fact.data.response)
        )

    def __set_action_plan(self):
        self.action_plan = self.planner.plan(self.world_state, self.goal)
        return self.action_plan

    def __execute_action_plan(self):
        self.actions_response = [
            action[2]['object'].exec() for action in self.action_plan
        ]
        return 'Action planning execution results: {}'.format(
            self.actions_response)

    @machine.state(initial=True)
    def waiting_orders(self):
        """ Waiting goal / orders """

    @machine.state()
    def sensing(self):
        """ Running sensors and assimilating sensor's responses """

    @machine.state()
    def planning(self):
        """ Generating action plan to change actual world state to achieve goal """

    @machine.state()
    def acting(self):
        """ Executing action plan"""

    @machine.input()
    def wait(self):
        """ Input waiting_orders state """

    @machine.input()
    def sense(self):
        """ Input sense state """

    @machine.output()
    def __sense(self):
        """ Execute sensors """
        self.__sense_environment()

    @machine.input()
    def plan(self):
        """ Input for planning state """

    @machine.output()
    def __plan(self):
        """ Generate action plan """
        self.__set_action_plan()

    @machine.input()
    def act(self):
        """ Input for acting state"""

    @machine.output()
    def __act(self):
        """ Execute action plan """
        self.__execute_action_plan()

    @machine.input()
    def input_goal(self, goal):
        """ Change / Set AI goal """

    @machine.output()
    def __input_goal(self, goal):
        """ Actually sets goal """
        self.goal = goal

    @machine.output()
    def __reset_working_memory(self):
        self.working_memory = []

    # cyclical main states
    waiting_orders.upon(sense, enter=sensing, outputs=[__sense])
    sensing.upon(plan, enter=planning, outputs=[__plan])
    planning.upon(act, enter=acting, outputs=[__act])
    acting.upon(sense,
                enter=sensing,
                outputs=[__reset_working_memory, __sense])
    # change orders
    waiting_orders.upon(input_goal,
                        enter=waiting_orders,
                        outputs=[__input_goal])
    planning.upon(input_goal, enter=waiting_orders, outputs=[__input_goal])
    acting.upon(input_goal, enter=waiting_orders, outputs=[__input_goal])
    # reset working memory from sensing
    sensing.upon(wait, enter=waiting_orders, outputs=[__reset_working_memory])
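A hedged driver sketch for this variant, highlighting the two transitions it adds over Example #3 (the wait input out of sensing, and the working-memory reset when re-entering sensing); my_sensors, my_actions and the goal dicts are placeholders:

# Hypothetical driver; construction mirrors the sketch under Example #3,
# but this __init__ takes no defaults, so all four arguments are required.
ai = Automaton(name='dir_watcher',
               sensors=my_sensors,
               actions=my_actions,
               world_state={'tmp_dir_state': False, 'tmp_dir_content': False})

ai.input_goal({'tmp_dir_state': True, 'tmp_dir_content': True})
ai.sense()   # waiting_orders -> sensing
ai.wait()    # sensing -> waiting_orders: working memory is cleared
ai.input_goal({'tmp_dir_state': True, 'tmp_dir_content': False})  # change the goal
ai.sense()
ai.plan()
ai.act()
ai.sense()   # acting -> sensing also clears working memory before re-observing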