示例#1
0
 def test_named_SENSOR(self):
     """A named SENSOR fires only for percepts carrying its name."""
     network = Network()
     n1 = network.add_SENSOR_node(Thing1, 'one')
     # A Thing1 named 'two' must not activate the sensor named 'one'.
     self.assertEqual(
         unpack0(network.update(([(Thing1('two'), 1.0)], {}))), set())
     # A Thing1 named 'one' activates exactly the matching sensor.
     self.assertEqual(
         unpack0(network.update(([(Thing1('one'), 1.0)], {}))), {n1})
示例#2
0
        class Cachelot(Agent):
            """Agent that eats any Squid it perceives, otherwise swims forward."""
            # pylint: disable=no-self-use

            def __init__(self):
                super().__init__(None, 'Cachelot')
                self.status_history = {'energy': []}
                self.network = Network(None, {'energy': 1.0})
                self.status = self.network.get_NEEDs()

            def program(self, percept):
                """Map one percept to an action string."""
                things, _ = percept
                self.network.update(percept)
                saw_squid = any(isinstance(thing, Squid) for thing, _ in things)
                return 'eat_and_forward' if saw_squid else 'forward'
示例#3
0
    def test_NEED(self):
        """A need level never exceeds 1.0; negative deltas reduce it."""
        N = Network(None, {'energy': 1.0})

        # A positive delta leaves the level at 1.0 (presumably capped there).
        N.update(([], {'energy': 0.001}))
        self.assertEqual(N.get_NEEDs()['energy'], 1.0)

        # A negative delta is subtracted from the current level.
        N.update(([], {'energy': -0.001}))
        self.assertEqual(N.get_NEEDs()['energy'], 0.999)
示例#4
0
 def test_RAND_NOT(self):
     """A NOT node is always the complement of its single RAND input."""
     network = Network()
     n1 = network.add_RAND_node(0.5)
     n2 = network.add_NOT_node([n1])
     # Whatever the random node does, its negation must disagree with it.
     for _ in range(100):
         network.update(([], {}))
         state = network.get_state()
         self.assertNotEqual(state[n1], state[n2])
示例#5
0
    def __init__(self, objectives, landmarks):
        """Grid agent: sensors, learned network model and a Q-learning policy.

        objectives -- dict mapping need names to initial levels
                      (e.g. {'energy': 1.0}), handed to Network
        landmarks  -- landmarks to sense; one SENSOR node is created per item
        """
        # pylint: disable=line-too-long, too-many-locals

        super().__init__(None, 'grid_agent')

        N = Network(None, objectives)
        SENSOR = N.add_SENSOR_node
        self.status = N.get_NEEDs()
        self.status_history = {'energy': [], 'water': []}

        # Create sensors
        SENSOR(Water)
        SENSOR(Energy)
        # create one SENSOR for each square
        sensor_dict = {}
        for lm in landmarks:
            sensor_dict[frozenset([SENSOR(Landmark, lm)])] = lm
        network_model = NetworkModel(sensor_dict)

        M = MotorNetwork(motors, motors_to_action)

        # NOTE: init=agent_start_pos, using a location here (only for debugging),
        #            is a state when MDP:s are used
        self.ndp = NetworkDP(agent_start_pos, self.status, motor_model, .9,
                             network_model)
        self.q_agent = NetworkQLearningAgent(self.ndp,
                                             Ne=0,
                                             Rplus=2,
                                             alpha=lambda n: 60. / (59 + n),
                                             epsilon=0.2,
                                             delta=0.5)

        # compose applies the functions from right to left:
        # percept -> N.update -> q_agent -> M.update -> action.
        # The lambdas are pass-through taps that only log notable events
        # (assuming `do` returns its argument, consistent with its use here).
        self.program = compose(
            do(partial(l.debug, 'mnetwork.update')), M.update,
            do(partial(l.debug, 'q_agent')), self.q_agent,
            do(partial(l.debug, N)), do(partial(l.debug,
                                                'network.update')), N.update,
            do(partial(l.debug, 'percept')),
            # Fixed typo in the log message: 'ENERY' -> 'ENERGY'.
            lambda x: do(partial(l.debug, '*** ENERGY FOUND ***'))(x)
            if 'energy' in x[1] and x[1]['energy'] > 0.0 else x,
            lambda x: do(partial(l.debug, '*** WATER FOUND ***'))(x)
            if 'water' in x[1] and x[1]['water'] > 0.0 else x, do(self.printU))
示例#6
0
    def __init__(self, objectives):
        """Calf agent: Squid/Song sensors driving a NetworkQLearningAgent.

        objectives: dict mapping need names to initial levels, handed to
                    Network (e.g. {'energy': 1.0}).
        """
        # pylint: disable=line-too-long

        super().__init__(None, 'calf')

        # Discrete actions the calf can emit each step.
        motors = ['eat_and_forward', 'forward', 'dive_and_forward',
                  'up_and_forward']

        # Each motor is encoded as a frozenset of motor-node ids.
        eat_and_forward, forward = frozenset([0]), frozenset([1])
        dive_and_forward, up_and_forward = frozenset([2]), frozenset([3])

        # '*' -> '-' entry: presumably the fallback when no motor set
        # matches — confirm against MotorModel's lookup semantics.
        motors_to_action = {eat_and_forward: 'eat_and_forward',
                            forward: 'forward',
                            dive_and_forward: 'dive_and_forward',
                            up_and_forward: 'up_and_forward',
                            '*': '-'}

        motor_model = MotorModel(motors_to_action)


        # Need network with one sensor per relevant percept type.
        self.network = N = Network(None, objectives)
        self.status = N.get_NEEDs()
        self.status_history = {'energy':[]}
        s1 = N.add_SENSOR_node(Squid)
        s2 = N.add_SENSOR_node(Song)
        # Names every observable sensor combination for the learner.
        self.network_model = NetworkModel({frozenset([]): 'no_sensors',
                                      frozenset([s1]): 'squid',
                                      frozenset([s2]): 'song',
                                      frozenset([s1,s2]): 'squid_and_song'})

        self.motor_network = M = MotorNetwork(motors, motors_to_action)

        # NOTE: init=agent_start_pos, using a location here (only for debugging),
        #            is a state when MDP:s are used
        self.ndp = NetworkDP(calf_start_pos, self.status, motor_model, gamma=.9,
                             network_model=self.network_model)
        self.q_agent = NetworkQLearningAgent(self.ndp, Ne=0, Rplus=2,
                                             alpha=lambda n: 60./(59+n),
                                             epsilon=0.2,
                                             delta=0.5)

        # compose applies the functions from right to left:
        # percept -> N.update -> q_agent -> M.update -> action.
        # The lambdas are pass-through taps that only log notable events
        # (assuming `do` returns its argument, consistent with its use here).
        self.program = compose(do(partial(l.debug, 'Calf mnetwork.update'))
                               , do(partial(l.debug, M))
                               , lambda a: do(partial(l.debug, '*** CALF EATING! ***'))(a) if a == 'eat_and_forward' else a
                               , M.update
                               , do(partial(l.debug, 'Calf q_agent'))
                               , self.q_agent
                               , do(partial(l.debug, N))
                               , lambda p: do(partial(l.debug, '*** CALF HEARD SONG! ***'))(p) if s2 in p[0] else p
                               , lambda p: do(partial(l.debug, '*** CALF FOUND SQUID! ***'))(p) if s1 in p[0] else p
                               , do(partial(l.debug, 'Calf network.update'))
                               , N.update
                               , do(partial(l.debug, 'Calf percept'))
                              )
示例#7
0
    def __init__(self):
        """Mom agent: fixed reactive policy built from logic/random nodes."""
        # pylint: disable=line-too-long, too-many-locals

        super().__init__(None, 'mom')

        # Single 'energy' need tracked by the state network.
        N = Network(None, {'energy': 1.0})
        self.status = N.get_NEEDs()
        self.status_history = {'energy': []}

        M = MotorNetwork(motors, motors_to_action)
        SENSOR, RAND, AND = N.add_SENSOR_node, N.add_RAND_node, N.add_AND_node
        NOT, OR = N.add_NOT_node, N.add_OR_node

        # One squid sensor plus three random nodes (p=0.3 each).
        s1, r1, r2, r3 = SENSOR(Squid), RAND(0.3), RAND(0.3), RAND(0.3)
        # n3: no squid, and exactly one of r1/r3 active with r2 off
        # (NOT over a list is true iff all listed nodes are inactive).
        n3 = AND([
            NOT([s1]),
            OR([AND([r1, NOT([r2, r3])]),
                AND([r3, NOT([r1, r2])])])
        ])
        # n4: no squid and all three randoms off.
        n4 = AND([NOT([s1]), NOT([r1, r2, r3])])
        # n2: default — none of s1/n3/n4 is active.
        n2 = NOT([s1, n3, n4])

        # Maps the (filtered) active-node set directly to a motor.
        state_to_motor = {
            frozenset([s1]): sing_eat_and_forward,
            frozenset([n2]): forward,
            frozenset([n3]): dive_and_forward,
            frozenset([n4]): up_and_forward
        }

        # NOTE(review): l.info is given a second positional argument while the
        # message has no % placeholder; with a stdlib logger the argument would
        # not be rendered — confirm l's API accepts this calling style.
        l.info('state_to_motor:', state_to_motor)
        l.info('motors_to_action:', motors_to_action)

        # compose applies the functions from right to left:
        # percept -> N.update -> keep only {s1,n2,n3,n4} -> table lookup -> M.update.
        self.program = compose(
            do(partial(l.debug, 'Mom mnetwork.update')), M.update,
            do(partial(l.debug, 'Mom state_to_motor')),
            lambda p: state_to_motor.get(p[0]), do(partial(l.debug, N)),
            do(partial(l.debug, 'Mom filter interesting states')), lambda p:
            (p[0] & {s1, n2, n3, n4}, p[1]),
            do(partial(l.debug, 'Mom network.update')), N.update,
            do(partial(l.debug, 'Mom percept')))
示例#8
0
    def __init__(self, objectives):
        """Mom agent: a single Squid sensor driving a NetworkQLearningAgent.

        objectives: dict mapping need names to initial levels, handed to
                    Network.
        """
        # pylint: disable=line-too-long, too-many-locals

        # program=None
        super().__init__(None, 'mom')

        # Motors and actions
        motors = ['sing_eat_and_forward', 'forward', 'dive_and_forward',
                  'up_and_forward']

        # Each motor is encoded as a frozenset of motor-node ids.
        sing_eat_and_forward, forward = frozenset([0]), frozenset([1])
        dive_and_forward, up_and_forward = frozenset([2]), frozenset([3])

        # '*' -> '-' entry: presumably the fallback when no motor set
        # matches — confirm against MotorModel's lookup semantics.
        motors_to_action = {sing_eat_and_forward: 'sing_eat_and_forward',
                            forward: 'forward',
                            dive_and_forward: 'dive_and_forward',
                            up_and_forward: 'up_and_forward',
                            '*': '-'}

        motor_model = MotorModel(motors_to_action)


        # Need network with one sensor for squid percepts.
        self.network = N = Network(None, objectives)
        self.status = N.get_NEEDs()
        self.status_history = {'energy':[]}
        s1 = N.add_SENSOR_node(Squid)
        # Names each observable sensor combination for the learner.
        self.network_model = NetworkModel({frozenset(): 'no_sensors',
                                           frozenset([s1]): 'squid'})

        self.motor_network = M = MotorNetwork(motors, motors_to_action)

        # NOTE: init=agent_start_pos, using a location here (only for debugging),
        #            is a state when MDP:s are used
        self.ndp = NetworkDP(mom_start_pos, self.status, motor_model, gamma=.9,
                             network_model=self.network_model)
        self.q_agent = NetworkQLearningAgent(self.ndp, Ne=0, Rplus=2,
                                             alpha=lambda n: 60./(59+n),
                                             epsilon=0.2,
                                             delta=0.5)

        # compose applies the functions from right to left:
        # percept -> N.update -> q_agent -> M.update -> action; do(...) taps log.
        self.program = compose(do(partial(l.debug, 'Mom mnetwork.update'))
                               , do(partial(l.debug, M))
                               , M.update
                               , do(partial(l.debug, 'Mom q_agent'))
                               , self.q_agent
                               , do(partial(l.debug, N))
                               , do(partial(l.debug, 'Mom network.update'))
                               , N.update
                               , do(partial(l.debug, 'Mom percept'))
                              )
示例#9
0
    def test_complex(self):
        """Derived gate nodes must track their boolean definitions every step."""
        N = Network()
        SENSOR, RAND, AND = N.add_SENSOR_node, N.add_RAND_node, N.add_AND_node
        NOT, OR = N.add_NOT_node, N.add_OR_node

        s1, r1, r2, r3 = SENSOR(Thing1), RAND(0.3), RAND(0.3), RAND(0.3)
        t1 = NOT([r1, r2])
        t2 = NOT([r2, r3])
        t3 = AND([r3, t1])
        t4 = AND([r1, t2])
        n3 = AND([NOT([s1]), OR([t4, t3])])
        n4 = AND([NOT([s1]), NOT([r1, r2, r3])])
        n2 = NOT([s1, n3, n4])

        S = N.state
        for i in range(100):
            # Feed the sensor a Thing1 on every other step.
            percept = ([(Thing1(), 1.0)] if i % 2 else [], {})
            vs = unpack0(N.update(percept))
            # Each derived gate must equal its boolean definition.
            self.assertEqual(S[t1], not S[r1] and not S[r2])
            self.assertEqual(S[t2], not S[r2] and not S[r3])
            self.assertEqual(S[t3], S[r3] and S[t1])
            self.assertEqual(S[t4], S[r1] and S[t2])
            # Exactly one of the four top-level nodes is active each step.
            self.assertEqual(len(vs & {s1, n2, n3, n4}), 1)
示例#10
0
    def __init__(self):
        """Reactive calf: eat on squid, dive on song, otherwise go up.

        Fixed (non-learning) policy wired as a compose pipeline.
        """
        # pylint: disable=line-too-long

        super().__init__(None, 'calf')

        # Single 'energy' need.
        N = Network(None, {'energy': 1.0})
        self.status = N.get_NEEDs()
        self.status_history = {'energy': []}

        # Squid/Song sensors plus three random nodes; the random nodes are
        # consumed only by the commented-out policy variant below.
        s1 = N.add_SENSOR_node(Squid)
        r1 = N.add_RAND_node(0.3)
        r2 = N.add_RAND_node(0.3)
        r3 = N.add_RAND_node(0.3)
        s2 = N.add_SENSOR_node(Song)

        M = MotorNetwork(motors, motors_to_action)

        # NOTE(review): currently unused — only the commented-out lambda below
        # references this table; kept, presumably, for the random-walk variant.
        state_to_motor = {
            frozenset([r1, r2, r3]): forward,
            frozenset([r1, r2]): forward,
            frozenset([r1, r3]): forward,
            frozenset([r2, r3]): forward,
            frozenset([r2]): forward,
            frozenset([r3]): up_and_forward,
            frozenset([r1]): up_and_forward,
            frozenset([]): dive_and_forward
        }

        # compose applies the functions from right to left:
        # percept -> N.update -> log squid/song -> pick motor -> M.update.
        self.program = compose(
            do(partial(l.debug, 'Calf mnetwork.update')),
            M.update,
            do(partial(l.debug, 'Calf state_to_motor')),
            # Policy priority: squid (eat) beats song (dive) beats default (up).
            lambda p: eat_and_forward
            if s1 in p[0] else (dive_and_forward
                                if s2 in p[0] else up_and_forward)
            #, lambda s: eat_and_forward if s1 in s else (dive_and_forward if s2 in s else state_to_motor.get(s))
            ,
            lambda p: do(partial(l.info, '--- CALF HEARD SONG, DIVING! ---'))
            (p) if s2 in p[0] else p,
            lambda p: do(partial(l.info, '--- CALF FOUND SQUID, EATING! ---'))
            (p) if s1 in p[0] else p,
            do(partial(l.debug, 'Calf network.update')),
            N.update,
            do(partial(l.debug, 'Calf percept')))
示例#11
0
    def test_top_active(self):
        """top_active() reports active nodes not subsumed by an active parent."""
        network = Network()
        n1 = network.add_SENSOR_node(Thing1)
        n2 = network.add_SENSOR_node(Thing2)
        n3 = network.add_SENSOR_node(Thing3)
        n4 = network.add_SEQ_node([n1, n2, n3])

        network.update(([(Thing1(), 1.0)], {}))
        self.assertEqual(network.get(), {n1})
        self.assertEqual(network.top_active(), {n1})

        network.update(([(Thing2(), 1.0)], {}))
        self.assertEqual(network.get(), {n2})
        self.assertEqual(network.top_active(), {n2})

        # Completing the sequence activates the SEQ node; only it is "top".
        network.update(([(Thing3(), 1.0)], {}))
        self.assertEqual(network.get(), {n3, n4})
        self.assertEqual(network.top_active(), {n4})

        network.update(([], {}))
        self.assertEqual(network.get(), set())

        # Check that SEQ can keep track of multiple sequences at the same time
        network.update(([(Thing1(), 1.0)], {}))
        network.update(([(Thing2(), 1.0), (Thing1(), 1.0)], {}))
        network.update(([(Thing3(), 1.0), (Thing2(), 1.0)], {}))
        self.assertEqual(network.get(), {n3, n2, n4})
        network.update(([(Thing3(), 1.0)], {}))
        self.assertEqual(network.get(), {n3, n4})
示例#12
0
    def test_SEQ(self):
        """SEQ fires only when its inputs activate in order on consecutive steps."""
        network = Network([('thing1', Thing1), ('thing2', Thing2)])
        network.add_SEQ_node([0, 1])

        # Thing1 alone: first sensor on, SEQ not yet triggered.
        network.update(([(Thing1(), 1.0)], {}))
        self.assertEqual(network.get_state(), (True, False, False))

        # Thing2 immediately after Thing1 completes the sequence.
        network.update(([(Thing2(), 0.5)], {}))
        self.assertEqual(network.get_state(), (False, True, True))

        # Thing2 again without a preceding Thing1: SEQ stays off.
        network.update(([(Thing2(), 1.0)], {}))
        self.assertEqual(network.get_state(), (False, True, False))

        network.update(([], {}))
        self.assertEqual(network.get_state(), (False, False, False))
示例#13
0
    def test_SENSOR_AND(self):
        """SENSOR state starts as None; AND is active only when both inputs are."""
        network = Network()
        network.add_SENSOR_node(Thing1)

        # Before the first update the sensor state is undefined.
        self.assertEqual(network.get_state(), (None, ))

        network.update(([(Thing2(), 1.0)], {}))
        self.assertEqual(network.get_state(), (False, ))

        network.update(([(Thing1(), 1.0)], {}))
        self.assertEqual(network.get_state(), (True, ))

        network.add_SENSOR_node(Thing2)
        network.update(([(Thing2(), 1.0)], {}))
        self.assertEqual(network.get_state(), (False, True))

        network.update(([], {}))
        self.assertEqual(network.get_state(), (False, False))

        # Percept strength does not matter, only presence.
        network.update(([(Thing2(), 2.0), (Thing1(), 0.5)], {}))
        self.assertEqual(network.get_state(), (True, True))

        # AND over both sensors fires only when both are active.
        network.add_AND_node([0, 1])
        network.update(([(Thing2(), 1.0), (Thing1(), 0.3)], {}))
        self.assertEqual(network.get_state(), (True, True, True))

        network.update(([(Thing2(), 2.0)], {}))
        self.assertEqual(network.get_state(), (False, True, False))
示例#14
0
    def test_ONE(self):
        """ONE, MIN and MAX threshold nodes over three sensors."""
        network = Network()
        n1 = network.add_SENSOR_node(Thing1)
        n2 = network.add_SENSOR_node(Thing2)
        n3 = network.add_SENSOR_node(Thing3)
        n1T = network.add_ONE_node(n1, [n1, n2, n3])
        n2T = network.add_ONE_node(n2, [n1, n2, n3])
        n3T = network.add_ONE_node(n3, [n1, n2, n3])

        # A ONE node fires when its target is the only active sensor.
        self.assertEqual(
            unpack0(network.update(([(Thing1(), 1.0)], {}))) - {n1}, {n1T})
        self.assertEqual(
            unpack0(network.update(([(Thing2(), 1.0)], {}))) - {n2}, {n2T})
        self.assertEqual(
            unpack0(network.update(([(Thing3(), 1.0)], {}))) - {n3}, {n3T})
        # With two or more sensors active, no ONE node fires.
        self.assertEqual(
            unpack0(network.update(([(Thing1(), 1.0), (Thing2(), 1.0)], {}))) -
            {n1, n2}, set())
        self.assertEqual(
            unpack0(
                network.update(([(Thing1(), 1.0), (Thing2(), 1.0),
                                 (Thing3(), 1.0)], {}))) - {n1, n2, n3},
            set())
        self.assertEqual(unpack0(network.update(([], {}))), set())

        # MIN nodes fire when at least N of their inputs are active.
        m1 = network.add_MIN_node(1, [n1, n2, n3])
        m2 = network.add_MIN_node(2, [n1, n2, n3])
        m3 = network.add_MIN_node(3, [n1, n2, n3])

        vs = unpack0(network.update(([(Thing3(), 1.0)], {})))
        self.assertIn(m1, vs)
        self.assertNotIn(m2, vs)
        self.assertNotIn(m3, vs)
        vs = unpack0(network.update(([(Thing1(), 1.0), (Thing2(), 1.0)], {})))
        self.assertIn(m1, vs)
        self.assertIn(m2, vs)
        self.assertNotIn(m3, vs)
        vs = unpack0(
            network.update(([(Thing1(), 1.0), (Thing2(), 1.0),
                             (Thing3(), 1.0)], {})))
        self.assertIn(m1, vs)
        self.assertIn(m2, vs)
        self.assertIn(m3, vs)

        # MAX nodes fire when at most N of their inputs are active.
        m1 = network.add_MAX_node(1, [n1, n2, n3])
        m2 = network.add_MAX_node(2, [n1, n2, n3])
        m3 = network.add_MAX_node(3, [n1, n2, n3])

        vs = unpack0(network.update(([], {})))
        self.assertIn(m1, vs)
        self.assertIn(m2, vs)
        self.assertIn(m3, vs)
        vs = unpack0(network.update(([(Thing3(), 1.0)], {})))
        self.assertIn(m1, vs)
        self.assertIn(m2, vs)
        self.assertIn(m3, vs)
        vs = unpack0(network.update(([(Thing1(), 1.0), (Thing2(), 1.0)], {})))
        self.assertNotIn(m1, vs)
        self.assertIn(m2, vs)
        self.assertIn(m3, vs)
        vs = unpack0(
            network.update(([(Thing1(), 1.0), (Thing2(), 1.0),
                             (Thing3(), 1.0)], {})))
        self.assertNotIn(m1, vs)
        self.assertNotIn(m2, vs)
        self.assertIn(m3, vs)

        # NOTE: Need to be manually checked
        network.saveGraphviz('test_ONE_graph.dot')
示例#15
0
 def __init__(self):
     """Set up the Cachelot's need network and status bookkeeping."""
     super().__init__(None, 'Cachelot')
     self.network = Network(None, {'energy': 1.0})
     self.status = self.network.get_NEEDs()
     self.status_history = {'energy': []}