def __init__(self, objectives):
    # pylint: disable=line-too-long
    super().__init__(None, 'calf')

    motors = ['eat_and_forward', 'forward', 'dive_and_forward', 'up_and_forward']
    eat_and_forward, forward = frozenset([0]), frozenset([1])
    dive_and_forward, up_and_forward = frozenset([2]), frozenset([3])
    motors_to_action = {eat_and_forward: 'eat_and_forward',
                        forward: 'forward',
                        dive_and_forward: 'dive_and_forward',
                        up_and_forward: 'up_and_forward',
                        '*': '-'}
    motor_model = MotorModel(motors_to_action)

    self.network = N = Network(None, objectives)
    self.status = N.get_NEEDs()
    self.status_history = {'energy': []}
    s1 = N.add_SENSOR_node(Squid)
    s2 = N.add_SENSOR_node(Song)
    self.network_model = NetworkModel({frozenset([]): 'no_sensors',
                                       frozenset([s1]): 'squid',
                                       frozenset([s2]): 'song',
                                       frozenset([s1, s2]): 'squid_and_song'})
    self.motor_network = M = MotorNetwork(motors, motors_to_action)

    # NOTE: init=calf_start_pos, using a location here (only for debugging);
    # it is a state when MDPs are used
    self.ndp = NetworkDP(calf_start_pos, self.status, motor_model, gamma=.9,
                         network_model=self.network_model)
    self.q_agent = NetworkQLearningAgent(self.ndp, Ne=0, Rplus=2,
                                         alpha=lambda n: 60./(59+n),
                                         epsilon=0.2, delta=0.5)

    # compose applies the functions from right to left
    self.program = compose(do(partial(l.debug, 'Calf mnetwork.update'))
                           , do(partial(l.debug, M))
                           , lambda a: do(partial(l.debug, '*** CALF EATING! ***'))(a) if a == 'eat_and_forward' else a
                           , M.update
                           , do(partial(l.debug, 'Calf q_agent'))
                           , self.q_agent
                           , do(partial(l.debug, N))
                           , lambda p: do(partial(l.debug, '*** CALF HEARD SONG! ***'))(p) if s2 in p[0] else p
                           , lambda p: do(partial(l.debug, '*** CALF FOUND SQUID! ***'))(p) if s1 in p[0] else p
                           , do(partial(l.debug, 'Calf network.update'))
                           , N.update
                           , do(partial(l.debug, 'Calf percept')))
def __init__(self, objectives):
    # pylint: disable=line-too-long, too-many-locals
    # program=None
    super().__init__(None, 'mom')

    # Motors and actions
    motors = ['sing_eat_and_forward', 'forward', 'dive_and_forward', 'up_and_forward']
    sing_eat_and_forward, forward = frozenset([0]), frozenset([1])
    dive_and_forward, up_and_forward = frozenset([2]), frozenset([3])
    motors_to_action = {sing_eat_and_forward: 'sing_eat_and_forward',
                        forward: 'forward',
                        dive_and_forward: 'dive_and_forward',
                        up_and_forward: 'up_and_forward',
                        '*': '-'}
    motor_model = MotorModel(motors_to_action)

    self.network = N = Network(None, objectives)
    self.status = N.get_NEEDs()
    self.status_history = {'energy': []}
    s1 = N.add_SENSOR_node(Squid)
    self.network_model = NetworkModel({frozenset(): 'no_sensors',
                                       frozenset([s1]): 'squid'})
    self.motor_network = M = MotorNetwork(motors, motors_to_action)

    # NOTE: init=mom_start_pos, using a location here (only for debugging);
    # it is a state when MDPs are used
    self.ndp = NetworkDP(mom_start_pos, self.status, motor_model, gamma=.9,
                         network_model=self.network_model)
    self.q_agent = NetworkQLearningAgent(self.ndp, Ne=0, Rplus=2,
                                         alpha=lambda n: 60./(59+n),
                                         epsilon=0.2, delta=0.5)

    # compose applies the functions from right to left
    self.program = compose(do(partial(l.debug, 'Mom mnetwork.update'))
                           , do(partial(l.debug, M))
                           , M.update
                           , do(partial(l.debug, 'Mom q_agent'))
                           , self.q_agent
                           , do(partial(l.debug, N))
                           , do(partial(l.debug, 'Mom network.update'))
                           , N.update
                           , do(partial(l.debug, 'Mom percept')))
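# The pipelines above are built with `compose` and `do`. The sketch below shows the
# assumed semantics only (an assumption, not necessarily this project's actual
# implementation): `compose(f, g, h)(x)` evaluates `f(g(h(x)))`, so the rightmost
# function sees the raw percept first, and `do(side_effect)` returns a pass-through
# function that runs the side effect (here: logging) and hands its argument on
# unchanged.
from functools import reduce

def compose(*funcs):
    # compose(f, g, h)(x) == f(g(h(x))) -- applied right to left
    return reduce(lambda f, g: lambda x: f(g(x)), funcs)

def do(side_effect):
    # Run side_effect(x) for its effect and return x unchanged
    def passthrough(x):
        side_effect(x)
        return x
    return passthrough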
def __init__(self):
    # pylint: disable=line-too-long
    super().__init__(None, 'calf')

    N = Network(None, {'energy': 1.0})
    self.status = N.get_NEEDs()
    self.status_history = {'energy': []}
    s1 = N.add_SENSOR_node(Squid)
    r1 = N.add_RAND_node(0.3)
    r2 = N.add_RAND_node(0.3)
    r3 = N.add_RAND_node(0.3)
    s2 = N.add_SENSOR_node(Song)

    M = MotorNetwork(motors, motors_to_action)

    state_to_motor = {frozenset([r1, r2, r3]): forward,
                      frozenset([r1, r2]): forward,
                      frozenset([r1, r3]): forward,
                      frozenset([r2, r3]): forward,
                      frozenset([r2]): forward,
                      frozenset([r3]): up_and_forward,
                      frozenset([r1]): up_and_forward,
                      frozenset([]): dive_and_forward}

    # compose applies the functions from right to left
    self.program = compose(
        do(partial(l.debug, 'Calf mnetwork.update')),
        M.update,
        do(partial(l.debug, 'Calf state_to_motor')),
        lambda p: eat_and_forward if s1 in p[0] else (dive_and_forward if s2 in p[0] else up_and_forward)
        #, lambda s: eat_and_forward if s1 in s else (dive_and_forward if s2 in s else state_to_motor.get(s))
        , lambda p: do(partial(l.info, '--- CALF HEARD SONG, DIVING! ---'))(p) if s2 in p[0] else p,
        lambda p: do(partial(l.info, '--- CALF FOUND SQUID, EATING! ---'))(p) if s1 in p[0] else p,
        do(partial(l.debug, 'Calf network.update')),
        N.update,
        do(partial(l.debug, 'Calf percept')))
def __init__(self, objectives, landmarks):
    # pylint: disable=line-too-long, too-many-locals
    super().__init__(None, 'grid_agent')

    N = Network(None, objectives)
    SENSOR = N.add_SENSOR_node
    self.status = N.get_NEEDs()
    self.status_history = {'energy': [], 'water': []}

    # Create sensors
    SENSOR(Water)
    SENSOR(Energy)

    # create one SENSOR for each square
    sensor_dict = {}
    for lm in landmarks:
        sensor_dict[frozenset([SENSOR(Landmark, lm)])] = lm
    network_model = NetworkModel(sensor_dict)

    M = MotorNetwork(motors, motors_to_action)

    # NOTE: init=agent_start_pos, using a location here (only for debugging);
    # it is a state when MDPs are used
    self.ndp = NetworkDP(agent_start_pos, self.status, motor_model, .9, network_model)
    self.q_agent = NetworkQLearningAgent(self.ndp, Ne=0, Rplus=2,
                                         alpha=lambda n: 60. / (59 + n),
                                         epsilon=0.2, delta=0.5)

    # compose applies the functions from right to left
    self.program = compose(
        do(partial(l.debug, 'mnetwork.update')),
        M.update,
        do(partial(l.debug, 'q_agent')),
        self.q_agent,
        do(partial(l.debug, N)),
        do(partial(l.debug, 'network.update')),
        N.update,
        do(partial(l.debug, 'percept')),
        lambda x: do(partial(l.debug, '*** ENERGY FOUND ***'))(x) if 'energy' in x[1] and x[1]['energy'] > 0.0 else x,
        lambda x: do(partial(l.debug, '*** WATER FOUND ***'))(x) if 'water' in x[1] and x[1]['water'] > 0.0 else x,
        do(self.printU))
def __init__(self):
    # pylint: disable=line-too-long, too-many-locals
    super().__init__(None, 'mom')

    N = Network(None, {'energy': 1.0})
    self.status = N.get_NEEDs()
    self.status_history = {'energy': []}
    M = MotorNetwork(motors, motors_to_action)

    SENSOR, RAND, AND = N.add_SENSOR_node, N.add_RAND_node, N.add_AND_node
    NOT, OR = N.add_NOT_node, N.add_OR_node

    s1, r1, r2, r3 = SENSOR(Squid), RAND(0.3), RAND(0.3), RAND(0.3)

    # Logic nodes: when no squid is sensed, the random nodes r1-r3 drive the
    # default movement pattern (n2 -> forward, n3 -> dive, n4 -> up)
    n3 = AND([NOT([s1]),
              OR([AND([r1, NOT([r2, r3])]),
                  AND([r3, NOT([r1, r2])])])])
    n4 = AND([NOT([s1]), NOT([r1, r2, r3])])
    n2 = NOT([s1, n3, n4])

    state_to_motor = {frozenset([s1]): sing_eat_and_forward,
                      frozenset([n2]): forward,
                      frozenset([n3]): dive_and_forward,
                      frozenset([n4]): up_and_forward}

    l.info('state_to_motor: %s', state_to_motor)
    l.info('motors_to_action: %s', motors_to_action)

    # compose applies the functions from right to left
    self.program = compose(
        do(partial(l.debug, 'Mom mnetwork.update')),
        M.update,
        do(partial(l.debug, 'Mom state_to_motor')),
        lambda p: state_to_motor.get(p[0]),
        do(partial(l.debug, N)),
        do(partial(l.debug, 'Mom filter interesting states')),
        lambda p: (p[0] & {s1, n2, n3, n4}, p[1]),
        do(partial(l.debug, 'Mom network.update')),
        N.update,
        do(partial(l.debug, 'Mom percept')))
def test_motor_to_action(self):
    # A mapping {active_motors: action}.
    # Each motor is mapped to one action. It is only meaningful to
    # activate one motor at a time; all other combinations of
    # active motors are mapped to the action '-' (do nothing).
    motors = ['<', '>', '^', 'v']
    motors_to_action1 = {frozenset([0]): '<',
                         frozenset([1]): '>',
                         frozenset([2]): '^',
                         frozenset([3]): 'v',
                         '*': '-'}

    mnetwork = MotorNetwork(motors, motors_to_action1)

    mnetwork.update(frozenset([0]))
    self.assertEqual(mnetwork.get_action(), '<')
    mnetwork.update(frozenset([1]))
    self.assertEqual(mnetwork.get_action(), '>')
    mnetwork.update(frozenset([2]))
    self.assertEqual(mnetwork.get_action(), '^')
    mnetwork.update(frozenset([3]))
    self.assertEqual(mnetwork.get_action(), 'v')

    # Motor model where all combinations of active motors have an action
    motors_to_action2 = {frozenset(): '<',
                         frozenset([0]): '>',
                         frozenset([1]): '^',
                         frozenset([0, 1]): 'v'}

    mnetwork = MotorNetwork(motors, motors_to_action2)

    mnetwork.update(frozenset())
    self.assertEqual(mnetwork.get_action(), '<')
    mnetwork.update(frozenset([0]))
    self.assertEqual(mnetwork.get_action(), '>')
    mnetwork.update(frozenset([1]))
    self.assertEqual(mnetwork.get_action(), '^')
    mnetwork.update(frozenset([0, 1]))
    self.assertEqual(mnetwork.get_action(), 'v')
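# How the '*' wildcard in a motors_to_action table is assumed to resolve -- a
# sketch of the lookup only (hypothetical helper, not MotorNetwork's actual code):
# an exact match on the frozenset of active motors wins, otherwise the '*' entry
# supplies the fallback action.
def lookup_action(motors_to_action, active_motors):
    if active_motors in motors_to_action:
        return motors_to_action[active_motors]
    return motors_to_action['*']

# e.g. lookup_action(motors_to_action1, frozenset([0]))    -> '<'
#      lookup_action(motors_to_action1, frozenset([0, 1])) -> '-'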
def test_MOTOR_AND_SEQ(self):
    mnetwork = MotorNetwork()

    # Plain MOTOR nodes: the network state is the set of activated motors
    n1 = mnetwork.add_MOTOR_node('m1')
    self.assertEqual(mnetwork.get(), set())
    mnetwork.update(set([n1]))
    self.assertEqual(mnetwork.get(), set([0]))
    mnetwork.update(set())
    self.assertEqual(mnetwork.get(), set())

    n2 = mnetwork.add_MOTOR_node('m2')
    mnetwork.update(set([n2]))
    self.assertEqual(mnetwork.get(), set([n2]))
    mnetwork.update(set([n2, n1]))
    self.assertEqual(mnetwork.get(), set([n1, n2]))

    # MAND node: activating n3 activates both of its children (n1 and n2)
    n3 = mnetwork.add_MAND_node([n1, n2])
    mnetwork.update(set())
    self.assertEqual(mnetwork.get(), set())
    mnetwork.update(set([n3]))
    self.assertEqual(mnetwork.get(), set([n1, n2]))

    # MSEQ node: activating n5 runs its children in sequence,
    # n3 (i.e. n1 and n2) on the first update and n4 on the next
    n4 = mnetwork.add_MOTOR_node('m3')
    n5 = mnetwork.add_MSEQ_node([n3, n4])
    mnetwork.update(set())
    self.assertEqual(mnetwork.get(), set())
    mnetwork.update(set())
    self.assertEqual(mnetwork.get(), set())
    mnetwork.update(set([n5]))
    self.assertEqual(mnetwork.get(), set([n1, n2]))
    mnetwork.update(set())
    self.assertEqual(mnetwork.get(), set([n4]))
    mnetwork.update(set())
    self.assertEqual(mnetwork.get(), set())

    mnetwork.delete_nodes([n5])
    mnetwork.update(set([n4]))
    self.assertEqual(mnetwork.get(), set([n4]))