import random

from agents import (Agent, TableDrivenAgentProgram, ModelBasedReflexAgentProgram,
                    SimpleReflexAgentProgram, RandomAgentProgram,
                    TrivialVacuumEnvironment)


def test_Agent():
    def constant_prog(percept):
        return percept

    agent = Agent(constant_prog)
    result = agent.program(5)
    assert result == 5


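# A minimal sketch (not part of the original suite): Agent accepts any
# callable as its program, so an inline lambda works as well. This assumes
# the Agent class from the aima-python agents module, which only checks
# that the supplied program is callable.
def test_Agent_with_lambda_program():
    agent = Agent(lambda percept: percept * 2)
    assert agent.program(5) == 10

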
def test_TableDrivenAgent():
    random.seed(10)
    loc_A, loc_B = (0, 0), (1, 0)
    # table mapping every possible percept sequence to an action
    table = {((loc_A, 'Clean'),): 'Right',
             ((loc_A, 'Dirty'),): 'Suck',
             ((loc_B, 'Clean'),): 'Left',
             ((loc_B, 'Dirty'),): 'Suck',
             ((loc_A, 'Dirty'), (loc_A, 'Clean')): 'Right',
             ((loc_A, 'Clean'), (loc_B, 'Dirty')): 'Suck',
             ((loc_B, 'Clean'), (loc_A, 'Dirty')): 'Suck',
             ((loc_B, 'Dirty'), (loc_B, 'Clean')): 'Left',
             ((loc_A, 'Dirty'), (loc_A, 'Clean'), (loc_B, 'Dirty')): 'Suck',
             ((loc_B, 'Dirty'), (loc_B, 'Clean'), (loc_A, 'Dirty')): 'Suck'}

    # create a program and then an object of the TableDrivenAgent
    program = TableDrivenAgentProgram(table)
    agent = Agent(program)
    # create an object of the TrivialVacuumEnvironment
    environment = TrivialVacuumEnvironment()
    # initialize the environment status
    environment.status = {loc_A: 'Dirty', loc_B: 'Dirty'}
    # add the agent to the environment
    environment.add_thing(agent, location=(1, 0))
    # run the environment one step at a time to check how it evolves
    # under the TableDrivenAgentProgram
    environment.run(steps=1)
    assert environment.status == {(1, 0): 'Clean', (0, 0): 'Dirty'}
    environment.run(steps=1)
    assert environment.status == {(1, 0): 'Clean', (0, 0): 'Dirty'}
    environment.run(steps=1)
    assert environment.status == {(1, 0): 'Clean', (0, 0): 'Clean'}


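# A small sketch (an assumption, not part of the original suite): the table
# keys are whole percept histories, so the chosen action depends on every
# percept seen so far, not only the latest one. This relies on the
# aima-python TableDrivenAgentProgram accumulating percepts internally.
def test_TableDrivenAgentProgram_lookup():
    loc_A = (0, 0)
    table = {((loc_A, 'Dirty'),): 'Suck',
             ((loc_A, 'Dirty'), (loc_A, 'Clean')): 'Right'}
    program = TableDrivenAgentProgram(table)
    # first percept: the history is ((loc_A, 'Dirty'),)
    assert program((loc_A, 'Dirty')) == 'Suck'
    # second percept: the history is now two percepts long
    assert program((loc_A, 'Clean')) == 'Right'

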
def test_ModelBasedReflexAgentProgram():
    class Rule:
        def __init__(self, state, action):
            self.__state = state
            self.action = action

        def matches(self, state):
            return self.__state == state

    loc_A = (0, 0)
    loc_B = (1, 0)

    # create rules for a two-state Vacuum Environment
    rules = [Rule((loc_A, "Dirty"), "Suck"), Rule((loc_A, "Clean"), "Right"),
             Rule((loc_B, "Dirty"), "Suck"), Rule((loc_B, "Clean"), "Left")]

    def update_state(state, action, percept, transition_model, sensor_model):
        return percept

    # create a program and then an object of the ModelBasedReflexAgentProgram class
    program = ModelBasedReflexAgentProgram(rules, update_state, None, None)
    agent = Agent(program)
    # create an object of TrivialVacuumEnvironment
    environment = TrivialVacuumEnvironment()
    # add agent to the environment
    environment.add_thing(agent)
    # run the environment
    environment.run()
    # check final status of the environment
    assert environment.status == {(1, 0): 'Clean', (0, 0): 'Clean'}


def test_SimpleReflexAgentProgram():
    class Rule:
        def __init__(self, state, action):
            self.__state = state
            self.action = action

        def matches(self, state):
            return self.__state == state

    loc_A = (0, 0)
    loc_B = (1, 0)

    # create rules for a two-state Vacuum Environment
    rules = [Rule((loc_A, "Dirty"), "Suck"), Rule((loc_A, "Clean"), "Right"),
             Rule((loc_B, "Dirty"), "Suck"), Rule((loc_B, "Clean"), "Left")]

    def interpret_input(state):
        return state

    # create a program and then an object of the SimpleReflexAgentProgram
    program = SimpleReflexAgentProgram(rules, interpret_input)
    agent = Agent(program)
    # create an object of TrivialVacuumEnvironment
    environment = TrivialVacuumEnvironment()
    # add agent to the environment
    environment.add_thing(agent)
    # run the environment
    environment.run()
    # check final status of the environment
    assert environment.status == {(1, 0): 'Clean', (0, 0): 'Clean'}


def test_RandomAgentProgram():
    # create a list of all the actions a Vacuum cleaner can perform
    # (named 'actions' so as not to shadow the built-in list type)
    actions = ['Right', 'Left', 'Suck', 'NoOp']
    # create a program and then an object of the RandomAgentProgram
    program = RandomAgentProgram(actions)
    agent = Agent(program)
    # create an object of TrivialVacuumEnvironment
    environment = TrivialVacuumEnvironment()
    # add agent to the environment
    environment.add_thing(agent)
    # run the environment
    environment.run()
    # check final status of the environment
    assert environment.status == {(1, 0): 'Clean', (0, 0): 'Clean'}
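

# A convenience entry point (an assumption, not in the original suite) so the
# module can be run directly; assumes pytest is installed.
if __name__ == '__main__':
    import pytest

    pytest.main([__file__])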