Пример #1
0
def test_ModelBasedReflexAgentProgram():
    """A model-based reflex vacuum agent must end with both squares clean."""

    class Rule:
        """Condition-action rule: fires when the observed state matches."""

        def __init__(self, state, action):
            self.__state = state
            self.action = action

        def matches(self, state):
            return self.__state == state

    loc_A, loc_B = (0, 0), (1, 0)

    # condition-action rules for the two-square vacuum world
    rules = [Rule((loc_A, "Dirty"), "Suck"),
             Rule((loc_A, "Clean"), "Right"),
             Rule((loc_B, "Dirty"), "Suck"),
             Rule((loc_B, "Clean"), "Left")]

    def update_state(state, action, percept, model):
        # the newest percept is taken verbatim as the internal state
        return percept

    # build the program (no initial model) and wrap it in an agent
    program = ModelBasedReflexAgentProgram(rules, update_state, None)
    agent = Agent(program)
    env = TrivialVacuumEnvironment()
    env.add_thing(agent)
    env.run()

    # after a full run every square must be clean
    assert env.status == {(1, 0): 'Clean', (0, 0): 'Clean'}
Пример #2
0
def test_TableDrivenAgent():
    """Step a table-driven vacuum agent one step at a time and verify how
    the environment evolves after each step."""
    a, b = (0, 0), (1, 0)

    # lookup table: percept sequence -> action
    table = {
        ((a, 'Clean'),): 'Right',
        ((a, 'Dirty'),): 'Suck',
        ((b, 'Clean'),): 'Left',
        ((b, 'Dirty'),): 'Suck',
        ((a, 'Dirty'), (a, 'Clean')): 'Right',
        ((a, 'Clean'), (b, 'Dirty')): 'Suck',
        ((b, 'Clean'), (a, 'Dirty')): 'Suck',
        ((b, 'Dirty'), (b, 'Clean')): 'Left',
        ((a, 'Dirty'), (a, 'Clean'), (b, 'Dirty')): 'Suck',
        ((b, 'Dirty'), (b, 'Clean'), (a, 'Dirty')): 'Suck',
    }

    program = TableDrivenAgentProgram(table)
    agent = Agent(program)
    env = TrivialVacuumEnvironment()
    # start with both squares dirty
    env.status = {a: 'Dirty', b: 'Dirty'}
    env.add_thing(agent)

    # expected world status after each of three single steps
    for expected in ({(1, 0): 'Clean', (0, 0): 'Dirty'},
                     {(1, 0): 'Clean', (0, 0): 'Dirty'},
                     {(1, 0): 'Clean', (0, 0): 'Clean'}):
        env.run(steps=1)
        assert env.status == expected
Пример #3
0
def test_TableDrivenAgent():
    """Advance a table-driven vacuum agent step by step, checking the
    environment status after every step."""
    loc_A, loc_B = (0, 0), (1, 0)

    # every percept history the agent can see, paired with its action
    percept_to_action = [
        (((loc_A, 'Clean'),), 'Right'),
        (((loc_A, 'Dirty'),), 'Suck'),
        (((loc_B, 'Clean'),), 'Left'),
        (((loc_B, 'Dirty'),), 'Suck'),
        (((loc_A, 'Dirty'), (loc_A, 'Clean')), 'Right'),
        (((loc_A, 'Clean'), (loc_B, 'Dirty')), 'Suck'),
        (((loc_B, 'Clean'), (loc_A, 'Dirty')), 'Suck'),
        (((loc_B, 'Dirty'), (loc_B, 'Clean')), 'Left'),
        (((loc_A, 'Dirty'), (loc_A, 'Clean'), (loc_B, 'Dirty')), 'Suck'),
        (((loc_B, 'Dirty'), (loc_B, 'Clean'), (loc_A, 'Dirty')), 'Suck'),
    ]
    table = dict(percept_to_action)

    program = TableDrivenAgentProgram(table)
    agent = Agent(program)
    environment = TrivialVacuumEnvironment()
    environment.status = {loc_A: 'Dirty', loc_B: 'Dirty'}  # both dirty
    environment.add_thing(agent)

    # expected status after step 1
    environment.run(steps=1)
    assert environment.status == {(1, 0): 'Clean', (0, 0): 'Dirty'}
    # expected status after step 2
    environment.run(steps=1)
    assert environment.status == {(1, 0): 'Clean', (0, 0): 'Dirty'}
    # after step 3 both squares are clean
    environment.run(steps=1)
    assert environment.status == {(1, 0): 'Clean', (0, 0): 'Clean'}
Пример #4
0
def test_SimpleReflexAgentProgram():
    """A simple reflex vacuum agent must finish with both squares clean."""

    class Rule:
        """Condition-action rule: fires when the observed state matches."""

        def __init__(self, state, action):
            self.__state = state
            self.action = action

        def matches(self, state):
            return self.__state == state

    loc_A, loc_B = (0, 0), (1, 0)

    # condition-action rules for the two-square vacuum world
    rules = [Rule((loc_A, "Dirty"), "Suck"),
             Rule((loc_A, "Clean"), "Right"),
             Rule((loc_B, "Dirty"), "Suck"),
             Rule((loc_B, "Clean"), "Left")]

    def interpret_input(state):
        # percepts are already in rule-state form
        return state

    program = SimpleReflexAgentProgram(rules, interpret_input)
    agent = Agent(program)
    env = TrivialVacuumEnvironment()
    env.add_thing(agent)
    env.run()

    # after a full run every square must be clean
    assert env.status == {(1, 0): 'Clean', (0, 0): 'Clean'}
Пример #5
0
def test_SimpleReflexAgentProgram():
    """Run a simple reflex vacuum agent to completion; the two-square
    world must end up fully clean."""

    class Rule:

        def __init__(self, state, action):
            # state is hidden; only `matches` can compare against it
            self.__state = state
            self.action = action

        def matches(self, state):
            return self.__state == state

    left, right = (0, 0), (1, 0)

    # one rule per (location, status) pair of the vacuum world
    rules = []
    for spot, status, act in ((left, "Dirty", "Suck"),
                              (left, "Clean", "Right"),
                              (right, "Dirty", "Suck"),
                              (right, "Clean", "Left")):
        rules.append(Rule((spot, status), act))

    def interpret_input(state):
        # the percept itself is the rule-matching state
        return state

    program = SimpleReflexAgentProgram(rules, interpret_input)
    agent = Agent(program)
    environment = TrivialVacuumEnvironment()
    environment.add_thing(agent)
    environment.run()
    assert environment.status == {(1, 0): 'Clean', (0, 0): 'Clean'}
Пример #6
0
def test_TableDrivenAgent():
    """A table-driven vacuum agent run to completion must leave both
    squares of the trivial vacuum world clean."""
    sq_a, sq_b = (0, 0), (1, 0)

    # percept-sequence -> action table covering every reachable history
    table = {
        ((sq_a, 'Clean'),): 'Right',
        ((sq_a, 'Dirty'),): 'Suck',
        ((sq_b, 'Clean'),): 'Left',
        ((sq_b, 'Dirty'),): 'Suck',
        ((sq_a, 'Dirty'), (sq_a, 'Clean')): 'Right',
        ((sq_a, 'Clean'), (sq_b, 'Dirty')): 'Suck',
        ((sq_b, 'Clean'), (sq_a, 'Dirty')): 'Suck',
        ((sq_b, 'Dirty'), (sq_b, 'Clean')): 'Left',
        ((sq_a, 'Dirty'), (sq_a, 'Clean'), (sq_b, 'Dirty')): 'Suck',
        ((sq_b, 'Dirty'), (sq_b, 'Clean'), (sq_a, 'Dirty')): 'Suck',
    }

    program = TableDrivenAgentProgram(table)
    agent = Agent(program)
    env = TrivialVacuumEnvironment()
    env.add_thing(agent)
    env.run()
    # the default run length must be enough to clean the whole world
    assert env.status == {(1, 0): 'Clean', (0, 0): 'Clean'}
Пример #7
0
def test_RandomVacuumAgent():
    """A random vacuum agent run to completion leaves both squares clean."""
    vacuum = RandomVacuumAgent()
    env = TrivialVacuumEnvironment()
    env.add_thing(vacuum)
    env.run()
    # the default number of steps suffices to clean the two-square world
    assert env.status == {(1, 0): 'Clean', (0, 0): 'Clean'}
Пример #8
0
def test_RandomVacuumAgent():
    """Running a RandomVacuumAgent in the trivial vacuum world must
    eventually clean both squares."""
    cleaner = RandomVacuumAgent()
    world = TrivialVacuumEnvironment()
    world.add_thing(cleaner)
    world.run()
    assert world.status == {(1, 0): 'Clean', (0, 0): 'Clean'}
Пример #9
0
def Model(start, a, b):
    """Run a ReflexVacuumAgent from *start* in a world where square (0, 0)
    has status *a* and square (1, 0) has status *b*; step until everything
    is clean and return the agent's performance score."""
    print("---Start: {}, (0,0):{} (1,0):{}---".format(start, a, b))
    agent = ReflexVacuumAgent()
    env = TrivialVacuumEnvironment()
    env.add_thing(agent, start)
    env.status = {(1, 0): b, (0, 0): a}

    # step (not run) until the whole world is clean
    all_clean = {(1, 0): 'Clean', (0, 0): 'Clean'}
    while env.status != all_clean:
        env.step()

    return env.agents[0].performance
Пример #10
0
def test_RandomAgentProgram():
    """A RandomAgentProgram choosing among the four vacuum actions should
    eventually leave both squares of the trivial vacuum world clean."""
    # all the actions a vacuum cleaner can perform
    # (renamed from `list` to avoid shadowing the builtin)
    actions = ['Right', 'Left', 'Suck', 'NoOp']
    # create a program and then an object of the RandomAgentProgram
    program = RandomAgentProgram(actions)

    agent = Agent(program)
    # create an object of TrivialVacuumEnvironment
    environment = TrivialVacuumEnvironment()
    # add agent to the environment
    environment.add_thing(agent)
    # run the environment
    environment.run()
    # check final status of the environment
    assert environment.status == {(1, 0): 'Clean', (0, 0): 'Clean'}
Пример #11
0
def test_RandomAgentProgram():
    """A RandomAgentProgram picking random vacuum actions should leave the
    trivial vacuum world fully clean after a default-length run."""
    # all the actions a vacuum cleaner can perform
    # (renamed from `list` to avoid shadowing the builtin)
    actions = ['Right', 'Left', 'Suck', 'NoOp']
    # create a program and then an object of the RandomAgentProgram
    program = RandomAgentProgram(actions)

    agent = Agent(program)
    # create an object of TrivialVacuumEnvironment
    environment = TrivialVacuumEnvironment()
    # add agent to the environment
    environment.add_thing(agent)
    # run the environment
    environment.run()
    # check final status of the environment
    assert environment.status == {(1, 0): 'Clean', (0, 0): 'Clean'}
Пример #12
0
def test_ModelBasedReflexAgentProgram():
    """A model-based reflex vacuum agent, whose model guesses unknown
    squares at random, must end with both squares clean."""

    loc_A, loc_B = (0, 0), (1, 0)

    # the agent's internal model of each square's status (unknown at first)
    model = {loc_A: None, loc_B: None}

    class Rule:
        """Condition-action rule: fires when the observed state matches."""

        def __init__(self, state, action):
            self.__state = state
            self.action = action

        def matches(self, state):
            return self.__state == state

    # condition-action rules for the two-square vacuum world
    rules = [Rule((loc_A, "Dirty"), "Suck"),
             Rule((loc_A, "Clean"), "Right"),
             Rule((loc_B, "Dirty"), "Suck"),
             Rule((loc_B, "Clean"), "Left")]

    def update_state(state, action, percept, model):
        here, status = percept

        # the opposite square: (1, 0) - here, element-wise
        there = (1 - here[0], 0 - here[1])

        # with no history yet (or no guess recorded) guess the other square
        if not state or not action or not model[there]:
            model[there] = random.choice(['Dirty', 'Clean'])

        model[here] = status

        # the model assumes a square stays clean right after a Suck
        if action == 'Suck':
            return percept

        # rubbish may appear suddenly, so re-guess a clean-looking square
        if status == 'Clean':
            model[here] = random.choice(['Dirty', 'Clean'])

        # moving right or left does not influence the environment
        return (here, model[here])

    # build the program with the shared model and wrap it in an agent
    program = ModelBasedReflexAgentProgram(rules, update_state, model)
    agent = Agent(program)

    env = TrivialVacuumEnvironment()
    env.add_thing(agent)
    env.run()

    # after a full run every square must be clean
    assert env.status == {(1, 0): 'Clean', (0, 0): 'Clean'}
Пример #13
0
        ReflexVacuumAgent,
        TrivialVacuumEnvironment,
        )
from utils import (
        mean,
        )


# Exercise 2.7
# Benchmark the reflex agent over many randomly initialised environments.
print "\n\nExercise 2.7"
n=100
steps=4
print "Compare ReflexVacuumAgent against {n} TrivialVacuumEnvironment instances and {steps} steps".format(**locals())
# NOTE(review): `steps=4` is hard-coded here instead of `steps=steps`;
# the values agree today, but they must be kept in sync by hand.
print compare_agents(TrivialVacuumEnvironment, [ReflexVacuumAgent], n=n, steps=4)

# Exercise 2.8
# Run the environment simulator with a simple reflex agent for all possible
# initial dirt configurations and agent locations
print "\n\nExercise 2.8"
scores = []
# enumerate every (A status, B status, agent start square) combination
for loc_A_status in ('Clean', 'Dirty'):
    for loc_B_status in ('Clean', 'Dirty'):
        # loc_A / loc_B are presumably defined earlier in this file -- TODO confirm
        for agent_loc in (loc_A, loc_B):
            agent = ReflexVacuumAgent()
            env = TrivialVacuumEnvironment(loc_A_status, loc_B_status)
            env.add_thing(agent, agent_loc)
            env.run(steps)
            # record this configuration's score for the overall mean below
            scores.append(agent.performance)
            print "Environment=({loc_A_status}, {loc_B_status}) and agent at {agent_loc}, agent scored {agent.performance} in {steps} steps".format(**locals())
print "Mean score {}".format(mean(scores))