# Example 1
def test_compare_agents():
    """Compare several vacuum agents in the same environment class and
    print each agent's average performance score.

    Relies on the ``agents`` module imported elsewhere in this file.
    """
    # NOTE(review): 'VacuumeEnvironment' spelling matches its other use in
    # this file — confirm it matches the agents module's actual name.
    environment = agents.VacuumeEnvironment
    agent_list = [agents.TableDrivenVacuumAgent,
                  agents.RandomVacuumAgent,
                  agents.ReflexVacuumAgent]

    result = agents.compare_agents(environment, agent_list, [(1, 0), (2, 0), (0, 0)])
    # BUG FIX: the print statement was not indented under the loop,
    # which raised an IndentationError; also replaced the index loop
    # with direct unpacking of (agent_factory, score) pairs.
    for agent_factory, score in result:
        print("{}'s avg performance: {}".format(agent_factory.__name__, score))

# Runs a single vacuum environment with a specific agent and step count.
class vacuumSim():
    """Run a single vacuum environment with one agent for a fixed step count."""

    def __init__(self):
        # The environment instance the simulation will operate on.
        self.vacuumEnvironment = agents.VacuumeEnvironment()

    def startSim(self):
        """Drop a traced reflex agent into the environment and run 10 steps."""
        traced_agent = agents.TraceAgent(agents.ReflexVacuumAgent())
        self.vacuumEnvironment.add_thing(traced_agent)
        # Alternative agent, kept for experimentation:
        # self.vacuumEnvironment.add_thing(agents.TraceAgent(agents.TableDrivenVacuumAgent()))
        self.vacuumEnvironment.run(10)

def main():
    """Entry point: build one vacuum simulation and run it."""
    simulation = vacuumSim()
    simulation.startSim()

    # test_compare_agents()  # compare multiple agents under identical conditions


# Execute only when run as a script, not when imported as a module.
if __name__ == "__main__":
    main()
def test_compare_agents():
    """Assert that ModelBasedVacuumAgent scores at least as well as
    ReflexVacuumAgent in TrivialVacuumEnvironment.
    """
    environment = TrivialVacuumEnvironment
    # BUG FIX: renamed the local list from 'agents' — that name shadowed
    # the 'agents' module used elsewhere in this file. Also fixed the
    # 'Vacumm' typos in the local score names.
    agent_factories = [ModelBasedVacuumAgent, ReflexVacuumAgent]

    result = compare_agents(environment, agent_factories)
    performance_model_based_agent = result[0][1]
    performance_reflex_agent = result[1][1]

    # The performance of ModelBasedVacuumAgent will be at least as good as
    # that of ReflexVacuumAgent: the model-based agent can identify the
    # terminal state (both locations clean) and perform NoOp, leaving its
    # score unchanged, whereas the reflex agent cannot detect the terminal
    # state, keeps moving, and loses performance.
    assert performance_reflex_agent <= performance_model_based_agent
# Example 3
def test_compare_agents():
    """ModelBasedVacuumAgent must never score below ReflexVacuumAgent."""
    candidates = [ModelBasedVacuumAgent, ReflexVacuumAgent]
    scores = compare_agents(TrivialVacuumEnvironment, candidates)
    model_based_score = scores[0][1]
    reflex_score = scores[1][1]

    # Rationale: the model-based agent recognizes the terminal state (both
    # locations clean) and performs NoOp, so its score stops changing. The
    # reflex agent cannot detect that state and keeps moving, which keeps
    # costing it performance — so it can never end up ahead.
    assert reflex_score <= model_based_score
# Example 4
        loc_A,
        loc_B,
        ReflexVacuumAgent,
        TrivialVacuumEnvironment,
        )
from utils import (
        mean,
        )


# Exercise 2.7
# Compare a reflex agent's average score over many random environments.
print("\n\nExercise 2.7")
n = 100
steps = 4
# BUG FIX: converted Python 2 print statements to print() calls (the rest
# of this file uses Python 3 syntax), and passed the 'steps' variable to
# compare_agents instead of the hard-coded literal 4.
print(f"Compare ReflexVacuumAgent against {n} TrivialVacuumEnvironment instances and {steps} steps")
print(compare_agents(TrivialVacuumEnvironment, [ReflexVacuumAgent], n=n, steps=steps))

# Exercise 2.8
# Run the environment simulator with a simple reflex agent for all possible
# initial dirt configurations and agent locations.
print("\n\nExercise 2.8")
scores = []
for loc_A_status in ('Clean', 'Dirty'):
    for loc_B_status in ('Clean', 'Dirty'):
        for agent_loc in (loc_A, loc_B):
            agent = ReflexVacuumAgent()
            env = TrivialVacuumEnvironment(loc_A_status, loc_B_status)
            env.add_thing(agent, agent_loc)
            env.run(steps)
            scores.append(agent.performance)
            print(f"Environment=({loc_A_status}, {loc_B_status}) and agent at {agent_loc}, "
                  f"agent scored {agent.performance} in {steps} steps")