def run(wss=None, steps=None, seed=None):
    """Run the blind-dog park demo for a number of steps.

    Args:
        wss: websocket server handle forwarded via the environment options.
        steps: number of simulation steps; defaults to 50 when falsy.
        seed: optional seed for the global ``random`` module.
    """
    steps = int(steps) if steps else 50
    random.seed(seed)
    # Bug fix: the original passed extra positional args print-style
    # (l.debug('Running blind_dog ', steps, ' steps')); a stdlib logger treats
    # those as %-format args and they never appear in the message. Use lazy
    # %-style formatting instead.
    l.debug('Running blind_dog %s steps', steps)
    options = OPTIONS  # NOTE(review): mutates the shared OPTIONS object in place
    options.wss = wss
    park = Park(options)
    dog1 = BlindDog(program, 'fido')
    dog2 = BlindDog(program, 'dido')
    dog1.direction = Direction(Direction.D)
    dog2.direction = Direction(Direction.D)
    dogfood = Food('dogfood')
    water = Water('water')
    dirt = Dirt('dirt')
    park.add_thing(dog1, fido_start_pos)
    park.add_thing(dog2, dido_start_pos)
    # Same print-style bug as above: the second location was silently dropped.
    l.debug('dog locations: %s, %s', dog1.location, dog2.location)
    park.add_thing(dirt, (0, 2))
    park.add_thing(dogfood, (0, 5))
    park.add_thing(water, (0, 7))
    park.run(steps)
def test_VacuumEnvironment():
    """Exercise VacuumEnvironment percepts and actions end to end."""
    env = VacuumEnvironment(6, 6)
    cleaner = ModelBasedVacuumAgent()
    cleaner.direction = Direction(Direction.R)
    env.add_thing(cleaner)
    env.add_thing(Dirt(), location=(2, 1))

    # A 6x6 grid is fully walled in: 20 boundary cells, plus the one dirt patch.
    walls = [t for t in env.things if isinstance(t, Wall)]
    dirt_patches = [t for t in env.things if isinstance(t, Dirt)]
    assert len(walls) == 20
    assert len(dirt_patches) == 1

    # Drive the agent onto the dirt, bump a wall, suck it up, then idle.
    assert env.percept(cleaner) == ("Clean", "None")
    env.execute_action(cleaner, "Forward")
    assert env.percept(cleaner) == ("Dirty", "None")
    env.execute_action(cleaner, "TurnLeft")
    env.execute_action(cleaner, "Forward")
    assert env.percept(cleaner) == ("Dirty", "Bump")
    env.execute_action(cleaner, "Suck")
    assert env.percept(cleaner) == ("Clean", "None")

    # A NoOp must not change the agent's performance score.
    before = cleaner.performance
    env.execute_action(cleaner, "NoOp")
    assert cleaner.performance == before
def add_thing(self, thing, location=None):
    """Place *thing* in the environment and register it.

    Agents get performance/direction bookkeeping and go into ``self.agents``;
    everything else increments the per-cell state counters and goes into
    ``self.things``.

    Args:
        thing: the object or agent to add.
        location: coordinate tuple; when omitted, a default location is
            chosen via ``self.default_location``.
    """
    # set random location if not provided
    thing.location = location if location is not None else self.default_location(
        thing)
    # if thing is instance of Agent
    if (isinstance(thing, Agent)):
        # NOTE(review): the row stride 5 is hard-coded — presumably the grid
        # width; confirm it matches the environment's actual dimensions.
        self.state[thing.location[0] * 5 + thing.location[1]][
            "A"] += 1  # update environment state based on location
        thing.performance = 100  # initial agent performance score
        thing.currentDirection = Direction(
            randomDirection())  # give the agent a random starting direction
        if (isinstance(
                thing, ModelReflexAgent)):  # if agent is a ModelReflexAgent
            thing.visited = set()
            thing.visible = set()
            thing.visited.add(
                thing.location)  # mark the starting cell as visited
        self.agents.append(thing)  # register agent
    else:
        if (isinstance(thing, Gold)):  # if instance of gold
            self.state[thing.location[0] * 5 + thing.location[1]][
                "G"] += 1  # update environment state based on location
        else:
            self.state[thing.location[0] * 5 +
                       thing.location[1]]["T"] += 1  # otherwise counted as a trap
        self.things.append(
            thing)  # register the non-agent thing
def test_move_forward():
    """Moving forward should translate a point one step along the heading."""
    cases = [
        ("up", (0, 0), (0, -1)),
        (Direction.R, (0, 0), (1, 0)),
        (Direction.D, (0, 0), (0, 1)),
        ("left", (0, 0), (-1, 0)),
    ]
    for heading, start, expected in cases:
        assert Direction(heading).move_forward(start) == expected
    # Stepping left from (1, 0) lands back on the origin.
    assert Direction("left").move_forward((1, 0)) == (0, 0)
def run(wss=None, steps=None, seed=None):
    """Run the TestAnimat demo world for a number of steps.

    Args:
        wss: websocket server handle forwarded via the environment options.
        steps: number of simulation steps; defaults to 50 when falsy.
        seed: optional seed for the global ``random`` module.
    """
    steps = int(steps) if steps else 50
    # Bug fix: the seed parameter was accepted but never used; seed the RNG
    # the same way the blind_dog runner does so runs are reproducible.
    random.seed(seed)
    options = OPTIONS  # NOTE(review): mutates the shared OPTIONS object in place
    options.wss = wss
    world = World(options)
    test1 = TestAnimat(program, 'Test')
    test1.direction = Direction(Direction.R)
    world.add_thing(test1, (0, 0))
    world.run(steps)
def test_add():
    """Adding "right"/"left" to a Direction rotates the heading 90 degrees."""
    up = Direction(Direction.U)
    assert (up + "right").direction == Direction.R
    assert (up + "left").direction == Direction.L

    right = Direction("right")
    assert (right + Direction.L).direction == "up"
    assert (right + Direction.R).direction == "down"

    down = Direction("down")
    assert (down + "right").direction == Direction.L
    assert (down + "left").direction == Direction.R

    left = Direction(Direction.L)
    assert (left + Direction.R).direction == Direction.U
    assert (left + Direction.L).direction == Direction.D