Example #1
 def testTransitionHFMaxFood1(self):
     random.seed(1)
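     # Grid-string legend, inferred from these examples: B = base, H = harvester,
     # F = food, '#' = obstacle, '$' = harvester carrying food, '-' = empty cell.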
     state_str = 'B---\n--HF'
     grid = problem.parse(state_str)
     action = 'HF_3_1'
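     # The action 'HF_3_1' appears to mean "harvest the food at cell (3, 1)".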
     harvester_world = problem.to_problem(x=4, y=2, max_food=1)
     distances = problem.distance_to_base(grid, harvester_world)
     distances = problem.add_distance_to_food(grid, distances,
                                              harvester_world)
     belief_state = problem.to_state(grid, distances=distances)
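     # chance_of_food appears to build a distribution over unseen food locations;
     # sample_future_food then draws one concrete sample from it (hence the fixed seed above).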
     food_dist = problem.chance_of_food(belief_state, harvester_world)
     future_food = problem.sample_future_food(food_dist, n=1)
     initial_state = problem.to_state(grid,
                                      distances=distances,
                                      future_food=future_food)
     next_state, action_cost = transition(initial_state,
                                          action,
                                          harvester_world,
                                          time_left=1)
     self.assertEquals(next_state.grid, {
         (0, 1): 'F',
         (0, 0): 'B',
         (3, 1): '$',
         (2, 1): None
     }, next_state.grid)
     self.assertEquals(next_state.reward, -1, next_state.reward)
     self.assertEquals(action_cost, 1, action_cost)
     random.seed(None)
Example #2
 def testReturnMaxG(self):
     state_str = '-#\n$B'
     grid = problem.parse(state_str)
     harvester_world = problem.to_problem(x=2, y=2)
     distances = problem.distance_to_base(grid, harvester_world)
     distances = problem.add_distance_to_food(grid, distances,
                                              harvester_world)
     initial_state = problem.to_state(grid, distances=distances)
     max_g = search(initial_state, harvester_world, horizon=10)
     self.assertEquals(max_g, 49.0, max_g)
Example #3
 def testReturnPlan(self):
     state_str = '-#\n$B'
     grid = problem.parse(state_str)
     harvester_world = problem.to_problem(x=2, y=2)
     distances = problem.distance_to_base(grid, harvester_world)
     distances = problem.add_distance_to_food(grid, distances,
                                              harvester_world)
     initial_state = problem.to_state(grid, distances=distances)
     _ = search(initial_state,
                harvester_world,
                horizon=10,
                return_plan=True)
 def testExpand(self):
     state_str = 'H-\n-B'
     base, harvester, food, obstacle, defender, enemy, has_food = problem.parse(
         state_str)
     state = problem.to_state(base, harvester, food, obstacle, defender,
                              enemy, has_food)
     world = problem.to_problem(x=2, y=2)
     open_list = []
     policy = [[None] * world.y for _ in range(world.x)]
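     # Expanding cell (1, 1) at cost 0 should push the two empty neighbouring cells
     # onto the open list and record (1, 1) as their predecessor in the policy.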
     expand(((1, 1), 0), open_list, policy, state, world)
     self.assertEquals(open_list, [((1, 0), 1), ((0, 1), 1)], open_list)
     self.assertEquals(policy, [[None, ((1, 1), 1)], [((1, 1), 1), None]],
                       policy)
 def testDijkstra(self):
     state_str = '#-\n-b'
     base, harvester, food, obstacle, defender, enemy, has_food = problem.parse(
         state_str)
     state = problem.to_state(base, harvester, food, obstacle, defender,
                              enemy, has_food)
     world = problem.to_problem(x=2, y=2)
     policy = dijkstra((1, 1), state, world)
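     # The policy maps each reachable cell to (next cell toward the goal, distance);
     # the goal (1, 1) is marked ('*', 0) and the obstacle cell is left out.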
     self.assertEquals(policy, {
         (0, 1): ((1, 1), 1),
         (1, 0): ((1, 1), 1),
         (1, 1): ('*', 0)
     }, policy)
Example #6
 def testTransitionHB(self):
     state_str = '---$\n---B'
     grid = problem.parse(state_str)
     action = 'HB'
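     # 'HB' appears to be the "bring the harvested food back to the base" action.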
     harvester_world = problem.to_problem(x=4, y=2)
     distances = problem.distance_to_base(grid, harvester_world)
     initial_state = problem.to_state(grid, distances=distances)
     next_state, action_cost = transition(initial_state,
                                          action,
                                          harvester_world,
                                          time_left=1)
     self.assertEquals(next_state.grid, {
         (3, 1): '*',
         (3, 0): None
     }, next_state.grid)
     self.assertEquals(next_state.reward, 49, next_state.reward)
     self.assertEquals(action_cost, 1, action_cost)
Example #7
 def testTransitionHF(self):
     state_str = 'B---\n--HF'
     grid = problem.parse(state_str)
     action = 'HF_3_1'
     harvester_world = problem.to_problem(x=4, y=2)
     distances = problem.distance_to_base(grid, harvester_world)
     distances = problem.add_distance_to_food(grid, distances,
                                              harvester_world)
     initial_state = problem.to_state(grid, distances=distances)
     next_state, action_cost = transition(initial_state,
                                          action,
                                          harvester_world,
                                          time_left=1)
     self.assertEquals(next_state.grid, {
         (0, 0): 'B',
         (3, 1): '$',
         (2, 1): None
     }, next_state.grid)
     self.assertEquals(next_state.reward, -1, next_state.reward)
     self.assertEquals(action_cost, 1, action_cost)
if __name__ == '__main__':
    import argparse
    import agent
    import problem
    import random
    random.seed(1)
    parser = argparse.ArgumentParser()
    parser.add_argument("initial_state")
    parser.add_argument("max_food")
    parser.add_argument("destination_x")
    parser.add_argument("destination_y")
    parser.add_argument("time_left")
    args = parser.parse_args()

    initial_state, x, y = agent.init_belief(args.initial_state)
    world = problem.to_problem(x, y, int(args.max_food))
    initial_state = problem.sample(initial_state, world)
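    # problem.sample appears to draw one concrete world from the belief state
    # before the transition is applied.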

    print("initial_state: {0}".format(args.initial_state))
    print("max_food: {0}".format(args.max_food))
    print("action: ({0}, {1})".format(args.destination_x, args.destination_y))

    print(initial_state)
    next_states = transition(
        initial_state, (int(args.destination_x), int(args.destination_y)),
        world, int(args.time_left), int(args.time_left))

    print(problem.state_to_string(initial_state, world))

    for next_state, action_cost in next_states:
        print(problem.state_to_string(next_state, world))
Example #9
    import problem

    parser = argparse.ArgumentParser()
    parser.add_argument("width")
    parser.add_argument("height")
    parser.add_argument("max_food")
    parser.add_argument("n_worlds")
    parser.add_argument("n_obstacles")
    parser.add_argument("enemy")
    parser.add_argument("scenario")
    parser.add_argument("file_name")
    args = parser.parse_args()

    #random.seed(1)

    dimensions = problem.to_problem(int(args.width), int(args.height))

    for w in range(int(args.n_worlds)):
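        # Each generated world starts with empty feature dictionaries; the base is
        # placed first and the harvester starts on the base cell.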
        harvester_dict = {}
        food_dict = {}
        defender_dict = {}
        enemy_dict = {}
        obstacle_dict = {}
        base_dict = {}
        belief_food_dict = {}
        belief_obstacle_dict = {}

        b_x, b_y = random_coordinate(int(args.width), int(args.height))
        base_dict[(b_x, b_y)] = "b"
        harvester_dict[(b_x, b_y)] = "b"
Example #10
    if dimensions.known == 1:
        known = True
    print(problem.interleaved(reality, belief, dimensions, known=known))


if __name__ == '__main__':

    args = parse_args()
    random.seed(int(args.seed))
    reality_state, x, y = init_reality(args.reality)  # Dimensions of reality are derived from input file
    problem_has_enemy = False
    if len(reality_state.enemy_dict) > 0:
        problem_has_enemy = True
    harvester_world = problem.to_problem(x,
                                         y,
                                         int(args.max_food),
                                         int(args.known),
                                         enemy=problem_has_enemy,
                                         scenario=int(args.scenario))
    # food_dist = problem.chance_of_food(reality_state, harvester_world)
    future_food = problem.sample_n_future_food(harvester_world, 100)
    # for i in range(1000):
    #     future_food.append(problem.sample_cell(food_dist)[1])
    distances = problem.all_distances(reality_state, harvester_world)
    reality_state = problem.to_state(reality_state.base_dict,
                                     reality_state.harvester_dict,
                                     food=reality_state.food_dict,
                                     obstacle=reality_state.obstacle_dict,
                                     defender=reality_state.defender_dict,
                                     enemy=reality_state.enemy_dict,
                                     has_food=reality_state.has_food,
                                     future_food=future_food,
Example #11
            cell = belief_grid[x][y]
            if cell:
                printable += cell
            else:
                printable += '?'
        printable += '\n'
    return printable


if __name__ == '__main__':
    import argparse
    import agent
    import problem
    import random
    random.seed(1)
    parser = argparse.ArgumentParser()
    parser.add_argument("initial_state")
    parser.add_argument("goal")
    args = parser.parse_args()
    initial_state, x, y = agent.init_belief(args.initial_state)
    harvester_world = problem.to_problem(x, y)
    #food_dist = problem.chance_of_food(initial_state, harvester_world)
    #initial_state = problem.sample(initial_state, food_dist, harvester_world)
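    # A goal of 'B' means the base; any other goal is assumed to have the form '<tag>_x_y'.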
    if args.goal == 'B':
        goal_coordinate, _ = problem.find_base(initial_state.grid)
    else:
        goal_coordinate = (int(args.goal.split('_')[1]), int(args.goal.split('_')[2]))
    policy = dijkstra(goal_coordinate, initial_state.grid, harvester_world)
    print("initial_state: {0}".format(args.initial_state))
    print("goal: {0}".format(args.goal))
    print(interleaved(policy, initial_state.grid, harvester_world))
    random.seed(0)