def run_n_times(agents, no_times_to_run, max_timesteps, threshold):
    '''Given a list of agents, runs the search n times and gathers stats'''
    stat_list = {agent.agent_name: [] for agent in agents}
    for _ in range(no_times_to_run):
        run_t_timesteps(agents, max_timesteps, threshold)
        # analyse each agent's run before resetting it for the next repetition
        analysers = [SimpleAgentAnalyser(agent) for agent in agents]
        for agent_index, agent in enumerate(agents):
            stat_list[agent.agent_name].append(analysers[agent_index].get_analysis())
            agent.reset()
    # return the per-agent analyses so callers can aggregate them
    return stat_list

    # What follows appears to be driver code from the example's call site, shown with
    # its original indentation; it assumes agent3 has already been constructed and that
    # run_t_timesteps and SimpleAgentAnalyser are in scope.
    threshold = 0.9

    #agent3.current_belief_map.save_visualisation("D:\\ReinforcementLearning\\DetectSourceAgent\\Visualisations\\Agent3BelMapPrior.png")

    max_timesteps = 130
    # run a single search with agent3; fall back to the timestep cap if the source
    # was never found within max_timesteps
    no_timesteps_to_discovery = run_t_timesteps([agent3], max_timesteps, threshold)
    no_timesteps_to_discovery = no_timesteps_to_discovery or max_timesteps

    print("\n\nSaving visualisations")
    #agent1.current_belief_map.save_visualisation("D:\\ReinforcementLearning\\DetectSourceAgent\\Visualisations\\Agent1BelMap.png")
    #agent2.current_belief_map.save_visualisation("D:\\ReinforcementLearning\\DetectSourceAgent\\Visualisations\\Agent2BelMap.png")

    #grid, initial_pos, move_from_bel_map_callable, height, epsilon, multirotor_client, agent_name, prior = {}
    # OccupancyGridAgent(grid, Vector3r(0,0), get_move_from_belief_map_epsilon_greedy, -12, 0.3, agent_name, other_active_agents).explore_t_timesteps(args.no_timesteps)
    analyser = SimpleAgentAnalyser(agent3)
    print(
        '\n----------------------------------------------------------------\n')
    for key, value in analyser.get_analysis().items():
        print(key, "\t\t.....\t\t", value)
    print(
        '\n----------------------------------------------------------------\n')

    #move_visualisation_fp = "D:\\ReinforcementLearning\\DetectSourceAgent\\Visualisations\\Agent3MoveMap.png"
    #analyser.save_move_visualisation3d(move_visualisation_fp)

    move_visualisation_fp2d = "D:\\ReinforcementLearning\\DetectSourceAgent\\Visualisations\\Agent3MoveMap2d.png"
    analyser.save_move_visualisation2d(move_visualisation_fp2d)

    bel_map_visualisation_fp = "D:\\ReinforcementLearning\\DetectSourceAgent\\Visualisations\\Agent3BelMap.png"
    analyser.save_belief_map_visualisation(bel_map_visualisation_fp)
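
The examples never consume what run_n_times gathers. Below is a minimal sketch, not part of the original listing, of how its return value could be summarised; the choice of agents, the run count, and the numeric-averaging step are assumptions about an analysis mapping whose exact keys are not shown in the source.

# Sketch (assumed usage): run the search repeatedly and average numeric analysis fields.
agents = [agent3]                       # assumed: reuse the agent(s) built above
stats = run_n_times(agents, no_times_to_run=10, max_timesteps=130, threshold=0.9)

for agent_name, analyses in stats.items():
    print("\nAgent:", agent_name, "-", len(analyses), "runs")
    # average any numeric fields across runs; other fields are left out of the summary
    numeric_keys = [k for k, v in analyses[0].items() if isinstance(v, (int, float))]
    for key in numeric_keys:
        mean_value = sum(analysis[key] for analysis in analyses) / len(analyses)
        print(key, "\t\t.....\t\t", mean_value)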
Example #3
    print("Saving visualisations")
    #agent1.current_belief_map.save_visualisation("D:\\ReinforcementLearning\\DetectSourceAgent\\Visualisations\\Agent1BelMap.png")
    #agent2.current_belief_map.save_visualisation("D:\\ReinforcementLearning\\DetectSourceAgent\\Visualisations\\Agent2BelMap.png")

    print(
        '\n----------------------------------------------------------------\n')
    print("Agent1 most likely coordinate: ",
          agent3.current_belief_map.get_most_likely_coordinate())
    print("Agent2 most likely coordinate: ",
          agent3.current_belief_map.get_most_likely_coordinate())
    print(
        '\n----------------------------------------------------------------\n')
    #grid, initial_pos, move_from_bel_map_callable, height, epsilon, multirotor_client, agent_name, prior = {}
    # OccupancyGridAgent(grid, Vector3r(0,0), get_move_from_belief_map_epsilon_greedy, -12, 0.3, agent_name, other_active_agents).explore_t_timesteps(args.no_timesteps)
    analyser = SimpleAgentAnalyser(agent3)
    print(
        '\n----------------------------------------------------------------\n')
    for key, value in analyser.get_analysis().items():
        print(key, "\t\t.....\t\t", value)
    print(
        '\n----------------------------------------------------------------\n')

    move_visualisation_fp = "D:\\ReinforcementLearning\\DetectSourceAgent\\Visualisations\\Agent3MoveMap.png"
    analyser.save_move_visualisation3d(move_visualisation_fp)

    move_visualisation_fp2d = "D:\\ReinforcementLearning\\DetectSourceAgent\\Visualisations\\Agent3MoveMap2d.png"
    analyser.save_move_visualisation2d(move_visualisation_fp2d)

    bel_map_visualisation_fp = "D:\\ReinforcementLearning\\DetectSourceAgent\\Visualisations\\Agent3BelMap.png"
    analyser.save_belief_map_visualisation(bel_map_visualisation_fp)
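
Both examples repeat the same reporting and visualisation steps for a single agent. A small helper along the following lines (a sketch, not from the original source) would cover that pattern for any agent; it uses only the SimpleAgentAnalyser and belief-map calls shown above, and the output directory is a placeholder supplied by the caller.

import os

def report_and_visualise(agent, output_dir):
    '''Sketch: print an agent's analysis and save its move/belief-map plots,
    mirroring the steps used in the examples above.'''
    print("\n----------------------------------------------------------------\n")
    print(agent.agent_name, "most likely coordinate: ",
          agent.current_belief_map.get_most_likely_coordinate())
    analyser = SimpleAgentAnalyser(agent)
    for key, value in analyser.get_analysis().items():
        print(key, "\t\t.....\t\t", value)
    print("\n----------------------------------------------------------------\n")

    # file names follow the Agent<N>...png pattern used above; output_dir is a placeholder
    analyser.save_move_visualisation3d(os.path.join(output_dir, agent.agent_name + "MoveMap.png"))
    analyser.save_move_visualisation2d(os.path.join(output_dir, agent.agent_name + "MoveMap2d.png"))
    analyser.save_belief_map_visualisation(os.path.join(output_dir, agent.agent_name + "BelMap.png"))

# assumed usage, reusing the directory from the examples:
# report_and_visualise(agent3, "D:\\ReinforcementLearning\\DetectSourceAgent\\Visualisations")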