# Example 1
# 0
def test_bruteforce_gen(scenario, seed, fully_obs, flat_actions, flat_obs):
    """Run the bruteforce agent on one generated benchmark configuration.

    Builds the benchmark environment with the given observability and
    action/observation-space settings, then runs the bruteforce agent to
    completion; any exception raised fails the test.
    """
    env_settings = {
        "seed": seed,
        "fully_obs": fully_obs,
        "flat_actions": flat_actions,
        "flat_obs": flat_obs,
    }
    env = nasim.make_benchmark(scenario, **env_settings)
    run_bruteforce_agent(env, verbose=False)
# Example 2
# 0
def run_scenario(args):
    """Run a random agent on a single benchmark scenario.

    Parameters
    ----------
    args : tuple
        ``(scenario_name, seed)`` pair identifying the run (packed as one
        argument so this function can be mapped over a pool of workers).

    Returns
    -------
    dict
        Run summary with keys "Name", "Seed", "Steps" and "Total reward".
    """
    scenario_name, seed = args
    print_msg(f"Running '{scenario_name}' scenario with seed={seed}")
    # Positional settings: fully_obs=False, flat_actions=True, flat_obs=True.
    env = nasim.make_benchmark(scenario_name, seed, False, True, True)
    steps, total_reward, _done = run_random_agent(env, verbose=False)
    summary = {
        "Name": scenario_name,
        "Seed": seed,
        "Steps": steps,
        "Total reward": total_reward,
    }
    return summary
# Example 3
# 0
    # DQN hyper-parameter CLI options.
    # NOTE(review): the help string below says default=100000 but the real
    # default is 500000 -- the help strings are stale relative to defaults.
    parser.add_argument("--replay_size", type=int, default=500000,
                        help="(default=100000)")
    parser.add_argument("--final_epsilon", type=float, default=0.05,
                        help="(default=0.05)")
    parser.add_argument("--init_epsilon", type=float, default=1.0,
                        help="(default=1.0)")
    # NOTE(review): stale help text again (actual default is 800000).
    parser.add_argument("--exploration_steps", type=int, default=800000,
                        help="(default=10000)")
    # NOTE(review): stale help text (actual default is 0.9).
    parser.add_argument("--gamma", type=float, default=0.9,
                        help="(default=0.99)")
    # NOTE(review): "--quite" is likely a typo for "--quiet"; with
    # action="store_false" args.quite defaults to True, so passing the
    # flag *disables* the verbose output used below -- confirm intent.
    parser.add_argument("--quite", action="store_false",
                        help="Run in Quite mode")

    args = parser.parse_args()

    # Build the benchmark environment; fully observable unless the
    # --partially_obs flag (defined above this fragment) is set.
    env = nasim.make_benchmark(args.env_name,
                               args.seed,
                               fully_obs=not args.partially_obs,
                               flat_actions=True,
                               flat_obs=True)
    #env.render_network_graph(show=True)
    # verbose is driven by args.quite (see NOTE above); **vars(args)
    # forwards every CLI option to the agent constructor.
    dqn_agent =  NoisyDoubleDuelingDQN_PERAgent(env, verbose=args.quite, **vars(args))
    num_actions = env.action_space.n
    #for a in range(num_actions):
          #  print(env.action_space.get_action(a))
    dqn_agent.train()
    # NOTE(review): hard-coded absolute Windows path -- consider making the
    # save location a CLI option instead.
    dqn_agent.save("D:\\Experiments\\Experiment_Record\\E0812\\Saved_model\\NDSPI-medium-multi-site-0820.pkl")
    #dqn_agent.save("D:\\Experiments\\NetworkAttackSimulator\\medium-multi-site-honeypot\\NoisyDoubleDueling_PER_Mar15.pkl")
    #save_data(dqn_agent.rewards_episode,'D:\\Experiments\\NetworkAttackSimulator\\medium-multi-site\\ NoisyDoubleDueling_PER_rewards_episode_Mar1.csv')
    #save_data(dqn_agent.rewards_step,'D:\\Experiments\\NetworkAttackSimulator\\medium-multi-site\\ NoisyDoubleDueling_PER_rewards_step_Mar1.csv')
   #dqn_agent.run_eval_episode(render=args.render_eval)
# Example 4
# 0
    # Flags selecting action/observation space variants; note that the
    # values are *inverted* when the environment is built below.
    parser.add_argument("-p",
                        "--param_actions",
                        action="store_true",
                        help="Use Parameterised action space")
    parser.add_argument("-f",
                        "--box_obs",
                        action="store_true",
                        help="Use 2D observation space")
    args = parser.parse_args()

    # Per-run statistics accumulated over args.runs episodes.
    seed = args.seed
    run_steps = []
    run_rewards = []
    run_goals = 0
    for i in range(args.runs):
        # Positional args: name, seed, fully_obs, flat_actions, flat_obs --
        # so --param_actions / --box_obs *disable* the flat variants.
        env = nasim.make_benchmark(args.env_name, seed, not args.partially_obs,
                                   not args.param_actions, not args.box_obs)
        steps, reward, done = run_random_agent(env, verbose=False)
        run_steps.append(steps)
        run_rewards.append(reward)
        run_goals += int(done)  # count runs that reached the goal
        seed += 1  # vary the seed so each run generates a fresh network

        if args.runs > 1:
            print(f"Run {i}:")
            print(f"\tSteps = {steps}")
            print(f"\tReward = {reward}")
            print(f"\tGoal reached = {done}")

    # Convert to arrays, presumably for the summary statistics computed
    # after this fragment -- continuation not visible here.
    run_steps = np.array(run_steps)
    run_rewards = np.array(run_rewards)
"""Environment network graph visualizer

This script allows the user to visualize the network graph for a chosen
benchmark scenario.
"""

import nasim

if __name__ == "__main__":
    import argparse

    # CLI: required scenario name plus an optional RNG seed.
    cli = argparse.ArgumentParser()
    cli.add_argument("scenario_name",
                     type=str,
                     help="benchmark scenario name")
    cli.add_argument("-s",
                     "--seed",
                     type=int,
                     default=0,
                     help="random seed (default=0)")
    parsed = cli.parse_args()

    # Build the requested benchmark environment and display its network
    # graph in a window.
    env = nasim.make_benchmark(parsed.scenario_name, parsed.seed)
    env.render_network_graph(show=True)
def test_generator(scenario, seed):
    """Check that generating a benchmark scenario succeeds.

    Simply constructs the scenario for the given seed; any exception
    raised during generation fails the test.
    """
    _ = nasim.make_benchmark(scenario, seed=seed)
# Example 7
# 0
        " AI hacker."))
    # Positional: which benchmark scenario to demo.
    parser.add_argument("env_name", type=str, help="benchmark scenario name")
    # Optional: drive the environment with a pre-trained DQN policy instead
    # of keyboard input.
    # NOTE(review): help string has a typo ("ony") and an unbalanced
    # parenthesis -- left untouched here since it is runtime text.
    parser.add_argument("-ai",
                        "--run_ai",
                        action="store_true",
                        help=("Run AI policy (currently ony supported for"
                              " 'tiny' and 'small' environments"))
    args = parser.parse_args()

    # Pre-trained policies only exist for a few scenarios; fail fast with a
    # clear message when an unsupported one is requested with -ai.
    if args.run_ai:
        assert args.env_name in DQN_POLICIES, \
            ("AI demo only supported for the following environments:"
             f" {list(DQN_POLICIES)}")

    # The demo always uses the fully observable, flat-action, flat-obs
    # variant of the benchmark environment.
    env = nasim.make_benchmark(args.env_name,
                               fully_obs=True,
                               flat_actions=True,
                               flat_obs=True)

    line_break = f"\n{'-'*60}"
    print(line_break)
    print(f"Running Demo on {args.env_name} environment")
    if args.run_ai:
        print("Using AI policy")
        print(line_break)
        # Rebuild the agent and load the saved weights for this scenario.
        # NOTE(review): **vars(args) forwards all CLI options to the agent
        # constructor -- assumes DQNAgent tolerates extras; confirm.
        dqn_agent = DQNAgent(env, verbose=False, **vars(args))
        dqn_agent.load(DQN_POLICIES[args.env_name])
        # Positional args presumably: env, render, step delay, render mode
        # ("readable") -- verify against run_eval_episode's signature.
        ret, steps, goal = dqn_agent.run_eval_episode(env, True, 0.01,
                                                      "readable")
    else:
        print("Player controlled")
        print(line_break)