Exemplo n.º 1
0
def run_experiment(configpath: str, random_seed: int, noconfig: bool):
    """
    Runs one experiment and saves results and plots

    :param configpath: path to configfile
    :param random_seed: random seed for the run; also namespaces the artefact dirs
    :param noconfig: whether to override config
    :return: (train_csv_path, eval_csv_path) — empty strings when hp-tuning or no episodes ran
    """
    if configpath is not None and not noconfig:
        # BUG FIX: previously dereferenced the module-level `args` object,
        # which is only bound inside the `__main__` guard — calling this
        # function as a library raised NameError. Use the parameter instead.
        if not os.path.exists(configpath):
            write_default_config()
        config = util.read_config(configpath)
    else:
        config = default_config()
    time_str = str(time.time())
    util.create_artefact_dirs(config.output_dir, random_seed)
    logger = util.setup_logger("actor_critic_vs_random_defense-v8",
                               config.output_dir + "/results/logs/" +
                               str(random_seed) + "/",
                               time_str=time_str)
    # Per-seed output directories so runs with different seeds do not collide
    config.pg_agent_config.save_dir = default_output_dir(
    ) + "/results/data/" + str(random_seed) + "/"
    config.pg_agent_config.video_dir = default_output_dir(
    ) + "/results/videos/" + str(random_seed) + "/"
    config.pg_agent_config.gif_dir = default_output_dir(
    ) + "/results/gifs/" + str(random_seed) + "/"
    config.pg_agent_config.tensorboard_dir = default_output_dir() + "/results/tensorboard/" \
                                                       + str(random_seed) + "/"
    config.logger = logger
    config.pg_agent_config.logger = logger
    config.pg_agent_config.random_seed = random_seed
    config.random_seed = random_seed
    # Persist the hyperparameters used for this run alongside the results
    config.pg_agent_config.to_csv(config.output_dir +
                                  "/results/hyperparameters/" +
                                  str(random_seed) + "/" + time_str + ".csv")
    train_csv_path = ""
    eval_csv_path = ""
    if config.hp_tuning:
        # Grid search writes its own artefacts; no train/eval csv paths produced
        hp_tuning.hype_grid(config)
    else:
        train_result, eval_result = Runner.run(config)
        # Only persist and plot when both phases actually produced episodes
        if len(train_result.avg_episode_steps) > 0 and len(
                eval_result.avg_episode_steps) > 0:
            train_csv_path = config.output_dir + "/results/data/" + str(
                random_seed) + "/" + time_str + "_train" + ".csv"
            train_result.to_csv(train_csv_path)
            eval_csv_path = config.output_dir + "/results/data/" + str(
                random_seed) + "/" + time_str + "_eval" + ".csv"
            eval_result.to_csv(eval_csv_path)
            plot_csv(config, eval_csv_path, train_csv_path, random_seed)

    return train_csv_path, eval_csv_path
Exemplo n.º 2
0
def run_experiment(configpath: str, random_seed: int, noconfig: bool):
    """
    Runs one experiment and saves results and plots

    :param configpath: path to configfile
    :param random_seed: random seed for the run; also namespaces the artefact dirs
    :param noconfig: whether to override config
    :return: (train_csv_path, eval_csv_path) — empty strings when no episodes ran
    """
    if configpath is not None and not noconfig:
        # BUG FIX: previously dereferenced the module-level `args` object,
        # which is only bound inside the `__main__` guard — calling this
        # function as a library raised NameError. Use the parameter instead.
        if not os.path.exists(configpath):
            write_default_config()
        config = util.read_config(configpath)
    else:
        config = default_config()
    time_str = str(time.time())
    util.create_artefact_dirs(config.output_dir, random_seed)
    logger = util.setup_logger("random_attack_vs_tabular_q_learning-v3",
                               config.output_dir + "/results/logs/" +
                               str(random_seed) + "/",
                               time_str=time_str)
    # Per-seed output directories so runs with different seeds do not collide
    config.q_agent_config.save_dir = default_output_dir(
    ) + "/results/data/" + str(random_seed) + "/"
    config.q_agent_config.video_dir = default_output_dir(
    ) + "/results/videos/" + str(random_seed) + "/"
    config.q_agent_config.gif_dir = default_output_dir(
    ) + "/results/gifs/" + str(random_seed) + "/"
    config.logger = logger
    config.q_agent_config.logger = logger
    config.q_agent_config.random_seed = random_seed
    config.random_seed = random_seed
    # Persist the hyperparameters used for this run alongside the results
    config.q_agent_config.to_csv(config.output_dir +
                                 "/results/hyperparameters/" +
                                 str(random_seed) + "/" + time_str + ".csv")
    train_result, eval_result = Runner.run(config)
    train_csv_path = ""
    eval_csv_path = ""
    # Only persist and plot when both phases actually produced episodes
    if len(train_result.avg_episode_steps) > 0 and len(
            eval_result.avg_episode_steps) > 0:
        train_csv_path = config.output_dir + "/results/data/" + str(
            random_seed) + "/" + time_str + "_train" + ".csv"
        train_result.to_csv(train_csv_path)
        eval_csv_path = config.output_dir + "/results/data/" + str(
            random_seed) + "/" + time_str + "_eval" + ".csv"
        eval_result.to_csv(eval_csv_path)
        plot_csv(config, eval_csv_path, train_csv_path, random_seed)

    return train_csv_path, eval_csv_path
Exemplo n.º 3
0
def setup_train(config: ClientConfig, random_seed):
    """
    Prepares logging and per-seed artefact directories for a training run,
    mutating *config* in place, and records the run's hyperparameters.

    :param config: the client configuration to set up (mutated in place)
    :param random_seed: seed used to namespace this run's artefact dirs
    :return: the timestamp string that identifies this run's artefacts
    """
    timestamp = str(time.time())
    seed_suffix = str(random_seed) + "/"
    util.create_artefact_dirs(config.output_dir, random_seed)
    log = util.setup_logger("tabular_q_vs_random_defense-v3",
                            config.output_dir + "/results/logs/" + seed_suffix,
                            time_str=timestamp)
    agent_cfg = config.q_agent_config
    out_base = default_output_dir()
    # Per-seed output directories so runs with different seeds do not collide
    agent_cfg.save_dir = out_base + "/results/data/" + seed_suffix
    agent_cfg.video_dir = out_base + "/results/videos/" + seed_suffix
    agent_cfg.gif_dir = out_base + "/results/gifs/" + seed_suffix
    config.logger = log
    agent_cfg.logger = log
    agent_cfg.random_seed = random_seed
    config.random_seed = random_seed
    # Persist the hyperparameters used for this run alongside the results
    agent_cfg.to_csv(config.output_dir + "/results/hyperparameters/" +
                     seed_suffix + timestamp + ".csv")
    return timestamp
Exemplo n.º 4
0
                               attacker_cumulative_reward=df["attacker_cumulative_reward"],
                               defender_cumulative_reward=df["defender_cumulative_reward"],
                               log_frequency=config.simulation_config.log_frequency,
                               output_dir=config.output_dir, eval=False, sim=True)


# Program entrypoint: run the random-vs-defend-minimal simulation and persist results
if __name__ == '__main__':
    args = util.parse_args(default_config_path())
    if args.configpath is None:
        config = default_config()
    else:
        # Materialize a default config file first if none exists at the path
        if not os.path.exists(args.configpath):
            write_default_config()
        config = util.read_config(args.configpath)
    timestamp = str(time.time())
    util.create_artefact_dirs(config.output_dir)
    log = util.setup_logger("idsgame-v0-random_vs_defend_minimal", config.output_dir + "/logs/",
                            time_str=timestamp)
    config.logger = log
    config.simulation_config.logger = log
    # Record the hyperparameters used for this run
    config.simulation_config.to_csv(config.output_dir + "/hyperparameters/" + timestamp + ".csv")
    result = Runner.run(config)
    # Persist and plot only when the simulation actually produced episodes
    if len(result.avg_episode_steps) > 0:
        csv_path = config.output_dir + "/data/" + timestamp + "_simulation" + ".csv"
        result.to_csv(csv_path)
        plot_csv(config, csv_path)



Exemplo n.º 5
0
                                        config.q_agent_config.eval_frequency, config.q_agent_config.eval_log_frequency,
                                        config.q_agent_config.eval_episodes, config.output_dir, sim=False)


# Program entrypoint: train tabular Q-learning vs minimal defense, save csv + plots
if __name__ == '__main__':
    args = util.parse_args(default_config_path())
    if args.configpath is None:
        config = default_config()
    else:
        # Materialize a default config file first if none exists at the path
        if not os.path.exists(args.configpath):
            write_default_config()
        config = util.read_config(args.configpath)
    timestamp = str(time.time())
    util.create_artefact_dirs(config.output_dir)
    log = util.setup_logger("tabular_q_learning_vs_minimal_defense-v4", config.output_dir + "/logs/",
                            time_str=timestamp)
    config.logger = log
    config.q_agent_config.logger = log
    # Record the hyperparameters used for this run
    config.q_agent_config.to_csv(config.output_dir + "/hyperparameters/" + timestamp + ".csv")
    train_result, eval_result = Runner.run(config)
    # Persist and plot only when both phases actually produced episodes
    if len(train_result.avg_episode_steps) > 0 and len(eval_result.avg_episode_steps) > 0:
        data_prefix = config.output_dir + "/data/" + timestamp
        train_csv_path = data_prefix + "_train" + ".csv"
        train_result.to_csv(train_csv_path)
        eval_csv_path = data_prefix + "_eval" + ".csv"
        eval_result.to_csv(eval_csv_path)
        plot_csv(config, eval_csv_path, train_csv_path)



Exemplo n.º 6
0
                               attacker_cumulative_reward=df["attacker_cumulative_reward"],
                               defender_cumulative_reward=df["defender_cumulative_reward"],
                               log_frequency=config.simulation_config.log_frequency,
                               output_dir=config.output_dir, eval=False, sim=True)


# Program entrypoint: simulate tabular Q-agent vs random defender and persist results
if __name__ == '__main__':
    args = util.parse_args(default_config_path())
    if args.configpath is None:
        config = default_config()
    else:
        # Materialize a default config file first if none exists at the path
        if not os.path.exists(args.configpath):
            write_default_config()
        config = util.read_config(args.configpath)
    timestamp = str(time.time())
    util.create_artefact_dirs(config.output_dir)
    log = util.setup_logger("idsgame-v4-tabular_q_agent_vs_random_defender", config.output_dir + "/logs/",
                            time_str=timestamp)
    config.logger = log
    config.simulation_config.logger = log
    # Record the hyperparameters used for this run
    config.simulation_config.to_csv(config.output_dir + "/hyperparameters/" + timestamp + ".csv")
    result = Runner.run(config)
    # Persist and plot only when the simulation actually produced episodes
    if len(result.avg_episode_steps) > 0:
        csv_path = config.output_dir + "/data/" + timestamp + "_simulation" + ".csv"
        result.to_csv(csv_path)
        plot_csv(config, csv_path)



Exemplo n.º 7
0
        defender_cumulative_reward=df["defender_cumulative_reward"],
        log_frequency=config.simulation_config.log_frequency,
        output_dir=config.output_dir,
        eval=False,
        sim=True)


# Program entrypoint: simulate random attacker vs random defender and persist results
if __name__ == '__main__':
    args = util.parse_args(default_config_path())
    if args.configpath is None:
        config = default_config()
    else:
        # Materialize a default config file first if none exists at the path
        if not os.path.exists(args.configpath):
            write_default_config()
        config = util.read_config(args.configpath)
    timestamp = str(time.time())
    util.create_artefact_dirs(config.output_dir)
    log = util.setup_logger("idsgame-v0-random_vs_random",
                            config.output_dir + "/logs/",
                            time_str=timestamp)
    config.logger = log
    config.simulation_config.logger = log
    # Record the hyperparameters used for this run
    config.simulation_config.to_csv(config.output_dir + "/hyperparameters/" +
                                    timestamp + ".csv")
    result = Runner.run(config)
    # Persist and plot only when the simulation actually produced episodes
    if len(result.avg_episode_steps) > 0:
        csv_path = config.output_dir + "/data/" + timestamp + "_simulation" + ".csv"
        result.to_csv(csv_path)
        plot_csv(config, csv_path)
Exemplo n.º 8
0
        config.output_dir,
        sim=False)


# Program entrypoint: train tabular Q-learning in self-play, save csv + plots
if __name__ == '__main__':
    args = util.parse_args(default_config_path())
    if args.configpath is None:
        config = default_config()
    else:
        # Materialize a default config file first if none exists at the path
        if not os.path.exists(args.configpath):
            write_default_config()
        config = util.read_config(args.configpath)
    timestamp = str(time.time())
    util.create_artefact_dirs(config.output_dir)
    log = util.setup_logger("tabular_q_learning_vs_tabular_q_learning-v5",
                            config.output_dir + "/logs/",
                            time_str=timestamp)
    config.logger = log
    config.q_agent_config.logger = log
    # Record the hyperparameters used for this run
    config.q_agent_config.to_csv(config.output_dir + "/hyperparameters/" +
                                 timestamp + ".csv")
    train_result, eval_result = Runner.run(config)
    # Persist and plot only when both phases actually produced episodes
    if len(train_result.avg_episode_steps) > 0 and len(eval_result.avg_episode_steps) > 0:
        data_prefix = config.output_dir + "/data/" + timestamp
        train_csv_path = data_prefix + "_train" + ".csv"
        train_result.to_csv(train_csv_path)
        eval_csv_path = data_prefix + "_eval" + ".csv"
        eval_result.to_csv(eval_csv_path)
        plot_csv(config, eval_csv_path, train_csv_path)
Exemplo n.º 9
0
        config.output_dir,
        sim=False)


# Program entrypoint: train DQN vs random defense, save csv + plots
if __name__ == '__main__':
    args = util.parse_args(default_config_path())
    if args.configpath is None:
        config = default_config()
    else:
        # Materialize a default config file first if none exists at the path
        if not os.path.exists(args.configpath):
            write_default_config()
        config = util.read_config(args.configpath)
    timestamp = str(time.time())
    util.create_artefact_dirs(config.output_dir)
    log = util.setup_logger("dqn_vs_random_defense-v5",
                            config.output_dir + "/logs/",
                            time_str=timestamp)
    config.logger = log
    config.q_agent_config.logger = log
    # Record the hyperparameters used for this run
    config.q_agent_config.to_csv(config.output_dir + "/hyperparameters/" +
                                 timestamp + ".csv")
    train_result, eval_result = Runner.run(config)
    # Persist and plot only when both phases actually produced episodes
    if len(train_result.avg_episode_steps) > 0 and len(eval_result.avg_episode_steps) > 0:
        data_prefix = config.output_dir + "/data/" + timestamp
        train_csv_path = data_prefix + "_train" + ".csv"
        train_result.to_csv(train_csv_path)
        eval_csv_path = data_prefix + "_eval" + ".csv"
        eval_result.to_csv(eval_csv_path)
        plot_csv(config, eval_csv_path, train_csv_path)
Exemplo n.º 10
0
        config.output_dir,
        sim=False)


# Program entrypoint: train tabular Q-learning vs random defense, save csv + plots
if __name__ == '__main__':
    args = util.parse_args(default_config_path())
    if args.configpath is None:
        config = default_config()
    else:
        # Materialize a default config file first if none exists at the path
        if not os.path.exists(args.configpath):
            write_default_config()
        config = util.read_config(args.configpath)
    timestamp = str(time.time())
    util.create_artefact_dirs(config.output_dir)
    log = util.setup_logger("tabular_q_learning_vs_random_defense-v1",
                            config.output_dir + "/logs/",
                            time_str=timestamp)
    config.logger = log
    config.q_agent_config.logger = log
    # Record the hyperparameters used for this run
    config.q_agent_config.to_csv(config.output_dir + "/hyperparameters/" +
                                 timestamp + ".csv")
    train_result, eval_result = Runner.run(config)
    # Persist and plot only when both phases actually produced episodes
    if len(train_result.avg_episode_steps) > 0 and len(eval_result.avg_episode_steps) > 0:
        data_prefix = config.output_dir + "/data/" + timestamp
        train_csv_path = data_prefix + "_train" + ".csv"
        train_result.to_csv(train_csv_path)
        eval_csv_path = data_prefix + "_eval" + ".csv"
        eval_result.to_csv(eval_csv_path)
        plot_csv(config, eval_csv_path, train_csv_path)
Exemplo n.º 11
0
                                        config.q_agent_config.eval_frequency, config.q_agent_config.eval_log_frequency,
                                        config.q_agent_config.eval_episodes, config.output_dir, sim=False)


# Program entrypoint: train a Q-learning defender vs a random attacker, save csv + plots
if __name__ == '__main__':
    args = util.parse_args(default_config_path())
    if args.configpath is None:
        config = default_config()
    else:
        # Materialize a default config file first if none exists at the path
        if not os.path.exists(args.configpath):
            write_default_config()
        config = util.read_config(args.configpath)
    timestamp = str(time.time())
    util.create_artefact_dirs(config.output_dir)
    log = util.setup_logger("random_attack_vs_tabular_q_learning-v4", config.output_dir + "/logs/",
                            time_str=timestamp)
    config.logger = log
    config.q_agent_config.logger = log
    # Record the hyperparameters used for this run
    config.q_agent_config.to_csv(config.output_dir + "/hyperparameters/" + timestamp + ".csv")
    train_result, eval_result = Runner.run(config)
    # Persist and plot only when both phases actually produced episodes
    if len(train_result.avg_episode_steps) > 0 and len(eval_result.avg_episode_steps) > 0:
        data_prefix = config.output_dir + "/data/" + timestamp
        train_csv_path = data_prefix + "_train" + ".csv"
        train_result.to_csv(train_csv_path)
        eval_csv_path = data_prefix + "_eval" + ".csv"
        eval_result.to_csv(eval_csv_path)
        plot_csv(config, eval_csv_path, train_csv_path)



Exemplo n.º 12
0
        defender_cumulative_reward=df["defender_cumulative_reward"],
        log_frequency=config.simulation_config.log_frequency,
        output_dir=config.output_dir,
        eval=False,
        sim=True)


# Program entrypoint: simulate tabular Q-agent vs tabular Q-agent and persist results
if __name__ == '__main__':
    args = util.parse_args(default_config_path())
    if args.configpath is None:
        config = default_config()
    else:
        # Materialize a default config file first if none exists at the path
        if not os.path.exists(args.configpath):
            write_default_config()
        config = util.read_config(args.configpath)
    timestamp = str(time.time())
    # Seed argument fixed at 0 in the original — kept as-is
    util.create_artefact_dirs(config.output_dir, 0)
    log = util.setup_logger("idsgame-v3-tabular_q_agent_vs_tabular_q_agent",
                            config.output_dir + "/logs/",
                            time_str=timestamp)
    config.logger = log
    config.simulation_config.logger = log
    # Record the hyperparameters used for this run
    config.simulation_config.to_csv(config.output_dir + "/hyperparameters/" +
                                    timestamp + ".csv")
    result = Runner.run(config)
    # Persist and plot only when the simulation actually produced episodes
    if len(result.avg_episode_steps) > 0:
        csv_path = config.output_dir + "/data/" + timestamp + "_simulation" + ".csv"
        result.to_csv(csv_path)
        plot_csv(config, csv_path)
Exemplo n.º 13
0
        config.output_dir,
        sim=False)


# Program entrypoint: train a Q-learning defender vs a maximal attacker, save csv + plots
if __name__ == '__main__':
    args = util.parse_args(default_config_path())
    if args.configpath is None:
        config = default_config()
    else:
        # Materialize a default config file first if none exists at the path
        if not os.path.exists(args.configpath):
            write_default_config()
        config = util.read_config(args.configpath)
    timestamp = str(time.time())
    util.create_artefact_dirs(config.output_dir)
    log = util.setup_logger("maximal_attack_vs_tabular_q_learning-v1",
                            config.output_dir + "/logs/",
                            time_str=timestamp)
    config.logger = log
    config.q_agent_config.logger = log
    # Record the hyperparameters used for this run
    config.q_agent_config.to_csv(config.output_dir + "/hyperparameters/" +
                                 timestamp + ".csv")
    train_result, eval_result = Runner.run(config)
    # Persist and plot only when both phases actually produced episodes
    if len(train_result.avg_episode_steps) > 0 and len(eval_result.avg_episode_steps) > 0:
        data_prefix = config.output_dir + "/data/" + timestamp
        train_csv_path = data_prefix + "_train" + ".csv"
        train_result.to_csv(train_csv_path)
        eval_csv_path = data_prefix + "_eval" + ".csv"
        eval_result.to_csv(eval_csv_path)
        plot_csv(config, eval_csv_path, train_csv_path)
Exemplo n.º 14
0
        defender_cumulative_reward=df["defender_cumulative_reward"],
        log_frequency=config.simulation_config.log_frequency,
        output_dir=config.output_dir,
        eval=False,
        sim=True)


# Program entrypoint: simulate maximal attacker vs random defender and persist results
if __name__ == '__main__':
    args = util.parse_args(default_config_path())
    if args.configpath is None:
        config = default_config()
    else:
        # Materialize a default config file first if none exists at the path
        if not os.path.exists(args.configpath):
            write_default_config()
        config = util.read_config(args.configpath)
    timestamp = str(time.time())
    util.create_artefact_dirs(config.output_dir)
    log = util.setup_logger("idsgame-v0-attack_maximal_vs_defend_random",
                            config.output_dir + "/logs/",
                            time_str=timestamp)
    config.logger = log
    config.simulation_config.logger = log
    # Record the hyperparameters used for this run
    config.simulation_config.to_csv(config.output_dir + "/hyperparameters/" +
                                    timestamp + ".csv")
    result = Runner.run(config)
    # Persist and plot only when the simulation actually produced episodes
    if len(result.avg_episode_steps) > 0:
        csv_path = config.output_dir + "/data/" + timestamp + "_simulation" + ".csv"
        result.to_csv(csv_path)
        plot_csv(config, csv_path)