Example #1
import os

from attackgraph import DagGenerator as dag  # module name inferred from Example #2's usage
from attackgraph import file_op as fp


def env_rand_gen(env_name,
                 num_attr_N=11,
                 num_attr_E=4,
                 T=10,
                 graphid=1,
                 numNodes=30,
                 numEdges=100,
                 numRoot=5,
                 numGoals=6,
                 history=3):
    # Build an attack-graph environment with the requested topology parameters.
    env = dag.Environment(num_attr_N=num_attr_N,
                          num_attr_E=num_attr_E,
                          T=T,
                          graphid=graphid,
                          numNodes=numNodes,
                          numEdges=numEdges,
                          numRoot=numRoot,
                          numGoals=numGoals,
                          history=history)
    # env.randomDAG()
    env.load_graph()  # load a pre-generated graph rather than sampling a random DAG
    path = os.getcwd() + "/env_data/" + env_name + ".pkl"
    print("env path is ", path)
    fp.save_pkl(env, path)
    print(env_name + " has been saved.")
    return env
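A minimal usage sketch (the environment name below is hypothetical):

# Generate a 30-node environment, pickle it under ./env_data/, and return it.
env = env_rand_gen("test_env", numNodes=30, numEdges=100, numRoot=5, numGoals=6)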
Example #2
import logging
import os.path as osp

from attackgraph import DagGenerator, settings  # assumed module names
from attackgraph import file_op as fp
from attackgraph.empirical_game import EmpiricalGame  # module name per Example #5

logger = logging.getLogger(__name__)


def init_game(saved_env_name: str = None, env_name: str = None):
    """ First attempts to load a saved environment, if not builds new enviornment.

    :param saved_env_name: Name of saved environment name to load.
    :param env_name: Name of environment to create.
    """
    assert (saved_env_name is not None) or (env_name is not None)

    if saved_env_name is not None:
        logger.info(f"Loading environment: {saved_env_name}")
        path = osp.join(settings.get_env_data_dir(), f"{saved_env_name}.pkl")
        if not osp.exists(path):
            raise ValueError("The env being loaded does not exist.")
        env = fp.load_pkl(path)

    else:
        # Create and save a new environment (as in Examples #4 and #5);
        # `get_env_data_dir` returns a directory, not an environment.
        env = DagGenerator.env_rand_gen_and_save(env_name)

    # save graph copy
    env.save_graph_copy()
    env.save_mask_copy()  # TODO: change transfer

    # create players and point to their env
    env.create_players()
    env.create_action_space()

    # initialize game data
    game = EmpiricalGame(env)
    game.env.defender.set_env_belong_to(game.env)
    game.env.attacker.set_env_belong_to(game.env)

    # NOTE: likely redundant, since game.env refers to this same env object.
    env.defender.set_env_belong_to(env)
    env.attacker.set_env_belong_to(env)

    return game
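A usage sketch; both environment names below are hypothetical:

# Prefer a previously saved environment; otherwise build one from scratch.
game = init_game(saved_env_name="run3_env")  # loads {env_data_dir}/run3_env.pkl
game = init_game(env_name="fresh_env")       # generates and saves a new env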
Example #3
import os
from attackgraph import rand_strategies_payoff as rp
from attackgraph.sample_strategy import rand_att_str_generator, rand_def_str_generator
from attackgraph import game_data
from attackgraph.util import set_global_seed
from baselines import deepq
import tensorflow as tf
from baselines.common import models
from baselines.deepq import load_action
from attackgraph.sample_strategy import sample_strategy_from_mixed
from attackgraph.parallel_sim import parallel_sim
from attackgraph import file_op as fp
from attackgraph import training
import copy

from attackgraph import DagGenerator as dag  # `dag` is used below; module name inferred from Example #2

# A small hand-crafted environment: 5 nodes, 4 edges, 2 root nodes, 1 goal node.
env = dag.Environment(numNodes=5, numEdges=4, numRoot=2, numGoals=1)

nodeset = [1, 2, 3, 4, 5]
edgeset = [(1, 2), (2, 3), (2, 4), (5, 2)]

attr = {}
attr['nodes'] = nodeset
attr['edges'] = edgeset
attr['Nroots'] = [1, 0, 0, 0, 1]        # root-node flags: nodes 1 and 5 (numRoot=2)
attr['Ntypes'] = [0, 0, 0, 1, 0]        # goal-node flags: node 4 (numGoals=1)
attr['NeTypes'] = [1, 1, 0, 0, 1]       # per-node type flags (presumably OR/AND)
attr['Nstates'] = [0, 0, 0, 0, 0]       # initial node states (all inactive)
attr['NaRewards'] = [0, 0, 0, 3, 0]     # attacker reward per node (goal pays 3)
attr['NdPenalties'] = [0, 0, 0, -3, 0]  # defender penalty per node if compromised
attr['NdCosts'] = [-1, -1, -1, -1, -1]  # defender per-node action cost
attr['NaCosts'] = [-1, -1, -1, -1, -1]  # attacker per-node action cost
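Since every per-node attribute list must line up with nodeset, a quick sanity check (plain Python over the values above) can catch mistakes before the attributes are handed to the environment:

# Verify the hand-built attributes agree with the declared topology.
n = len(attr['nodes'])
per_node_keys = ('Nroots', 'Ntypes', 'NeTypes', 'Nstates',
                 'NaRewards', 'NdPenalties', 'NdCosts', 'NaCosts')
assert all(len(attr[k]) == n for k in per_node_keys)
assert sum(attr['Nroots']) == 2    # matches numRoot=2
assert sum(attr['Ntypes']) == 1    # matches numGoals=1
assert len(attr['edges']) == 4     # matches numEdges=4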
Example #4
import datetime
import os
import sys

import numpy as np

from attackgraph import DagGenerator as dag    # module name inferred from Example #2
from attackgraph import file_op as fp
from attackgraph import game_data
from attackgraph import json_op as jp          # assumed home of the `jp` JSON helper
from attackgraph.simulation import series_sim  # assumed home of `series_sim`


def initialize(load_env=None, env_name=None):
    print("=======================================================")
    print("=======Begin Initialization and first epoch============")
    print("=======================================================")

    # Create Environment
    if isinstance(load_env, str):
        path = os.getcwd() + '/env_data/' + load_env + '.pkl'
        if not fp.isExist(path):
            raise ValueError("The env being loaded does not exist.")
        env = fp.load_pkl(path)
    else:
        # env is created and saved.
        env = dag.env_rand_gen_and_save(env_name)

    # save graph copy
    env.save_graph_copy()
    env.save_mask_copy()

    # create players and point to their env
    env.create_players()
    env.create_action_space()

    # load param
    param_path = os.getcwd() + '/network_parameters/param.json'
    param = jp.load_json_data(param_path)

    # initialize game data
    game = game_data.Game_data(env,
                               num_episodes=param['num_episodes'],
                               threshold=param['threshold'])
    game.set_hado_param(param=param['hado_param'])
    game.set_hado_time_step(param['retrain_timesteps'])
    game.env.defender.set_env_belong_to(game.env)
    game.env.attacker.set_env_belong_to(game.env)

    env.defender.set_env_belong_to(env)
    env.attacker.set_env_belong_to(env)

    # The uniform strategies were produced ahead of time.
    print("epoch 1:", datetime.datetime.now())
    epoch = 1

    act_att = 'att_str_epoch1.pkl'
    act_def = 'def_str_epoch1.pkl'

    game.add_att_str(act_att)
    game.add_def_str(act_def)

    print('Begin simulation for uniform strategy.')
    sys.stdout.flush()
    # simulate using random strategies and initialize payoff matrix
    # if MPI_flag:
    #     aReward, dReward = do_MPI_sim(act_att, act_def)
    # else:
    aReward, dReward = series_sim(game.env, game, act_att, act_def,
                                  game.num_episodes)
    print('Done simulation for uniform strategy.')
    sys.stdout.flush()

    game.init_payoffmatrix(dReward, aReward)
    # Trivial 1x1 equilibrium: each player plays its single uniform strategy.
    ne = {}
    ne[0] = np.array([1], dtype=np.float32)
    ne[1] = np.array([1], dtype=np.float32)
    game.add_nasheq(epoch, ne)

    # save a copy of game data
    game_path = os.getcwd() + '/game_data/game.pkl'
    fp.save_pkl(game, game_path)

    sys.stdout.flush()
    return game
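The function only reads four keys from network_parameters/param.json; a hypothetical minimal file would look like the dict below (all values are placeholders, and the shape of hado_param is a guess):

# Hypothetical contents of network_parameters/param.json (placeholder values):
param = {
    "num_episodes": 250,              # episodes per payoff simulation
    "threshold": 0.1,                 # payoff/convergence threshold
    "hado_param": [0.7, 100, 0.002],  # HADO hyperparameters (shape is a guess)
    "retrain_timesteps": 1000000,     # time steps used for HADO retraining
}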
Example #5
import logging
import os.path as osp
import sys

import numpy as np
from tensorboardX import SummaryWriter  # assumed writer; matches the `logdir=` kwarg used below

from attackgraph import DagGenerator as dag  # module name inferred from Example #2
from attackgraph import empirical_game, settings, simulation  # assumed module names
from attackgraph import file_op as fp

logger = logging.getLogger(__name__)


def initialize(load_env=None, env_name=None, n_processes: int = 1):
    logger.info("=======================================================")
    logger.info("=======Begin Initialization and first epoch============")
    logger.info("=======================================================")

    # Create Environment
    if isinstance(load_env, str):
        path = osp.join(settings.get_env_data_dir(), "{}.pkl".format(load_env))
        if not fp.isExist(path):
            raise ValueError("The env being loaded does not exist.")
        env = fp.load_pkl(path)
    else:
        # env is created and saved.
        env = dag.env_rand_gen_and_save(env_name)

    # save graph copy
    env.save_graph_copy()
    env.save_mask_copy()  # TODO: change transfer

    # create players and point to their env
    env.create_players()
    env.create_action_space()

    # log the root nodes and OR-edges
    roots = env.get_Roots()
    logger.info(f"Root Nodes: {roots}")
    ed = env.get_ORedges()
    logger.info(f"Or edges: {ed}")

    # initialize game data
    game = empirical_game.EmpiricalGame(env)
    game.env.defender.set_env_belong_to(game.env)
    game.env.attacker.set_env_belong_to(game.env)

    # NOTE: likely redundant, since game.env refers to this same env object.
    env.defender.set_env_belong_to(env)
    env.attacker.set_env_belong_to(env)

    # The uniform strategies were produced ahead of time.
    logger.info("Epoch 1")
    epoch = 1
    epoch_dir = osp.join(settings.get_results_dir(), f"epoch_{epoch}")
    writer = SummaryWriter(logdir=epoch_dir)

    act_att = 'att_str_epoch1.pkl'
    act_def = 'def_str_epoch1.pkl'

    game.add_att_str(act_att)
    game.add_def_str(act_def)

    logger.info('Begin simulation for uniform strategy.')
    aReward, dReward = simulation.simulate_profile(
        env=game.env,
        game=game,
        nn_att=act_att,
        nn_def=act_def,
        n_episodes=game.num_episodes,
        n_processes=n_processes,
        save_dir=epoch_dir,
        summary_writer=writer)
    logger.info('Done simulation for uniform strategy.')

    game.init_payoffmatrix(dReward, aReward)
    ne = {}
    ne[0] = np.array([1], dtype=np.float32)
    ne[1] = np.array([1], dtype=np.float32)
    game.add_nasheq(epoch, ne)

    # save a copy of game data
    game_path = osp.join(settings.get_run_dir(), "game.pkl")
    fp.save_pkl(game, game_path)

    sys.stdout.flush()
    return game
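At epoch 1 each player owns exactly one strategy, so the equilibrium stored above is degenerate. A sketch of how the dict grows in later epochs; the player-index convention and the epoch-2 mixture are assumptions:

# ne maps player index (presumably 0=defender, 1=attacker) to a mixed strategy
# over that player's strategy list; at epoch 1 both are the pure profile.
# A later epoch with two strategies per player might look like:
ne_epoch2 = {0: np.array([0.4, 0.6], dtype=np.float32),  # hypothetical mixture
             1: np.array([1.0, 0.0], dtype=np.float32)}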