# Assumes module-level imports: numpy as np, gym.spaces as spaces,
# and ns3gym.ns3env (as in the other examples below).
def __init__(self):
    super(myns3env, self).__init__()
    port = 9999
    simTime = 45  # seconds
    stepTime = 0.2  # seconds
    startSim = 0
    seed = 3
    simArgs = {
        "--duration": simTime,
    }
    debug = True
    max_env_steps = 250
    self.env = ns3env.Ns3Env(port=port,
                             stepTime=stepTime,
                             startSim=startSim,
                             simSeed=seed,
                             simArgs=simArgs,
                             debug=debug)
    self.env._max_episode_steps = max_env_steps
    self.Cell_num = 6
    self.max_throu = 30
    self.Users = 40
    self.state_dim = self.Cell_num * 4
    self.action_dim = self.env.action_space.shape[0]
    self.action_bound = self.env.action_space.high
    self.action_space = spaces.Box(low=-1,
                                   high=1,
                                   shape=(self.action_dim,),
                                   dtype=np.float32)
    self.observation_space = spaces.Box(low=0,
                                        high=self.Users,
                                        shape=(self.state_dim,),
                                        dtype=np.float32)
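
# Hypothetical companion methods (not in the original snippet): the wrapper
# exposes actions in [-1, 1] but keeps the real bounds in self.action_bound,
# so step() would rescale before forwarding to ns-3.
def step(self, action):
    scaled_action = action * self.action_bound  # map [-1, 1] to env range
    return self.env.step(scaled_action)

def reset(self):
    return self.env.reset()
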
def env_init():
    startSim = 0
    port = 5555
    simTime = 20  # seconds
    stepTime = 1.0  # seconds
    seed = 0
    simArgs = {"--simTime": simTime,
               "--testArg": 123}
    debug = False

    env = ns3env.Ns3Env(port=port, stepTime=stepTime, startSim=startSim,
                        simSeed=seed, simArgs=simArgs, debug=debug)
    env.reset()

    return env
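
# Minimal usage sketch for env_init() (not part of the original snippet):
if __name__ == "__main__":
    env = env_init()
    print("Observation space: ", env.observation_space)
    print("Action space: ", env.action_space)
    env.close()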
Example #3
    def __init__(self, worker_name, env_count, sess, global_actor,
                 global_critic, max_episode_num):
        threading.Thread.__init__(self)

        # hyperparameters
        self.GAMMA = 0.95
        self.ACTOR_LEARNING_RATE = 0.0001
        self.CRITIC_LEARNING_RATE = 0.001
        self.ENTROPY_BETA = 0.01
        self.t_MAX = 4  # n-step TD

        self.max_episode_num = max_episode_num

        # self.env = gym.make(env_name)
        # port, stepTime, startSim, seed, simArgs and debug come from the
        # surrounding module; each worker is offset onto its own port.
        self.env = ns3env.Ns3Env(port=(port + env_count),
                                 stepTime=stepTime,
                                 startSim=startSim,
                                 simSeed=seed,
                                 simArgs=simArgs,
                                 debug=debug)
        self.worker_name = worker_name
        self.sess = sess

        self.global_actor = global_actor
        self.global_critic = global_critic

        # state variable dimension
        #self.state_dim = self.env.observation_space.shape[0]
        self.state_dim = state_dimension
        # action dimension
        #self.action_dim = self.env.action_space.shape[0]
        self.action_dim = action_dimension
        # action maximum boundary
        #self.action_bound = int(self.env.action_space.high[0])
        self.action_bound = action_max_bound
        # create worker actor and critic NN
        self.worker_actor = Worker_Actor(self.sess, self.state_dim,
                                         self.action_dim, self.action_bound,
                                         self.ACTOR_LEARNING_RATE,
                                         self.ENTROPY_BETA, self.global_actor)
        self.worker_critic = Worker_Critic(self.sess, self.state_dim,
                                           self.action_dim,
                                           self.CRITIC_LEARNING_RATE,
                                           self.global_critic)
        # copy weights from the global networks to the worker networks
        self.worker_actor.model.set_weights(
            self.global_actor.model.get_weights())
        self.worker_critic.model.set_weights(
            self.global_critic.model.get_weights())
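
    # Hypothetical helper (not in the original) showing the n-step TD target
    # this worker would compute from self.GAMMA over rollouts of up to
    # self.t_MAX steps; assumes numpy is imported as np.
    def n_step_td_target(self, rewards, next_v_value, done):
        td_targets = np.zeros(len(rewards))
        cumulative = 0.0 if done else next_v_value
        for k in reversed(range(len(rewards))):
            cumulative = rewards[k] + self.GAMMA * cumulative
            td_targets[k] = cumulative
        return td_targets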
Example #4
    def __init__(self, no_threads, **params):
        self.params = params
        self.no_threads = no_threads
        self.ports = [13968 + i + np.random.randint(40000)
                      for i in range(no_threads)]
        self.commands = self._craft_commands(params)

        self.SCRIPT_RUNNING = False
        self.envs = []

        self.run()
        for port in self.ports:
            env = ns3env.Ns3Env(port=port, stepTime=params['envStepTime'],
                                startSim=0, simSeed=0, simArgs=params,
                                debug=False)
            self.envs.append(env)

        self.SCRIPT_RUNNING = True
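
    # Hypothetical cleanup helper (not in the original snippet): each spawned
    # ns-3 environment should be closed when the pool is torn down.
    def close(self):
        for env in self.envs:
            env.close()
        self.SCRIPT_RUNNING = False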
Example #5
    def __init__(self, env_name):
        # set tensorflow session
        self.sess = tf.Session()
        K.set_session(self.sess)

        # generate learning environment
        self.env_name = env_name
        self.WORKERS_NUM = multiprocessing.cpu_count()
        #env = gym.make(self.env_name)       # change here to ns3gym
        env = ns3env.Ns3Env(port=port, stepTime=stepTime, startSim=startSim, simSeed=seed, simArgs=simArgs, debug=debug)
        # state dimension
        #state_dim = env.observation_space.shape[0]
        state_dim = state_dimension
        # action dimension
        #action_dim = env.action_space.shape[0]
        action_dim = action_dimension
        # maximum action boundary
        #action_bound = env.action_space.high[0]
        action_bound = action_max_bound
        # generate global actor and critic nn
        self.global_actor = Global_Actor(state_dim, action_dim, action_bound)
        self.global_critic = Global_Critic(state_dim)
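
    # Sketch of how such an A3C master typically launches its workers. The
    # worker class name and constructor signature are assumptions taken from
    # the worker __init__ in Example #3 above; this is not the original
    # training code.
    def train(self, max_episode_num):
        workers = [
            A3Cworker('worker%d' % i, i, self.sess, self.global_actor,
                      self.global_critic, max_episode_num)
            for i in range(self.WORKERS_NUM)
        ]
        self.sess.run(tf.global_variables_initializer())
        for worker in workers:
            worker.start()
        for worker in workers:
            worker.join()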
Example #6
startSim = True
iterationNum = 1

port = 5555
simTime = 1000
stepTime = 0.5
seed = 12
simArgs = {
    "--duration": simTime,
}
debug = False

env = ns3env.Ns3Env(port=port,
                    stepTime=stepTime,
                    startSim=startSim,
                    simSeed=seed,
                    simArgs=simArgs,
                    debug=debug)
env.reset()

state_space = env.observation_space.shape[0]
print(state_space)
state_trans_space = 3
action_space = 5


def get_agent(state):
    socketUuid = state[0]
    tcpEnvType = state[1]
    tcpAgent = get_agent.tcpAgents.get(socketUuid, None)
    print(tcpAgent)
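
# get_agent keeps its registry in function attributes; the standard ns3-gym
# TCP example initializes them once before the first call. A sketch of that
# initialization (assumed, not shown in the snippet above):
get_agent.tcpAgents = {}
get_agent.ob_space = env.observation_space
get_agent.ac_space = env.action_space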
Example #7
elif len(argumentList) == 4:
    if sys.argv[1] == 'test':

        # Collect from the CLI the name of the traffic scenario
        scenario_name = sys.argv[2].split("=")[1]

        # Collect from the CLI the number of cars that the agent was trained on
        num_of_vehicles = sys.argv[3].split("=")[1]

        # Load the previously trained agent parameters and start
        # running the traffic simulation
        # Creating the ns3 environment that will act as a link
        # between our agent and the live simulation
        env = ns3env.Ns3Env(port=5555,
                            stepTime=0.5,
                            startSim=0,
                            simSeed=12,
                            debug=False)

        ob_space = env.observation_space
        ac_space = env.action_space

        print("Observation Space: ", ob_space, ob_space.dtype)
        print("Action Space: ", ac_space, ac_space.dtype)

        stepIdx, currIt = 0, 0

        try:

            # model = PPO2.load(f'rsu_agents/{scenario_name}_agents/'
            #                   f'PPO2_ns3_online_{scenario_name}_cars={num_of_vehicles}')
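
            # A plausible evaluation loop once the model is loaded (a sketch,
            # assuming a stable-baselines PPO2 model as in the comment above;
            # kept commented out, like the load call, since the model path is
            # elided):
            # obs = env.reset()
            # while True:
            #     action, _states = model.predict(obs)
            #     obs, reward, done, info = env.step(action)
            #     if done:
            #         break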
Example #8
    #plt.savefig('learning.pdf', bbox_inches='tight')
    plt.tight_layout()
    plt.subplots_adjust(left=0.16, bottom=0.10, right=0.92, top=0.92,
                        wspace=0.20, hspace=0.53)
    plt.show()


parser = argparse.ArgumentParser(description='Start simulation script on/off')
parser.add_argument('--start',
                    type=int,
                    default=1,
                    help='Start ns-3 simulation script 0/1, Default: 1')
args = parser.parse_args()
startSim = bool(args.start)
print("make env")
env = ns3env.Ns3Env(port=setting.port, stepTime=setting.stepTime, startSim=startSim,
                    simSeed=setting.seed, simArgs=setting.simArgs, debug=setting.debug)
print("done env")
env._max_episode_steps = setting.MAX_STEPS

delay_history = []
rew_history = []
util_history = []
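
# signal_handler is registered below but not defined in this snippet; a
# minimal sketch of what it might do (an assumption, not the original
# handler), assuming `import sys` at the top of the file:
def signal_handler(sig, frame):
    print("Ctrl-C -> Exit")
    env.close()
    sys.exit(0)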

signal.signal(signal.SIGINT, signal_handler)

actor = ActorNetwork(state_size=setting.STATE_SIZE,
                     action_size=setting.ACTION_SIZE,
                     lr=setting.ACTOR_LEARNING_RATE,
                     n_h1=setting.N_H1, n_h2=setting.N_H2, tau=setting.TAU)
critic = CriticNetwork(state_size=setting.STATE_SIZE,
                       action_size=setting.ACTION_SIZE,
                       lr=setting.CRITIC_LEARNING_RATE,
                       n_h1=setting.N_H1, n_h2=setting.N_H2, tau=setting.TAU)
noise = OUProcess(setting.ACTION_SIZE)
exprep = ExpReplay(mem_size=setting.MEM_SIZE, start_mem=setting.START_MEM,
                   state_size=[setting.STATE_SIZE], kth=-1,
                   batch_size=setting.BATCH_SIZE)

sess = tf.Session()
Example #9
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import gym
import argparse
from ns3gym import ns3env

__author__ = "Piotr Gawlowicz"
__copyright__ = "Copyright (c) 2018, Technische Universität Berlin"
__version__ = "0.1.0"
__email__ = "*****@*****.**"


#env = gym.make('ns3-v0', port=5555)
env = ns3env.Ns3Env(port=5555)
env.reset()

ob_space = env.observation_space
ac_space = env.action_space
print("Observation space: ", ob_space,  ob_space.dtype)
print("Action space: ", ac_space, ac_space.dtype)

stepIdx = 0

try:
    obs = env.reset()
    print("Step: ", stepIdx)
    print("---obs: ", obs)

    while True:
        stepIdx += 1
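        # A plausible continuation (sketch): the standard ns3-gym random-agent
        # loop; not necessarily the original file's exact code.
        action = env.action_space.sample()
        obs, reward, done, info = env.step(action)
        print("---action, reward, done: ", action, reward, done)
        if done:
            break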
Example #10
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import argparse
from ns3gym import ns3env

__author__ = "Piotr Gawlowicz"
__copyright__ = "Copyright (c) 2020, Technische Universität Berlin"
__version__ = "0.1.0"
__email__ = "*****@*****.**"

port = 5556
env = ns3env.Ns3Env(port=port, startSim=False)
env.reset()

ob_space = env.observation_space
ac_space = env.action_space
print("Observation space: ", ob_space, ob_space.dtype)
print("Action space: ", ac_space, ac_space.dtype)

stepIdx = 0
currIt = 0
iterationNum = 3

try:
    while True:
        obs = env.reset()
        print("Step: ", stepIdx)
        print("---obs: ", obs)

        while True:
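            # A plausible continuation (sketch): step with random actions and
            # stop after iterationNum episodes, mirroring other ns3-gym samples.
            stepIdx += 1
            action = env.action_space.sample()
            obs, reward, done, info = env.step(action)
            if done:
                stepIdx = 0
                currIt += 1
                break
        if currIt == iterationNum:
            break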
Example #11
__email__ = "*****@*****.**"


parser = argparse.ArgumentParser(description='Start simulation script on/off')
parser.add_argument('--start',
                    type=int,
                    default=1,
                    help='Start simulation script True/False, Default: True')
args = parser.parse_args()
startSim = bool(args.start)

print("startSim: ", startSim)


stepTime = 0.5  # seconds
env = ns3env.Ns3Env(stepTime=stepTime, startSim=startSim)
env.reset()

ob_space = env.observation_space
ac_space = env.action_space
print("Observation space: ", ob_space,  ob_space.dtype)
print("Action space: ", ac_space, ac_space.dtype)

stepIdx = 0

try:
    while True:
        stepIdx += 1
        print("Step: ", stepIdx)

        action = env.action_space.sample()
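        # A plausible completion (sketch): the step call and the cleanup that
        # ns3-gym samples typically end with; not the original file's code.
        obs, reward, done, info = env.step(action)
        print("---obs, reward, done: ", obs, reward, done)
        if done:
            break

except KeyboardInterrupt:
    print("Ctrl-C -> Exit")
finally:
    env.close()
    print("Done")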