env_args = dict(topology=topology,
                seed=10,
                allow_rejection=True,
                load=load,
                mean_service_holding_time=25,
                episode_length=episode_length,
                num_service_classes=num_service_classes,
                classes_arrival_probabilities=classes_arrival_probabilities,
                classes_reward=classes_reward,
                num_spectrum_resources=num_spectrum_resources,
                k_paths=k_paths)

env_rnd = gym.make('QoSConstrainedRA-v0', **env_args)
mean_reward_rnd, std_reward_rnd = evaluate_heuristic(env_rnd,
                                                     random_policy,
                                                     n_eval_episodes=episodes)
print('Rnd:', mean_reward_rnd, std_reward_rnd)

env_sp = gym.make('QoSConstrainedRA-v0', **env_args)
mean_reward_sp, std_reward_sp = evaluate_heuristic(env_sp,
                                                   shortest_path,
                                                   n_eval_episodes=episodes)
print('SP:', mean_reward_sp, std_reward_sp, env_sp.actions_output)

env_sap = gym.make('QoSConstrainedRA-v0', **env_args)
mean_reward_sap, std_reward_sap = evaluate_heuristic(env_sap,
                                                     shortest_available_path,
                                                     n_eval_episodes=episodes)
print('SAP:', mean_reward_sap, std_reward_sap, env_sap.actions_output)
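
# Note: evaluate_heuristic, random_policy, shortest_path and
# shortest_available_path are imported outside this excerpt. Purely as an
# illustrative sketch (not the library's actual implementation), the
# evaluation loop and the random baseline could look like this, assuming
# the classic Gym reset/step API:
import numpy as np

def random_policy(env):
    # baseline: sample any valid action uniformly from the action space
    return env.action_space.sample()

def evaluate_heuristic(env, heuristic, n_eval_episodes=10):
    # roll out heuristic(env) for n_eval_episodes and return the mean and
    # standard deviation of the accumulated episode rewards
    episode_rewards = []
    for _ in range(n_eval_episodes):
        env.reset()
        done, total_reward = False, 0.0
        while not done:
            _, reward, done, _ = env.step(heuristic(env))
            total_reward += reward
        episode_rewards.append(total_reward)
    return np.mean(episode_rewards), np.std(episode_rewards)
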
Example No. 2
env_args = dict(topology=topology,
                seed=10,
                allow_rejection=False,
                mean_service_holding_time=7.5,
                mean_service_inter_arrival_time=1. / 12.,
                j=1,
                episode_length=50,
                node_request_probabilities=node_request_probabilities)

print('STR'.ljust(5), 'REW'.rjust(7), 'STD'.rjust(7))

init_env = gym.make('DeepRMSA-v0', **env_args)
env_rnd = init_env
mean_reward_rnd, std_reward_rnd = evaluate_heuristic(env_rnd,
                                                     random_policy,
                                                     n_eval_episodes=episodes)
print('Rnd:'.ljust(5), f'{mean_reward_rnd:.4f}  {std_reward_rnd:>7.4f}')

env_sp = gym.make('DeepRMSA-v0', **env_args)
mean_reward_sp, std_reward_sp = evaluate_heuristic(env_sp,
                                                   shortest_path_first_fit,
                                                   n_eval_episodes=episodes)
print('SP:'.ljust(5), f'{mean_reward_sp:.4f}  {std_reward_sp:>7.4f}')

env_sap = gym.make('DeepRMSA-v0', **env_args)
mean_reward_sap, std_reward_sap = evaluate_heuristic(
    env_sap, shortest_available_path_first_fit, n_eval_episodes=episodes)
print('SAP:'.ljust(5), f'{mean_reward_sap:.4f}  {std_reward_sap:>7.4f}')

# env_llp = gym.make('DeepRMSA-v0', **env_args)
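
# Sanity check (illustrative): with the parameters above, the offered load
# follows the usual Erlang relation load = holding_time * arrival_rate
# (equivalently holding_time / inter_arrival_time):
offered_load = env_args['mean_service_holding_time'] / env_args['mean_service_inter_arrival_time']
print('Offered load (Erlangs):', offered_load)  # 7.5 * 12 = 90 Erlangs
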
Example No. 3
env_args = dict(topology=topology,
                seed=10,
                allow_rejection=True,
                load=load,
                mean_service_holding_time=25,
                episode_length=episode_length,
                num_spectrum_resources=64)

print('STR'.ljust(8), 'REW'.rjust(7), 'STD'.rjust(7))

# Random Policy
init_env = gym.make('PowerAwareRMSA-v0', **env_args)
env_rnd = SimpleMatrixObservation(init_env)
mean_reward_rnd, std_reward_rnd = evaluate_heuristic(env_rnd,
                                                     random_policy,
                                                     n_eval_episodes=episodes)
print('Rnd:'.ljust(8), f'{mean_reward_rnd:.4f}  {std_reward_rnd:>7.4f}')
print('Bit rate blocking:', (init_env.episode_bit_rate_requested -
                             init_env.episode_bit_rate_provisioned) /
      init_env.episode_bit_rate_requested)
print(
    'Request blocking:',
    (init_env.episode_services_processed - init_env.episode_services_accepted)
    / init_env.episode_services_processed)
print('Total power:', 10 * np.log10(init_env.total_power))
print('Average power:',
      10 * np.log10(init_env.total_power / init_env.services_accepted))
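
# The two prints above express power in decibels via 10 * log10. As a small
# illustrative helper (units are an assumption: if total_power is in
# milliwatts the result is dBm, if in watts it is dBW):
def to_db(linear_power):
    # convert a linear power value to decibels relative to its own unit
    return 10 * np.log10(linear_power)

# e.g. to_db(init_env.total_power) reproduces the 'Total power' value above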

# Shortest Available Path First Fit Fixed Power
env_sap_ff_fp = gym.make('PowerAwareRMSA-v0', **env_args)
Example No. 4
                episode_length=episode_length,
                num_spectrum_resources=64,
                num_spatial_resources=num_spatial_resources)

# print('STR'.ljust(5), 'REW'.rjust(7), 'STD'.rjust(7))
#
# init_env = gym.make('RMCSA-v0', **env_args)
# env_rnd = SimpleMatrixObservation(init_env)
# mean_reward_rnd, std_reward_rnd = evaluate_heuristic(env_rnd, random_policy, n_eval_episodes=episodes)
# print('Rnd:'.ljust(8), f'{mean_reward_rnd:.4f}  {std_reward_rnd:>7.4f}')
# print('Bit rate blocking:', (init_env.episode_bit_rate_requested - init_env.episode_bit_rate_provisioned) / init_env.episode_bit_rate_requested)
# print('Request blocking:', (init_env.episode_services_processed - init_env.episode_services_accepted) / init_env.episode_services_processed)
# print(init_env.topology.graph['throughput'])

env_sap = gym.make('RMCSA-v0', **env_args)
mean_reward_sap, std_reward_sap = evaluate_heuristic(
    env_sap, shortest_available_first_core_first_fit, n_eval_episodes=episodes)

print('STR'.ljust(8), 'REW'.rjust(7), 'STD'.rjust(7))

# Initial Metrics for Environment
print('SAP-FF:'.ljust(8), f'{mean_reward_sap:.4f}  {std_reward_sap:.4f}')
print(
    'Bit rate blocking:',
    (env_sap.episode_bit_rate_requested - env_sap.episode_bit_rate_provisioned)
    / env_sap.episode_bit_rate_requested)
print(
    'Request blocking:',
    (env_sap.episode_services_processed - env_sap.episode_services_accepted) /
    env_sap.episode_services_processed)

# Additional Metrics For Environment
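
# Illustrative only: further metrics one could report here, reusing
# attributes already referenced earlier in these snippets (not an official
# or exhaustive list):
print('Throughput:', env_sap.topology.graph['throughput'])
print('Services accepted (episode):', env_sap.episode_services_accepted)
print('Services processed (episode):', env_sap.episode_services_processed)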