def test_case(bandwidth=5.0, log_paht='./log/zhan-transrate-5Mbps', lambda_t=0.5, lambda_e=0.5, nupdates=2000):
        """Train and evaluate the offloading agent for one bandwidth setting.

        Args:
            bandwidth: Up/downlink transmission rate (Mbps) used for the link.
            log_paht: Logger output directory (parameter name kept as-is,
                typo included, so existing keyword callers do not break).
            lambda_t: Weight of the latency term in the QoE objective.
            lambda_e: Weight of the energy term in the QoE objective.
            nupdates: Number of training updates handed to ``learn``.

        Returns:
            The mean-reward track produced by ``learn`` (previously computed
            but discarded).
        """
        logger.configure(log_paht, ['stdout', 'json', 'csv'])
        resource_cluster = Resources(mec_process_capable=(8.0 * 1024 * 1024),
                                     mobile_process_capable=(1.0 * 1024 * 1024), bandwith_up=bandwidth, bandwith_dl=bandwidth)

        # Training environment: batches of 100 task graphs with 15 tasks each.
        env = OffloadingEnvironment(resource_cluster=resource_cluster, batch_size=100, graph_number=100,
                                    graph_file_paths=["./RLWorkflow/offloading_data/offload_random15/random.15."],
                                    time_major=False,
                                    lambda_t=lambda_t, lambda_e=lambda_e)

        # Held-out evaluation environment; pre-compute the HEFT baseline cost.
        eval_envs = []
        eval_env_1 = OffloadingEnvironment(resource_cluster=resource_cluster, batch_size=100, graph_number=100,
                                           graph_file_paths=["./RLWorkflow/offloading_data/offload_random15_test/random.15."],
                                           time_major=False,
                                           lambda_t=lambda_t, lambda_e=lambda_e)
        eval_env_1.calculate_heft_cost()

        eval_envs.append(eval_env_1)
        print("Finishing initialization of environment")

        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            mean_reward_track = learn(network="default", env=env, eval_envs=eval_envs, nsample_episode=10, nupdates=nupdates,
                                      max_grad_norm=1.0, noptepochs=4, gamma=0.99,
                                      total_timesteps=80000, lr=5e-4, optbatchnumber=500)
            # BUG FIX: redundant ``sess.close()`` removed -- the ``with``
            # context manager already closes the session on exit.
        tf.reset_default_graph()
        return mean_reward_track
# PPO / GAE hyper-parameters and the checkpoint to restore.
gamma = 0.99          # discount factor
lam = 0.95            # GAE lambda
ent_coef = 0.01       # entropy bonus coefficient
vf_coef = 0.5         # value-function loss coefficient
max_grad_norm = 0.5   # gradient clipping threshold
load_path = "./checkpoint/model.ckpt"

resource_cluster = Resources(mec_process_capable=(10.0 * 1024 * 1024),
                             mobile_process_capable=(1.0 * 1024 * 1024),
                             bandwith_up=3.0,
                             bandwith_dl=3.0)

env = OffloadingEnvironment(
    resource_cluster=resource_cluster,
    batch_size=100,
    graph_number=100,
    graph_file_paths=["../data/offload_random15/random.15."],
    time_major=False)

# Shared placeholders: batched observation sequences and their lengths.
ob = tf.placeholder(dtype=tf.float32, shape=[None, None, env.input_dim])
ob_length = tf.placeholder(dtype=tf.int32, shape=[None])


# PEP 8 (E731): assign a proper ``def`` instead of a named lambda.
def make_model():
    """Build the seq2seq model over the shared placeholders."""
    return S2SModel_Back(ob=ob,
                         ob_length=ob_length,
                         ent_coef=ent_coef,
                         vf_coef=vf_coef,
                         max_grad_norm=max_grad_norm)


model = make_model()
model.load(load_path)

eval_runner = Runner(env=env, model=model, nepisode=1, gamma=gamma, lam=lam)
# --- Example 3 (score: 0) ---
# Test with the MEC CPU running cycles set to 2, 4, 6, 8, or 12.

if __name__ == "__main__":
    # Equal weighting between latency and energy in the QoE objective.
    lambda_t = 0.5
    lambda_e = 0.5

    # MEC vs. mobile processing capability, with symmetric 7.0 up/down
    # bandwidth.  NOTE(review): units of *_process_capable are not visible
    # here -- presumably cycles/s scaled by 1024*1024; confirm in Resources.
    resource_cluster = Resources(mec_process_capable=(4.0 * 1024 * 1024),
                                 mobile_process_capable=(1.0 * 1024 * 1024),
                                 bandwith_up=7.0,
                                 bandwith_dl=7.0)

    # Training environment over the random-15 task graphs.
    env = OffloadingEnvironment(
        resource_cluster=resource_cluster,
        batch_size=100,
        graph_number=100,
        graph_file_paths=["../data/offload_random15/random.15."],
        #"../data/offload_random15/random.15."],
        time_major=False,
        lambda_t=lambda_t,
        lambda_e=lambda_e)

    #env.calculate_optimal_solution()
    # Evaluation environment built with the same settings and data path.
    eval_envs = []
    eval_env_1 = OffloadingEnvironment(
        resource_cluster=resource_cluster,
        batch_size=100,
        graph_number=100,
        graph_file_paths=["../data/offload_random15/random.15."],
        time_major=False,
        lambda_t=lambda_t,
        lambda_e=lambda_e)
# --- Example 4 (score: 0) ---
    # NOTE(review): this fragment continues an enclosing block whose header
    # is not visible here, and it is truncated at the end -- the final
    # ``eval_env_1`` construction is cut off mid-call.
    resource_cluster = Resources(mec_process_capable=(8.0 * 1024 * 1024),
                                 mobile_process_capable=(1.0 * 1024 * 1024),
                                 bandwith_up=8.0,
                                 bandwith_dl=8.0)

    # Training environment mixing task graphs of sizes 10 through 50.
    env = OffloadingEnvironment(
        resource_cluster=resource_cluster,
        batch_size=500,
        graph_number=500,
        graph_file_paths=[
            "./RLWorkflow/offloading_data/offload_random10/random.10.",
            "./RLWorkflow/offloading_data/offload_random15/random.15.",
            "./RLWorkflow/offloading_data/offload_random20/random.20.",
            "./RLWorkflow/offloading_data/offload_random25/random.25.",
            "./RLWorkflow/offloading_data/offload_random30/random.30.",
            "./RLWorkflow/offloading_data/offload_random35/random.35.",
            "./RLWorkflow/offloading_data/offload_random40/random.40.",
            "./RLWorkflow/offloading_data/offload_random45/random.45.",
            "./RLWorkflow/offloading_data/offload_random50/random.50.",
        ],
        #"../data/offload_random15/random.15."],
        time_major=False,
        lambda_t=lambda_t,
        lambda_e=lambda_e)

    #env.calculate_optimal_solution()
    eval_envs = []
    eval_env_1 = OffloadingEnvironment(
        resource_cluster=resource_cluster,
        batch_size=100,
# --- Example 5 (score: 0) ---
def _print_result(title, latency_batch, energy_batch, qoe_batch):
    """Print the mean latency, energy and QoE of one scheduling baseline."""
    print(title)
    print("latency: ", np.mean(latency_batch))
    print("energy: ", np.mean(energy_batch))
    print("qoe: ", np.mean(qoe_batch))
    print()


def test_case(cpu_frequency, graph_file_path, lambda_t=0.5, lambda_e=0.5):
    """Compare baseline offloading schedulers at a given MEC CPU capability.

    Evaluates HEFT, greedy, round-robin, random, all-local, all-remote and
    the optimal-QoE schedule on the task graphs loaded from
    *graph_file_path*, printing mean latency, energy and QoE for each.

    Args:
        cpu_frequency: MEC processing capability passed to ``Resources``.
        graph_file_path: Prefix of the task-graph files to load.
        lambda_t: Weight of the latency term in the QoE objective.
        lambda_e: Weight of the energy term in the QoE objective.
    """
    resource_cluster = Resources(mec_process_capable=(cpu_frequency),
                                 mobile_process_capable=(1.0 * 1024 * 1024),
                                 bandwith_up=8.0,
                                 bandwith_dl=8.0)

    # Typo fix in the banner: "freqency" -> "frequency".
    print("========= Testing the cpu frequency {} Hz. ============".format(
        cpu_frequency))
    env = OffloadingEnvironment(resource_cluster=resource_cluster,
                                batch_size=100,
                                graph_number=100,
                                graph_file_paths=[graph_file_path],
                                time_major=False,
                                lambda_t=lambda_t,
                                lambda_e=lambda_e)

    env.calculate_optimal_qoe()

    def _plan_cost(use_heft):
        """Latency/energy/QoE of the greedy planner (HEFT order if asked)."""
        plans, _finish_time_batchs = env.greedy_solution(heft=use_heft)
        latency, energy = env.get_running_cost_by_plan_batch(
            plans[0], env.task_graphs[0])
        qoe = calculate_qoe(np.array(latency), np.array(energy), env)
        return latency, energy, qoe

    # HEFT and greedy share the same cost pipeline; only the flag differs.
    heft_latency_batch, heft_energy_batch, heft_qoe_batch = _plan_cost(True)
    greedy_latency_batch, greedy_energy_batch, greedy_qoe_batch = _plan_cost(False)

    # Round-robin baseline.
    rrb_latency_batch, rrb_energy_batch = env.round_robin_solution()
    rrb_qoe_batch = calculate_qoe(rrb_latency_batch, rrb_energy_batch, env)

    # Random-assignment baseline.
    random_latency_batch, random_energy_batch = env.random_solution()
    random_qoe_batch = calculate_qoe(random_latency_batch, random_energy_batch,
                                     env)

    # Everything executed on the mobile device.
    all_local_latency_batch, all_local_energy_batch = env.get_all_locally_execute_time_batch()
    all_local_qoe_batch = calculate_qoe(all_local_latency_batch,
                                        all_local_energy_batch, env)

    # Everything offloaded to the MEC server.
    all_remote_latency_batch, all_remote_energy_batch = env.get_all_mec_execute_time_batch()
    all_remote_qoe_batch = calculate_qoe(all_remote_latency_batch,
                                         all_remote_energy_batch, env)

    print(graph_file_path)
    # Typo fixes: "algorighm"/"algorigthm" -> "algorithm", "roubin" -> "robin".
    _print_result("HEFT algorithm result: ",
                  heft_latency_batch, heft_energy_batch, heft_qoe_batch)
    _print_result("Greedy algorithm result: ",
                  greedy_latency_batch, greedy_energy_batch, greedy_qoe_batch)
    _print_result("round robin algorithm result: ",
                  rrb_latency_batch, rrb_energy_batch, rrb_qoe_batch)
    _print_result("random algorithm result: ",
                  random_latency_batch, random_energy_batch, random_qoe_batch)
    _print_result("all local algorithm result: ",
                  all_local_latency_batch, all_local_energy_batch, all_local_qoe_batch)
    _print_result("all remote algorithm result: ",
                  all_remote_latency_batch, all_remote_energy_batch, all_remote_qoe_batch)

    print("optimal qoe algorithm result: ")
    print("optimal qoe: ", np.mean(env.optimal_qoe))
    print("optimal qoe latency: ", np.mean(env.optimal_qoe_latency))
    print("optimal qoe energy: ", np.mean(env.optimal_qoe_energy))
# --- Example 6 (score: 0) ---
]

# NOTE(review): the opening of the list closed by "]" above (presumably
# ``graph_file_pahts``) and the ``lambda_t``/``lambda_e`` assignments are
# outside this view; the ``for`` body is also truncated at the end.
print("=============Test heurestic methods for different n. =============")
print("lambda_t: ", lambda_t)
print("lambda_e: ", lambda_e)

# Evaluate the heuristic schedulers on each task-graph size in turn.
for graph_file_path in graph_file_pahts:
    resource_cluster = Resources(mec_process_capable=(8.0 * 1024 * 1024),
                                 mobile_process_capable=(1.0 * 1024 * 1024),
                                 bandwith_up=8.0,
                                 bandwith_dl=8.0)

    env = OffloadingEnvironment(resource_cluster=resource_cluster,
                                batch_size=100,
                                graph_number=100,
                                graph_file_paths=[graph_file_path],
                                time_major=False,
                                lambda_t=lambda_t,
                                lambda_e=lambda_e)
    # Exhaustive optimal-QoE search only for small graphs (< 20 tasks),
    # presumably because its cost grows exponentially -- confirm.
    if env.task_graphs[0][0].task_number < 20:
        env.calculate_optimal_qoe()

    # Calculate the heft algorithms latency, energy and qoe
    plans, finish_time_batchs = env.greedy_solution(heft=True)
    heft_latency_batch, heft_energy_batch = env.get_running_cost_by_plan_batch(
        plans[0], env.task_graphs[0])
    latency_batch = np.array(heft_latency_batch)
    energy_batch = np.array(heft_energy_batch)
    heft_qoe_batch = calculate_qoe(latency_batch, energy_batch, env)

    # Calculate the greedy algorithms latency, energy and qoe