Example #1
    plt.plot(bandwidth, cost_of_all, '^-', linewidth=0.4, label='all selection')
    plt.plot(bandwidth, cost_of_local, '<-', linewidth=0.4, label='only local selection')
    plt.plot(bandwidth, cost_of_mec, '>-', linewidth=0.4, label='only MEC selection')
    # distinct markers so the three UE curves remain distinguishable
    plt.plot(bandwidth, cost_of_all_15ge, 's-', linewidth=0.2, label='all selection of 15 UEs')
    plt.plot(bandwidth, cost_of_all_20ge, 'd-', linewidth=0.2, label='all selection of 20 UEs')
    plt.plot(bandwidth, cost_of_all_25ge, 'o-', linewidth=0.2, label='all selection of 25 UEs')

    plt.grid(True)      # show grid

    plt.xlabel('Channel Bandwidth')
    plt.ylabel('Sum Cost')
    plt.legend(loc='upper right')    # legend in the upper right
    plt.show()

    data = DTE("./picture/pic3/all")   ##  TLIU
    print(cost_of_all)
    data.write(cost_of_all)

    data = DTE("./picture/pic3/mec")   ##  TLIU
    print(cost_of_mec)
    data.write(cost_of_mec)

    data = DTE("./picture/pic3/local")   ##  TLIU
    print(cost_of_local)
    data.write(cost_of_local)

    data = DTE("./picture/pic3/all_15_UEs")   ##  TLIU
    print(cost_of_all_15ge)
    data.write(cost_of_all_15ge)
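
Every example on this page persists its series through a DTE helper (the lines tagged ##  TLIU), but the class itself never appears in the excerpts. Below is a minimal sketch of a compatible writer, assuming DTE takes an output path and write() dumps a list of numbers as plain text, one value per line; the real T610/MEC implementation may differ:

import os

class DTE:
    # Minimal stand-in for the project's DTE writer (assumed behavior).
    def __init__(self, path):
        # Remember the target path and create its directory if needed.
        self.path = path
        os.makedirs(os.path.dirname(path) or ".", exist_ok=True)

    def write(self, values):
        # One value per line keeps the series trivial to reload later.
        with open(self.path, "w") as f:
            for v in values:
                f.write(str(v) + "\n")

Under that assumption, DTE("./picture/pic3/all").write(cost_of_all) leaves a reloadable text series beside the figure output.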
Example #2
            queue_relay_array[i].updateQ(bn[i],
                                         actions_set[iteration_actions[i]][0],
                                         rff[i])
            queue_relay_array[i].updateQx()
            queue_relay_array[i].updateQy()
            queue_relay_array[i].updateQz()

        # reward step
        reward_history.append(sum(reward))
        for i in range(user_num):
            wolf_agent_array[i].observe(reward=reward[i])

    for i in range(user_num):
        print(wolf_agent_array[i].pi_average)

    # plt.plot(np.arange(len(reward_history)), reward_history, label="all")
    # plt.title('wolf_dl2-dh6')
    # # plt.show()

    # data = DTE("./picture/pic1/wolf_dl2_dh6")  ##  TLIU
    # print(OUTPUT)
    # data.write(OUTPUT)

    # plt.plot(np.arange(len(PR[1])), PR[1])
    # plt.title('PR[1]')
    # plt.show()

    for i in range(user_num):

        data = DTE("./picture/pic1/PR" + str(i))  ##  TLIU
        data.write(PR[i])
Example #3
    plt.plot(usernumber, cost_of_local, '<-', linewidth=0.4, label='only local selection')
    plt.plot(usernumber, cost_of_mec, '>-', linewidth=0.4, label='only MEC selection')
    # distinct markers so the three bandwidth curves remain distinguishable
    plt.plot(usernumber, cost_of_all_6mhz, 's-', linewidth=0.2, label='all selection of 6 MHz')
    plt.plot(usernumber, cost_of_all_8mhz, 'd-', linewidth=0.2, label='all selection of 8 MHz')
    plt.plot(usernumber, cost_of_all_12mhz, 'o-', linewidth=0.2, label='all selection of 12 MHz')

    plt.grid(True)      # show grid

    plt.xlabel('Number of UEs')
    plt.ylabel('Sum Cost')
    plt.legend(loc='upper left')    # legend in the upper left
    plt.show()

    data = DTE("./picture/pic2/all")   ##  TLIU
    print(cost_of_all)
    data.write(cost_of_all)

    data = DTE("./picture/pic2/mec")   ##  TLIU
    print(cost_of_mec)
    data.write(cost_of_mec)

    data = DTE("./picture/pic2/local")   ##  TLIU
    print(cost_of_local)
    data.write(cost_of_local)

    data = DTE("./picture/pic2/all_6MHZ")   ##  TLIU
    print(cost_of_all_6mhz)
    data.write(cost_of_all_6mhz)
Example #4
File: new_run.py Project: T610/MEC
        reward, bn, lumbda, rff = game.step(actions=iteration_actions)
        print("episode", episode, "reward", sum(reward))
        OUTPUT.append(sum(reward))

        for i in range(user_num):
            #wolf agent act
            # update_Queue_relay
            queue_relay_array[i].lumbda = lumbda[i]
            queue_relay_array[i].updateQ(bn[i],
                                         actions_set[iteration_actions[i]][0],
                                         rff[i])
            queue_relay_array[i].updateQx()
            queue_relay_array[i].updateQy()
            queue_relay_array[i].updateQz()

        # reward step
        reward_history.append(sum(reward))
        for i in range(user_num):
            wolf_agent_array[i].observe(reward=reward[i])

    for i in range(user_num):
        print(wolf_agent_array[i].pi_average)

    plt.plot(np.arange(len(reward_history)), reward_history, label="all")
    plt.show()

    data = DTE("./picture/pic2/wolf")  ##  TLIU
    print(OUTPUT)
    data.write(OUTPUT)
Example #5
    # data = DTE("./picture/pic1/wolf_dl2_dh6")  ##  TLIU
    # print(OUTPUT)
    # data.write(OUTPUT)


if __name__ == '__main__':
    dl = [0.0005, 0.001, 0.0015, 0.002, 0.0025, 0.003, 0.0035, 0.004]
    dh = [0.004, 0.006, 0.008, 0.01, 0.012, 0.014, 0.016]

    reward_dif_wolf = []
    # reward_dif_wolf.append(difwolfphc.wolf_cal_reward(DL=0.001, DH=0.006))

    # print('reward_differ_wolf',reward_dif_wolf)
    # for i in range(len(dl)):
    #     for j in range(len(dh)):
    i = 6
    j = 5
    difwolfphc = differ_DL_DH()
    print("================================== i, j ", i, j)
    reward_dif_wolf.append(difwolfphc.wolf_cal_reward(DL=dl[i], DH=dh[j]))

    data = DTE("./picture/pic1/differ_wolf_dl_dh")  ##  TLIU
    print('reward_differ_wolf', reward_dif_wolf)
    data.write(reward_dif_wolf)
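
The commented-out loops above indicate that the fixed pair i = 6, j = 5 stands in for a full sweep of the (dl, dh) grid. A sketch restoring those loops, reusing differ_DL_DH and wolf_cal_reward exactly as the excerpt uses them:

    reward_dif_wolf = []
    for i in range(len(dl)):
        for j in range(len(dh)):
            # One reward sample per (DL, DH) combination.
            difwolfphc = differ_DL_DH()
            reward_dif_wolf.append(difwolfphc.wolf_cal_reward(DL=dl[i], DH=dh[j]))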
Example #6
        OUTPUT.append(sum(reward))

        for i in range(user_num):
            #wolf agent act
            # update_Queue_relay
            queue_relay_array[i].lumbda = lumbda[i]
            queue_relay_array[i].updateQ(bn[i], actions_set[iteration_actions[i]][0], rff[i])
            queue_relay_array[i].updateQx()
            queue_relay_array[i].updateQy()
            queue_relay_array[i].updateQz()
                
        # reward step
        reward_history.append(sum(reward))
        for i in range(user_num):
            wolf_agent_array[i].observe(reward=reward[i])

    for i in range(user_num):
        print(wolf_agent_array[i].pi_average)
    plt.plot(np.arange(len(reward_history)), reward_history, label="only local")
    plt.show()
    data = DTE("./picture/pic1/local-new")   ##  TLIU
    print(OUTPUT)
    data.write(OUTPUT)
Example #7
        #print('Q value :'+str(Q_array)+str(Qx_array)+str(Qy_array)+str(Qz_array))

        reward, bn, lumbda, rff = game.step(actions=iteration_actions)
        print("episode", episode, "reward", sum(reward))
        OUTPUT.append(sum(reward))

        for i in range(user_num):
            #wolf agent act
            # update_Queue_relay
            queue_relay_array[i].lumbda = lumbda[i]
            queue_relay_array[i].updateQ(bn[i],
                                         actions_set[iteration_actions[i]][0],
                                         rff[i])
            queue_relay_array[i].updateQx()
            queue_relay_array[i].updateQy()
            queue_relay_array[i].updateQz()

        # reward step
        reward_history.append(sum(reward))
        for i in range(user_num):
            wolf_agent_array[i].observe(reward=reward[i])

    for i in range(user_num):
        print(wolf_agent_array[i].pi_average)
    plt.plot(np.arange(len(reward_history)), reward_history, label="only MEC")
    plt.show()

    data = DTE("./picture/pic1/mec")  ##  TLIU
    print(OUTPUT)
    data.write(OUTPUT)
Example #8
        reward, bn, lumbda, rff = game.step(actions=iteration_actions)
        print("episode", episode, "reward", sum(reward))
        OUTPUT.append(sum(reward))

        for i in range(user_num):
            # wolf agent act
            # update_Queue_relay
            queue_relay_array[i].lumbda = lumbda[i]
            queue_relay_array[i].updateQ(bn[i],
                                         actions_set[iteration_actions[i]][0],
                                         rff[i])
            queue_relay_array[i].updateQx()
            queue_relay_array[i].updateQy()
            queue_relay_array[i].updateQz()

        # reward step
        reward_history.append(sum(reward))
        for i in range(user_num):
            wolf_agent_array[i].observe(reward=reward[i])

    for i in range(user_num):
        print(wolf_agent_array[i].pi_average)

    plt.plot(np.arange(len(reward_history)), reward_history, label="all")
    plt.title('wolf_dl2-dh5')
    plt.show()

    data = DTE("./picture/pic1/wolf_dl2_dh5")  ##  TLIU
    print(OUTPUT)
    data.write(OUTPUT)
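
All eight examples only ever write their series out; reading one back is the natural counterpart when regenerating plots. A short sketch, assuming the one-value-per-line format hypothesized for DTE in Example #1:

import matplotlib.pyplot as plt

def load_series(path):
    # Parse one float per line, skipping blanks.
    with open(path) as f:
        return [float(line) for line in f if line.strip()]

reward_history = load_series("./picture/pic1/wolf_dl2_dh5")
plt.plot(range(len(reward_history)), reward_history, label="all")
plt.legend(loc='upper right')
plt.show()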