task_num = 3    # number of tasks per WD

# Load data
task_size = sio.loadmat('./data/MUMT_data_3x3')['task_size']
gain = sio.loadmat('./data/MUMT_data_3x3')['gain_min']

# generate the train and test data sample index
# data are split as 80:20
# training data are randomly sampled with duplication if N > total data size
split_idx = int(.8 * len(task_size))
num_test = min(len(task_size) - split_idx, N - int(.8 * N))    # test data size

mem = MemoryDNN(net=[WD_num * task_num, 120, 80, WD_num * task_num],
                net_num=net_num,
                learning_rate=0.01,
                training_interval=10,
                batch_size=128,
                memory_size=1024)

start_time = time.time()
gain_his = []
gain_his_ratio = []
knm_idx_his = []
m_li = []
env = MU.MUMT(3, 3, rand_seed=1)

for i in range(N):
    if i % (N // 100) == 0:
        print("----------------------------------------------rate of progress:%0.2f" % (i / N))
    if i < N - num_test:
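        # --- sketch continuation (an assumption, not the original code): in the
        # standard DROO-style split, training frames cycle through the first 80%
        # of samples (hence the duplication when N exceeds the dataset size),
        # and test frames index the held-out tail:
        i_idx = i % split_idx                        # training sample index
    else:
        i_idx = i - N + num_test + split_idx         # test sample index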
M = 6           # number of frequency blocks
Memory = 1024   # capacity of memory structure

# Load data
PathInput = "D:/DL_MEC/Data_4x6/Channels/"  # "D:/DL_MEC/Data_4x6/Input/"
PathMaxCompRate = "D:/DL_MEC/Data_4x6/MaxCompRate/MaxCompRate.csv"
MaxCompRate = np.genfromtxt(PathMaxCompRate, delimiter=',')

# split the data 80:20 into training and test sets
split_idx = int(.8 * len(os.listdir(PathInput)))
num_test = min(len(os.listdir(PathInput)) - split_idx, n - int(.8 * n))    # test data size

mem = MemoryDNN(net=[N * M, 120, 80, N * M],
                net_num=net_num,
                learning_rate=0.01,
                training_interval=10,
                batch_size=128,
                memory_size=1024)

start_time = time.time()
gain_his = []
gain_his_ratio = []
knm_idx_his = []

for i in range(n):
    if i % (n // 100) == 0:
        print("----------------------------------------------rate of progress:%0.2f" % (i / n))
    if i < n - num_test:
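        # --- sketch continuation (an assumption, not the original code): same
        # train/test index rule as in the other variants, followed by loading
        # the i_idx-th channel file; the CSV format and the sorted file
        # ordering are assumptions, and the N-by-M gain matrix is flattened to
        # a length N*M vector to match the DNN input layer:
        i_idx = i % split_idx                        # training sample index
    else:
        i_idx = i - n + num_test + split_idx         # test sample index
    fname = sorted(os.listdir(PathInput))[i_idx]
    h = np.genfromtxt(os.path.join(PathInput, fname), delimiter=',').reshape(-1)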
rate = sio.loadmat('./data/data_%d' % N)['output_obj']

# increase h to close to 1 for better training; it is a trick widely adopted in deep learning
channel = channel * 1000000

# generate the train and test data sample index
# data are split as 80:20
# training data are randomly sampled with duplication if n > total data size
split_idx = int(.8 * len(channel))
num_test = min(len(channel) - split_idx, n - int(.8 * n))    # test data size

mem = MemoryDNN(net=[N, 120, 80, N],
                learning_rate=0.01,
                training_interval=10,
                batch_size=128,
                memory_size=Memory)

start_time = time.time()
rate_his = []
rate_his_ratio = []
mode_his = []
k_idx_his = []
K_his = []

h = channel[0, :]

# initialize the weights by setting case_id = 0
weight, rate = alternate_weights(0)
print("WD weights at time frame %d:" % (0), weight)
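# --- A minimal sketch (not the original code) of the per-frame step that a
# DROO-style loop typically performs next; the decode/encode/bisection names
# come from the DROO reference implementation, and the exact arguments here
# are assumptions:
for i in range(n):
    i_idx = i % split_idx if i < n - num_test else i - n + num_test + split_idx
    h = channel[i_idx, :]
    m_list = mem.decode(h, K, 'OP')                  # generate K candidate offloading modes
    r_list = [bisection(h / 1000000, m, weight)[0] for m in m_list]
    k_idx_his.append(np.argmax(r_list))              # index of the best candidate
    mem.encode(h, m_list[np.argmax(r_list)])         # store (h, best mode) for DNN training
    rate_his.append(np.max(r_list))
    rate_his_ratio.append(rate_his[-1] / rate[i_idx][0])   # ratio to the CD optimum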
rate = sio.loadmat('./data/data_%d' % N)['output_obj']  # this rate is only used to plot figures; never used to train DROO. Generated by the CD method in optimization.py
mode = sio.loadmat('./data/data_%d' % N)['output_mode']

# increase h to close to 1 for better training; it is a trick widely adopted in deep learning
channel = channel * 1000000

# generate the train and test data sample index
# data are split as 90:10
# training data are randomly sampled with duplication if n > total data size
split_idx = int(.9 * len(channel))    # channel size: 30000*10
num_test = len(channel) - split_idx

mem = MemoryDNN(net=[N, 60, 40, N],
                learning_rate=0.001,
                training_interval=training_interval,
                test_interval=test_interval,
                batch_size=batch_size,
                memory_size=memory_size,
                output_graph=False)

start_time = time.time()
print('\n' + '=' * 20 + 'Start training' + '=' * 20)
print('User num:{}\nChannel num (Time frames):{:,}\nK:{}\nDecode mode:{}\nMemory size:{}\nDelta (K update interval):{}'
      .format(N, n, K, decoder_mode, memory_size, Delta))

rate_his = []
rate_his_ratio = []
mode_his = []
k_idx_his = []
K_his = []
test_rate_his = []
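# --- A minimal sketch (an assumption based on the DROO reference code) of the
# adaptive-K rule that the 'Delta (K update interval)' setting above controls;
# it sits at the top of the training loop that follows:
for i in range(n):
    if i > 0 and i % Delta == 0:
        # set K to one more than the largest candidate index actually picked
        # during the last Delta frames, capped at the number of users N
        max_k = max(k_idx_his[-Delta:-1]) + 1 if Delta > 1 else k_idx_his[-1] + 1
        K = min(max_k + 1, N)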