def Greedy_Scheduling(general_para, gains_diagonal, gains_nondiagonal, prop_weights):
    n_layouts, N = np.shape(gains_diagonal)
    assert np.shape(prop_weights) == (n_layouts, N)
    SNRS = gains_diagonal * general_para.tx_power / general_para.output_noise_power
    direct_rates = general_para.bandwidth * np.log2(1 + SNRS / general_para.SNR_gap)  # layouts X N; O(N) computation complexity
    sorted_links_indices = np.argsort(prop_weights * direct_rates, axis=1)
    allocs = np.zeros([n_layouts, N])
    previous_weighted_sum_rates = np.zeros([n_layouts])
    for j in range(N - 1, -1, -1):
        # tentatively schedule the link with the jth largest weighted direct rate
        allocs[np.arange(n_layouts), sorted_links_indices[:, j]] = 1
        rates = helper_functions.compute_rates(general_para, allocs, gains_diagonal, gains_nondiagonal)
        weighted_sum_rates = np.sum(rates * prop_weights, axis=1)  # (n_layouts,)
        # keep this link scheduled only in layouts where the weighted sum rate improved
        allocs[np.arange(n_layouts), sorted_links_indices[:, j]] = (weighted_sum_rates > previous_weighted_sum_rates).astype(int)
        previous_weighted_sum_rates = np.maximum(weighted_sum_rates, previous_weighted_sum_rates)
    return allocs
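# compute_rates() is referenced throughout but lives in helper_functions. Below is a
# minimal illustrative sketch, assuming standard SINR-based Shannon rates with an SNR
# gap and the same general_para fields used above; it is an assumption for
# illustration, not the repository's actual implementation.
import numpy as np

def compute_rates_sketch(general_para, allocs, gains_diagonal, gains_nondiagonal):
    # allocs, gains_diagonal: (n_layouts, N); gains_nondiagonal: (n_layouts, N, N)
    signals = allocs * gains_diagonal * general_para.tx_power  # desired-link received power
    # interference at each receiver: sum of cross-link gains from all active transmitters
    interferences = np.squeeze(
        np.matmul(gains_nondiagonal, np.expand_dims(allocs, axis=-1)), axis=-1) * general_para.tx_power
    SINRs = signals / (interferences + general_para.output_noise_power)
    return general_para.bandwidth * np.log2(1 + SINRs / general_para.SNR_gap)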
def test():
    model.eval()
    total_loss = 0
    for data in test_loader:
        data = data.to(device)
        with torch.no_grad():
            start = time.time()
            out = model(data)
            end = time.time()
            print('dnn time', end - start)
            loss = sr_loss(data, out, test_K)
            total_loss += loss.item() * data.num_graphs
            power = out[:, 2]
            power = torch.reshape(power, (-1, test_K))
            Y = power.cpu().numpy()  # move to host before NumPy conversion (no-op on CPU)
            rates = helper_functions.compute_rates(test_config, Y, directLink_channel_losses, crossLink_channel_losses)
            sr = np.mean(np.sum(rates, axis=1))
            print('actual_rates:', sr)
    return total_loss / test_layouts
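# sr_loss() above is the unsupervised training objective. A minimal sketch of a
# negative-sum-rate loss over K links, written against plain tensors rather than the
# graph batch used above (the signature and unit noise power are assumptions; the
# repository's sr_loss may differ):
import torch

def sr_loss_sketch(abs_H, p, K, noise_power=1.0):
    # abs_H: (batch, K, K), abs_H[b, i, j] = |h_ij|, channel from tx j to rx i
    # p: (batch, K) transmit power levels in [0, 1]
    H2 = abs_H ** 2
    desired = torch.diagonal(H2, dim1=1, dim2=2) * p             # (batch, K)
    interference = torch.einsum('bij,bj->bi', H2, p) - desired   # (batch, K)
    rates = torch.log2(1.0 + desired / (interference + noise_power))
    return -torch.mean(torch.sum(rates, dim=1))  # negated so minimizing maximizes sum rate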
N]).astype(float)
allocs_all_methods["All Active"] = np.ones([n_layouts, N]).astype(float)
print("<<<<<<<<<<<<<<<<EVALUATION>>>>>>>>>>>>>>>>>>>")
print("[Percentages of Scheduled Links] ")
for method_key in allocs_all_methods.keys():
    print("[{}]: {}%; ".format(method_key, round(np.mean(allocs_all_methods[method_key]) * 100, 2)), end="")
print("\n")
for method_key in allocs_all_methods.keys():
    all_links_rates = helper_functions.compute_rates(
        general_para, allocs_all_methods[method_key],
        directLink_channel_losses, crossLink_channel_losses)  # n_layouts X N
    assert np.shape(all_links_rates) == (n_layouts, N)
    sum_rates = np.sum(all_links_rates, axis=-1)
    sum_rates_all_methods[method_key] = sum_rates
print("[Sum-Rate Performance Ratios Averaged over all Layouts]")
assert "FP" in allocs_all_methods.keys(), "[evaluate.py] Didn't include FP in sum-rate computation"
for method_key in allocs_all_methods.keys():
    if (method_key == "FP"):
        continue
    ratios = sum_rates_all_methods[method_key] / sum_rates_all_methods["FP"] * 100
    print("[{}]: avg {}% of FP;".format(method_key, round(np.mean(ratios), 2)), end="")
test_config = init_parameters()
layouts, test_dists = wg.generate_layouts(test_config, test_layouts)
test_path_losses = wg.compute_path_losses(test_config, test_dists)
norm_train, norm_test = normalize_data(1 / train_dists, 1 / test_dists)
directLink_channel_losses = helper_functions.get_directLink_channel_losses(test_path_losses)
crossLink_channel_losses = helper_functions.get_crossLink_channel_losses(test_path_losses)
Y1 = FP_optimize(test_config, test_path_losses, np.ones([test_layouts, test_K]))
rates1 = helper_functions.compute_rates(test_config, Y1, directLink_channel_losses, crossLink_channel_losses)
sr1 = np.mean(np.sum(rates1, axis=1))
Pini = np.random.rand(test_layouts, test_K, 1)
Y2 = wf.batch_WMMSE2(Pini, np.ones([test_layouts, test_K]), np.sqrt(test_path_losses), 1, var)
rates2 = helper_functions.compute_rates(test_config, Y2, directLink_channel_losses, crossLink_channel_losses)
sr2 = np.mean(np.sum(rates2, axis=1))
print('FPLinQ:', sr1)
print('WMMSE:', sr2)
train_data_list = proc_data(train_path_losses, train_dists, norm_train,
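# normalize_data() above standardizes the inverse distances (a proxy for channel
# strength). A minimal sketch, assuming plain z-score normalization with statistics
# taken from the training split only (an assumption; the actual helper may normalize
# per-feature or differently):
def normalize_data_sketch(train_data, test_data):
    mean, std = np.mean(train_data), np.std(train_data)
    return (train_data - mean) / std, (test_data - mean) / std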
# ================= error ========================
errSpace = FunctionSpace(refine(mesh), "DG", p + 2)
e = ue - interpolate(uh, errSpace)
l_err = sqrt(assemble(e**2 * dx))
print("l2 err = ", l_err)
conf_err.append(l_err)
e_nc = ue - interpolate(uh_nc, errSpace)
l_nc_err = sqrt(assemble(e_nc**2 * dx))
print("l2 non-conforming err = ", l_nc_err)
nconf_err.append(l_nc_err)
h_vec.append(1.0 / N)
r_c = help.compute_rates(h_vec, conf_err)
r_nc = help.compute_rates(h_vec, nconf_err)
print("h = ", h_vec)
print("conf_err = ", conf_err)
print("conf_rates = ", r_c)
print("nconf_err = ", nconf_err)
print("nconf_rates = ", r_nc)

# ================= plotting ==============================
#plot(uh)
#plot(uh_nc)
#interactive()
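# help.compute_rates() here estimates convergence rates from mesh sizes and errors;
# it is unrelated to the wireless compute_rates above. A minimal sketch, assuming the
# standard two-point estimate r_i = log(e_{i-1}/e_i) / log(h_{i-1}/h_i) between
# successive refinements:
import math

def compute_rates_sketch(h_vec, err_vec):
    return [math.log(err_vec[i - 1] / err_vec[i]) / math.log(h_vec[i - 1] / h_vec[i])
            for i in range(1, len(h_vec))]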
allocs_all_methods = {}
rates_all_methods = {}
superSets_all_methods = {}  # only for FP and neural network
print("FP Log Utility Optimization...")
allocs_all_timeSlots = []
rates_all_timeSlots = []
proportional_fairness_weights = np.ones([n_layouts, N])
for i in range(n_timeSlots):
    if ((i + 1) * 100 / n_timeSlots % 25 == 0):
        print("At {}/{} time slots...".format(i + 1, n_timeSlots))
    allocs = benchmarks.FP(general_para, channel_losses, proportional_fairness_weights)
    rates = helper_functions.compute_rates(general_para, allocs, directLink_channel_losses, crossLink_channel_losses)
    allocs_all_timeSlots.append(allocs)
    rates_all_timeSlots.append(rates)
    proportional_fairness_weights = helper_functions.update_proportional_fairness_weights(
        proportional_fairness_weights, rates)
allocs_all_timeSlots = np.transpose(np.array(allocs_all_timeSlots), (1, 0, 2))
rates_all_timeSlots = np.transpose(np.array(rates_all_timeSlots), (1, 0, 2))
assert np.shape(allocs_all_timeSlots) == np.shape(rates_all_timeSlots) == (n_layouts, n_timeSlots, N)
allocs_all_methods["FP"] = allocs_all_timeSlots
rates_all_methods["FP"] = rates_all_timeSlots
print("FP Not Knowing Fading Log Utility Optimization...")
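# update_proportional_fairness_weights() drives the log-utility objective. A minimal
# sketch, assuming the usual PF rule: exponentially average each link's rate and
# weight it by the inverse of that average (alpha and the exact update are
# assumptions; the real helper presumably reads its smoothing factor from general_para):
def update_pf_weights_sketch(weights, rates, alpha=0.05):
    avg_rates = (1 - alpha) / weights + alpha * rates  # previous average is 1 / weight
    return 1 / avg_rates  # PF weight = inverse of long-term average rate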
def logUtility_scheduling(general_para, layouts, gains_diagonal, gains_nondiagonal, n_timeSlots):
    N = general_para.n_links
    n_layouts = np.shape(layouts)[0]
    neural_net = Convolutional_Neural_Network_Model.Conv_Network(general_para, n_layouts)
    neural_net.build_network()
    neural_net_inputs = helper_functions.process_layouts_inputs(general_para, layouts)
    with neural_net.TFgraph.as_default():
        saver = tf.train.Saver()
        with tf.Session() as sess:
            saver.restore(sess, neural_net.model_filename)
            print("Restored model from: {}!".format(neural_net.model_filename))
            schedules_all_timeSlots = []
            rates_all_timeSlots = []
            supersets_all_timeSlots = []
            proportional_fairness_weights = np.ones([n_layouts, N])
            proportional_fairness_weights_binary = np.ones([n_layouts, N])
            for i in range(1, n_timeSlots + 1):
                if ((i / n_timeSlots * 100) % 20 == 0):
                    print("{}/{} time slots".format(i, n_timeSlots))
                schedules = sess.run(
                    neural_net.outputs_final,
                    feed_dict={
                        neural_net.placeholders['tx_indices_hash']: neural_net_inputs['tx_indices_hash'],
                        neural_net.placeholders['rx_indices_hash']: neural_net_inputs['rx_indices_hash'],
                        neural_net.placeholders['tx_indices_extract']: neural_net_inputs['tx_indices_ext'],
                        neural_net.placeholders['rx_indices_extract']: neural_net_inputs['rx_indices_ext'],
                        neural_net.placeholders['pair_tx_convfilter_indices']: neural_net_inputs['pair_tx_convfilter_indices'],
                        neural_net.placeholders['pair_rx_convfilter_indices']: neural_net_inputs['pair_rx_convfilter_indices'],
                        neural_net.placeholders['subset_links']: proportional_fairness_weights_binary
                    })
                schedules = schedules * proportional_fairness_weights_binary  # zero out links not to be scheduled
                supersets_all_timeSlots.append(proportional_fairness_weights_binary)
                schedules_all_timeSlots.append(schedules)
                rates = helper_functions.compute_rates(general_para, schedules, gains_diagonal, gains_nondiagonal)
                rates_all_timeSlots.append(rates)
                proportional_fairness_weights = helper_functions.update_proportional_fairness_weights(
                    proportional_fairness_weights, rates)
                proportional_fairness_weights_binary = helper_functions.binarize_proportional_fairness_weights(
                    general_para, proportional_fairness_weights)
    schedules_all_timeSlots = np.transpose(np.array(schedules_all_timeSlots), (1, 0, 2))
    rates_all_timeSlots = np.transpose(np.array(rates_all_timeSlots), (1, 0, 2))
    supersets_all_timeSlots = np.transpose(np.array(supersets_all_timeSlots), (1, 0, 2))
    assert np.shape(schedules_all_timeSlots) == np.shape(rates_all_timeSlots) == np.shape(supersets_all_timeSlots) == (n_layouts, n_timeSlots, N)
    return schedules_all_timeSlots, rates_all_timeSlots, supersets_all_timeSlots
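# binarize_proportional_fairness_weights() gates which links the network may schedule
# in the next slot. A minimal sketch, assuming links are kept when their PF weight
# exceeds the per-layout mean (a hypothetical thresholding rule; the real helper may
# use a quantile or a cutoff taken from general_para):
def binarize_pf_weights_sketch(weights):
    thresholds = np.mean(weights, axis=1, keepdims=True)  # one threshold per layout
    return (weights >= thresholds).astype(float)  # 1 = candidate link, 0 = excluded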