import numpy as np
from itertools import cycle

# Project-local modules assumed by these examples (defined elsewhere in the
# surrounding repository, not shown here):
import benchmarks
import utils
from utils import proportional_update_weights


def FP_prop_fair(general_para, gains, directlink_channel_losses,
                 crosslink_channel_losses):
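    """Proportional-fair scheduling with the FP benchmark.

    Each time slot, benchmarks.FP maximizes the weighted sum rate under the
    current proportional-fairness weights; the weights are then updated from
    the achieved rates so that starved links gain priority in later slots.
    Layouts whose quantized FP output schedules no link fall back to
    activating their highest-weight link.
    """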
    number_of_layouts, N = np.shape(directlink_channel_losses)
    allocs_alltime = []
    rates_alltime = []
    prop_weights = np.ones([number_of_layouts, N])
    for i in range(general_para.log_utility_time_slots):
        # progress printout at the halfway point and at completion
        if ((i + 1) * 100 / general_para.log_utility_time_slots % 50 == 0):
            print("[FP Log Util] At {}/{} time slots...".format(
                i + 1, general_para.log_utility_time_slots))
        allocs = benchmarks.FP(general_para,
                               gains,
                               prop_weights,
                               scheduling_output=True)
        if np.any(np.sum(allocs, -1) == 0):
            # Some layouts produced all-zero scheduling outputs after
            # quantization; in those layouts, activate the link with the
            # highest weight instead.
            layouts_got_stuck = np.where(np.sum(allocs, -1) == 0)[0]
            strongest_links_scheduling = (
                prop_weights[layouts_got_stuck] == np.max(
                    prop_weights[layouts_got_stuck], axis=1,
                    keepdims=True)).astype(int)
            allocs[layouts_got_stuck] = strongest_links_scheduling
        rates = utils.compute_rates(general_para, allocs,
                                    directlink_channel_losses,
                                    crosslink_channel_losses)
        allocs_alltime.append(allocs)
        rates_alltime.append(rates)
        prop_weights = proportional_update_weights(general_para, rates,
                                                   prop_weights)
    allocs_alltime = np.transpose(np.array(allocs_alltime), (1, 0, 2))
    rates_alltime = np.transpose(np.array(rates_alltime), (1, 0, 2))
    assert np.shape(allocs_alltime) == np.shape(rates_alltime) == (
        number_of_layouts, general_para.log_utility_time_slots, N)
    return allocs_alltime, rates_alltime
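

# For reference, a minimal sketch of the proportional-fair weight update used
# throughout this file (hypothetical: the repository's real
# proportional_update_weights may differ, e.g. in its smoothing factor or
# argument order). Classic PF keeps an exponential moving average of each
# link's rate and weights every link by the inverse of that average, so
# long-starved links rise in priority:
def proportional_update_weights_sketch(general_para, rates, weights,
                                       alpha=0.05):
    # recover the running average implied by the current inverse weights
    avg_rates = 1.0 / np.maximum(weights, 1e-12)
    # fold in the newly observed per-link rates
    avg_rates = (1.0 - alpha) * avg_rates + alpha * rates
    # new weight: inverse of the averaged rate
    return 1.0 / np.maximum(avg_rates, 1e-12)
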
def Greedy_Scheduling_prop_fair(general_para, directlink_channel_losses,
                                crosslink_channel_losses):
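    """Proportional-fair scheduling with the greedy weighted sum-rate heuristic.

    Same outer loop as FP_prop_fair, but each slot's allocation comes from
    benchmarks.greedy_scheduling instead of the FP solver.
    """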
    number_of_layouts, N = np.shape(directlink_channel_losses)
    allocs_alltime = []
    rates_alltime = []
    prop_weights = np.ones([number_of_layouts, N])
    for i in range(general_para.log_utility_time_slots):
        # progress printout at the halfway point and at completion
        if ((i + 1) * 100 / general_para.log_utility_time_slots % 50 == 0):
            print("[Greedy Log Util] At {}/{} time slots...".format(
                i + 1, general_para.log_utility_time_slots))
        allocs = benchmarks.greedy_scheduling(general_para,
                                              directlink_channel_losses,
                                              crosslink_channel_losses,
                                              prop_weights)
        rates = utils.compute_rates(general_para, allocs,
                                    directlink_channel_losses,
                                    crosslink_channel_losses)
        allocs_alltime.append(allocs)
        rates_alltime.append(rates)
        prop_weights = proportional_update_weights(general_para, rates,
                                                   prop_weights)
    allocs_alltime = np.transpose(np.array(allocs_alltime), (1, 0, 2))
    rates_alltime = np.transpose(np.array(rates_alltime), (1, 0, 2))
    assert np.shape(allocs_alltime) == np.shape(rates_alltime) == (
        number_of_layouts, general_para.log_utility_time_slots, N)
    return allocs_alltime, rates_alltime
def all_active_prop_fair(general_para, directlink_channel_losses,
                         crosslink_channel_losses):
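    """Baseline: every link transmits in every slot (no scheduling).

    The allocation and its rates are computed once and tiled across all time
    slots, since they never change.
    """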
    number_of_layouts, N = np.shape(directlink_channel_losses)
    allocs = np.ones([number_of_layouts, N]).astype(float)
    rates = utils.compute_rates(general_para, allocs,
                                directlink_channel_losses,
                                crosslink_channel_losses)
    allocs_alltime = np.tile(np.expand_dims(allocs, axis=0),
                             (general_para.log_utility_time_slots, 1, 1))
    rates_alltime = np.tile(np.expand_dims(rates, axis=0),
                            (general_para.log_utility_time_slots, 1, 1))
    allocs_alltime = np.transpose(np.array(allocs_alltime), (1, 0, 2))
    rates_alltime = np.transpose(np.array(rates_alltime), (1, 0, 2))
    assert np.shape(allocs_alltime) == np.shape(rates_alltime) == (
        number_of_layouts, general_para.log_utility_time_slots, N)
    return allocs_alltime, rates_alltime
def random_scheduling_prop_fair(general_para, directlink_channel_losses,
                                crosslink_channel_losses):
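    """Baseline: each link is activated independently with probability 1/2
    in every time slot."""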
    number_of_layouts, N = np.shape(directlink_channel_losses)
    allocs_alltime = np.random.randint(
        2, size=(general_para.log_utility_time_slots, number_of_layouts,
                 N)).astype(float)
    rates_alltime = []
    for i in range(general_para.log_utility_time_slots):
        rates_oneslot = utils.compute_rates(general_para, allocs_alltime[i],
                                            directlink_channel_losses,
                                            crosslink_channel_losses)
        rates_alltime.append(rates_oneslot)
    allocs_alltime = np.transpose(np.array(allocs_alltime), (1, 0, 2))
    rates_alltime = np.transpose(np.array(rates_alltime), (1, 0, 2))
    assert np.shape(allocs_alltime) == np.shape(rates_alltime) == (
        number_of_layouts, general_para.log_utility_time_slots, N)
    return allocs_alltime, rates_alltime
def vanilla_round_robin_prop_fair(general_para, directlink_channel_losses,
                                  crosslink_channel_losses):
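    """Baseline: schedule exactly one link per slot, cycling through the
    links in index order (the same link index in every layout)."""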
    number_of_layouts, N = np.shape(directlink_channel_losses)
    allocs_alltime = []
    rates_alltime = []
    iterator = cycle(range(N))
    for i in range(general_para.log_utility_time_slots):
        allocs_oneslot = np.zeros([number_of_layouts, N])
        allocs_oneslot[:, next(iterator)] = 1
        rates_oneslot = utils.compute_rates(general_para, allocs_oneslot,
                                            directlink_channel_losses,
                                            crosslink_channel_losses)
        allocs_alltime.append(allocs_oneslot)
        rates_alltime.append(rates_oneslot)
    allocs_alltime = np.transpose(np.array(allocs_alltime), (1, 0, 2))
    rates_alltime = np.transpose(np.array(rates_alltime), (1, 0, 2))
    assert np.shape(allocs_alltime) == np.shape(rates_alltime) == (
        number_of_layouts, general_para.log_utility_time_slots, N)
    return allocs_alltime, rates_alltime
def Clustering_based_prop_fair(general_para, directlink_channel_losses,
                               crosslink_channel_losses, cluster_assignments,
                               clustering_method):
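    """Proportional fairness on top of a precomputed clustering.

    Each slot, every cluster activates one of its member links, cycling
    round-robin within the cluster; clusters thus transmit in parallel while
    their members time-share.
    """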
    print("{} Proportional Fairness...".format(clustering_method))
    number_of_layouts, N = np.shape(directlink_channel_losses)
    assert np.shape(cluster_assignments) == (number_of_layouts, N)
    n_clusters = (np.max(cluster_assignments, axis=1) + 1).astype(
        int)  # number of clusters in each layout
    allocs_alltime = []
    rates_alltime = []
    # create one round-robin iterator over the member links of each cluster
    iterators_all_layouts = []
    for layout_id in range(number_of_layouts):
        iterators_one_layout = []
        for cluster_id in range(n_clusters[layout_id]):
            iterators_one_layout.append(
                cycle(
                    np.where(cluster_assignments[layout_id] == cluster_id)[0]))
        iterators_all_layouts.append(iterators_one_layout)
    # Start sequential time slots scheduling
    for time_slot in range(general_para.log_utility_time_slots):
        allocs = np.zeros([number_of_layouts, N])
        for layout_id in range(number_of_layouts):
            for cluster_id in range(n_clusters[layout_id]):
                iterator_to_schedule = iterators_all_layouts[layout_id][
                    cluster_id]
                link_to_schedule = next(iterator_to_schedule)
                allocs[layout_id][link_to_schedule] = 1
        rates = utils.compute_rates(general_para, allocs,
                                    directlink_channel_losses,
                                    crosslink_channel_losses)
        allocs_alltime.append(allocs)
        rates_alltime.append(rates)
    allocs_alltime = np.transpose(np.array(allocs_alltime), (1, 0, 2))
    rates_alltime = np.transpose(np.array(rates_alltime), (1, 0, 2))
    assert np.shape(allocs_alltime) == np.shape(rates_alltime) == (
        number_of_layouts, general_para.log_utility_time_slots, N)
    return allocs_alltime, rates_alltime
def greedy_scheduling(general_para, gains_diagonal, gains_nondiagonal,
                      prop_weights):
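    """Greedy weighted sum-rate scheduling.

    Links are sorted by weight times interference-free direct rate; going
    from strongest to weakest, each link is kept active only if turning it
    on increases the weighted sum rate of its layout.
    """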
    number_of_layouts, N = np.shape(gains_diagonal)
    assert np.shape(prop_weights) == (number_of_layouts, N)
    SNRS = gains_diagonal * general_para.tx_power / general_para.output_noise_power
    direct_rates = general_para.bandwidth * np.log2(
        1 + SNRS / general_para.SNR_gap
    )  # interference-free direct rates (layouts X N); O(N) per layout
    sorted_links_indices = np.argsort(prop_weights * direct_rates, axis=1)
    allocs = np.zeros([number_of_layouts, N])
    previous_weighted_sum_rates = np.zeros([number_of_layouts])
    for j in range(N - 1, -1, -1):
        # tentatively activate the link with the next-largest weighted direct rate
        allocs[np.arange(number_of_layouts), sorted_links_indices[:, j]] = 1
        rates = utils.compute_rates(general_para, allocs, gains_diagonal,
                                    gains_nondiagonal)
        weighted_sum_rates = np.sum(rates * prop_weights,
                                    axis=1)  # (number of layouts,)
        # keep the link active only in layouts where the weighted sum rate improved
        allocs[np.arange(number_of_layouts), sorted_links_indices[:, j]] = (
            weighted_sum_rates > previous_weighted_sum_rates).astype(int)
        previous_weighted_sum_rates = np.maximum(weighted_sum_rates,
                                                 previous_weighted_sum_rates)
    return allocs
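

# For reference, a minimal sketch of the rate computation assumed above
# (hypothetical: the repository's utils.compute_rates may differ in details
# such as where the SNR gap enters). Each scheduled link's SINR is its direct
# gain times tx power over noise plus crosstalk from the other active links:
def compute_rates_sketch(general_para, allocs, gains_diagonal,
                         gains_nondiagonal):
    # gains_diagonal: (layouts, N) direct-link gains
    # gains_nondiagonal: (layouts, N, N) crosslink gains with zero diagonal
    signal = gains_diagonal * allocs * general_para.tx_power
    # interference at each receiver: crosslink gains times senders' allocations
    interference = np.squeeze(
        np.matmul(gains_nondiagonal, np.expand_dims(allocs, axis=-1)),
        axis=-1) * general_para.tx_power
    sinr = signal / ((interference + general_para.output_noise_power)
                     * general_para.SNR_gap)
    return general_para.bandwidth * np.log2(1 + sinr)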


# Example 8
# This snippet comes from a different file in the same project; it also needs
# the TensorFlow 1.x graph/session API, and assumes `model_para` and
# `test_network` are defined in the surrounding module.
import time

import tensorflow as tf
def network_inference_weighted(general_para, gains_diagonal,
                               gains_nondiagonal):
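    """Sequential time-slot inference with the trained conv net under PF weights.

    Each slot feeds the current binary importance weights to the network as
    the candidate link subset, computes the resulting rates, then updates the
    proportional-fairness weights (and their binary approximation) for the
    next slot.
    """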
    steps_amount_test = 20
    N_test = general_para.pairs_amount
    layouts_amount = general_para.test_data_info["layouts"]
    slots_per_layout = general_para.test_data_info["slots_per_layout"]
    global N
    N = N_test
    print(
        "[ConvNetSumRateV10 network inference weighted] Starting with N={}; {} Layouts; {} Time slots......"
        .format(N, layouts_amount, slots_per_layout))
    general_para.amount_per_batch = layouts_amount  # for the weighted sum-rate case, the layout count must be small enough to evaluate as a single batch
    global amount_per_batch
    amount_per_batch = general_para.amount_per_batch
    # load test data
    raw_data = utils.load_raw_data(general_para, model_para, ['test'])
    test_data = utils.prepare_batches(general_para, raw_data['test'])
    test_data = utils.add_appended_indices(general_para, test_data)
    batches_amount = np.shape(test_data['locations'])[0]
    print("Test batch amount: {}; with {} samples per batch".format(
        batches_amount, general_para.amount_per_batch))

    # create the network graph
    g_test, outputs_final, all_timesteps_allocs, placeholders = test_network(
        steps_amount=steps_amount_test)

    model_loc = general_para.base_dir + model_para.model_loc
    with g_test.as_default():
        saver = tf.train.Saver()
        with tf.Session() as sess:
            print("Restoring previously trained model from: {}".format(
                model_loc))
            saver.restore(sess, model_loc)
            total_time = 0
            allocs = []
            rates = []
            subsets = []
            orig_prop_weights = []
            weights_orig = np.ones([layouts_amount, N])
            weights_binary = np.ones([layouts_amount, N])
            for i in range(1, slots_per_layout + 1):
                # progress printout every 20% of the time slots
                if ((i / slots_per_layout * 100) % 20 == 0):
                    print("{}/{} time slots".format(i, slots_per_layout))
                start_time = time.time()
                allocs_oneslot = sess.run(
                    outputs_final,
                    feed_dict={
                        placeholders['tx_indices_hash']:
                        test_data['tx_indices_hash'][0],
                        placeholders['rx_indices_hash']:
                        test_data['rx_indices_hash'][0],
                        placeholders['tx_indices_extract']:
                        test_data['tx_indices_ext'][0],
                        placeholders['rx_indices_extract']:
                        test_data['rx_indices_ext'][0],
                        placeholders['pair_tx_convfilter_indices']:
                        test_data['pair_tx_convfilter_indices'][0],
                        placeholders['pair_rx_convfilter_indices']:
                        test_data['pair_rx_convfilter_indices'][0],
                        placeholders['subset_links']:
                        weights_binary
                    })
                total_time += time.time() - start_time
                allocs_oneslot = allocs_oneslot * weights_binary  # zero out links not to be scheduled
                orig_prop_weights.append(weights_orig)
                subsets.append(weights_binary)
                allocs.append(allocs_oneslot)
                rates_oneslot = utils.compute_rates(general_para,
                                                    allocs_oneslot,
                                                    gains_diagonal,
                                                    gains_nondiagonal)
                rates.append(rates_oneslot)
                weights_orig = utils.proportional_update_weights(
                    general_para, weights_orig, rates_oneslot)
                start_time = time.time()
                weights_binary = utils.binary_importance_weights_approx(
                    general_para, weights_orig)
                total_time += time.time() - start_time
    print("{} layouts with {} links over {} timeslots, it took {} seconds.".
          format(layouts_amount, N, slots_per_layout, total_time))
    allocs = np.transpose(np.array(allocs), (1, 0, 2))
    assert np.shape(allocs) == (layouts_amount, slots_per_layout,
                                N), "Wrong shape: {}".format(np.shape(allocs))
    rates = np.transpose(np.array(rates), (1, 0, 2))
    assert np.shape(rates) == (layouts_amount, slots_per_layout,
                               N), "Wrong shape: {}".format(np.shape(rates))
    subsets = np.transpose(np.array(subsets), (1, 0, 2))
    assert np.shape(subsets) == (layouts_amount, slots_per_layout,
                                 N), "Wrong shape: {}".format(
                                     np.shape(subsets))
    orig_prop_weights = np.transpose(np.array(orig_prop_weights), (1, 0, 2))
    assert np.shape(orig_prop_weights) == (layouts_amount, slots_per_layout,
                                           N), "Wrong shape: {}".format(
                                               np.shape(orig_prop_weights))
    np.save(
        general_para.base_dir + "SanityChecks/Weighted_SumRate_Opt/Conv_V10/" +
        "allocs.npy", allocs)
    np.save(
        general_para.base_dir + "SanityChecks/Weighted_SumRate_Opt/Conv_V10/" +
        "subsets.npy", subsets)
    np.save(
        general_para.base_dir + "SanityChecks/Weighted_SumRate_Opt/Conv_V10/" +
        "prop_weights.npy", orig_prop_weights)
    return allocs, rates, subsets


# Example 9
# Excerpt from the middle of an evaluation script: `all_allocs`, `layouts`,
# `all_clusters`, `all_clustering_methods`, `n_layouts`, `N`,
# `compute_avg_ratio`, and `debug_visualize` are defined earlier in the
# original file; the plotting part also needs matplotlib
# (`import matplotlib.pyplot as plt`).
        all_allocs["Random Scheduling"] = np.random.randint(2, size=[n_layouts, N]).astype(float) # Return float type
        all_allocs["Strongest Link"] = benchmarks.Strongest_Link(general_para, directlink_channel_losses)


        # EVALUATION AND COMPARISON
        all_allocs_means = {}
        for clustering_method in all_allocs.keys():
            assert np.shape(all_allocs[clustering_method]) == (n_layouts, N) # checking dimension validity
            all_allocs_means[clustering_method] = np.mean(all_allocs[clustering_method],axis=1)
        compute_avg_ratio(all_allocs_means, "Scheduling Mean")

        # Evaluate Stage I: single time slot
        links_rates_I = {}
        evaluate_results_I = {}
        for clustering_method in all_allocs.keys():
            links_rates_I[clustering_method] = utils.compute_rates(general_para, all_allocs[clustering_method], directlink_channel_losses, crosslink_channel_losses)
            assert np.shape(links_rates_I[clustering_method]) == (n_layouts, N)
            evaluate_results_I[clustering_method] = np.sum(links_rates_I[clustering_method], axis=1)
        compute_avg_ratio(evaluate_results_I, "Sum Rate Single Timeslot")

        # visualize clustering-based allocations for the 3 worst and 3 best layouts
        if (debug_visualize):
            for clustering_method in all_clustering_methods:
                fig, axs = plt.subplots(nrows=2, ncols=3)
                fig.suptitle("{} clustering and scheduling for Sum-Rate".format(clustering_method))
                layout_indices_ranked = np.argsort(evaluate_results_I[clustering_method])
                rank_titles = {0: "Worst", 1: "2nd Worst", 2: "3rd Worst", -1: "Best", -2: "2nd Best", -3: "3rd Best"}
                for i, rank_tuple in enumerate(rank_titles.items()):
                    layout_index = layout_indices_ranked[rank_tuple[0]]
                    layout = layouts[layout_index]
                    clusters = all_clusters[clustering_method][layout_index]