Example 1
def main():

    # Create the data file
    local_shell = exputil.LocalShell()
    local_shell.remove_force_recursive("data")
    local_shell.make_full_dir("data")
    local_shell.remove_force_recursive("pdf")
    local_shell.make_full_dir("pdf")

    # Plot all the pair path utilization
    for traffic_mode in ["specific", "general"]:
        for movement in ["static", "moving"]:

            # Pair path max utilization
            plot_pair_path_max_utilization(
                "../../satgenpy_analysis/data/"
                "kuiper_630_isls_plus_grid_ground_stations_top_100_algorithm_free_one_only_over_isls/100ms_for_200s"
                "/manual/data",
                "run_%s_tm_pairing_kuiper_isls_%s" % (traffic_mode, movement),
                1174, 1229, movement == "static")

            # Perform simple flow plot for debugging purposes
            run_name = "run_%s_tm_pairing_kuiper_isls_%s" % (traffic_mode,
                                                             movement)
            local_shell.make_full_dir("pdf/" + run_name)
            local_shell.make_full_dir("data/" + run_name)
            local_shell.perfect_exec(
                "cd ../../../ns3-sat-sim/simulator/contrib/basic-sim/tools/plotting/plot_tcp_flow; "
                "python plot_tcp_flow.py "
                "../../../../../../../paper/ns3_experiments/traffic_matrix/runs/"
                + run_name + "/logs_ns3 "
                "../../../../../../../paper/ns3_experiments/traffic_matrix/data/"
                + run_name + " "
                "../../../../../../../paper/ns3_experiments/traffic_matrix/pdf/"
                + run_name + " " + "0 " +
                str(1 * 1000 * 1000 * 1000),  # Flow id = 0, 1 * 1000 * 1000 * 1000 ns = 1s interval
                output_redirect=exputil.OutputRedirect.CONSOLE)

            local_shell.perfect_exec(
                "cd ../../../ns3-sat-sim/simulator/contrib/basic-sim/tools/plotting/plot_tcp_flow; "
                "python plot_tcp_flow.py "
                "../../../../../../../paper/ns3_experiments/traffic_matrix/runs/"
                + run_name + "/logs_ns3 "
                "../../../../../../../paper/ns3_experiments/traffic_matrix/data/"
                + run_name + " "
                "../../../../../../../paper/ns3_experiments/traffic_matrix/pdf/"
                + run_name + " " + "35 " +
                str(1 * 1000 * 1000 * 1000),  # Flow id = 35, 1 * 1000 * 1000 * 1000 ns = 1s interval
                output_redirect=exputil.OutputRedirect.CONSOLE)
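
For reference, the command the two perfect_exec calls above assemble has the shape
(argument order inferred from the string concatenation; plot_tcp_flow.py is part of
the basic-sim plotting tools and is not shown here):

    python plot_tcp_flow.py <logs_ns3_dir> <data_out_dir> <pdf_out_dir> <flow_id> <interval_ns>

so flow 0 and flow 35 are each plotted at a 1 s (1e9 ns) interval.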
Example 2
def print_routes_and_rtt(base_output_dir, satellite_network_dir, dynamic_state_update_interval_ms,
                         simulation_end_time_s, src, dst, satgenpy_dir_with_ending_slash):

    # Local shell
    local_shell = exputil.LocalShell()

    # Dynamic state dir can be inferred
    satellite_network_dynamic_state_dir = "%s/dynamic_state_%dms_for_%ds" % (
        satellite_network_dir, dynamic_state_update_interval_ms, simulation_end_time_s
    )

    # Default output dir assumes it is run manually
    pdf_dir = base_output_dir + "/pdf"
    data_dir = base_output_dir + "/data"
    local_shell.make_full_dir(pdf_dir)
    local_shell.make_full_dir(data_dir)

    # Variables (load in for each thread such that they don't interfere)
    ground_stations = read_ground_stations_extended(satellite_network_dir + "/ground_stations.txt")
    tles = read_tles(satellite_network_dir + "/tles.txt")
    list_isls = read_isls(satellite_network_dir + "/isls.txt")
    satellites = tles["satellites"]
    epoch = tles["epoch"]
    description = exputil.PropertiesConfig(satellite_network_dir + "/description.txt")

    # Derivatives
    simulation_end_time_ns = simulation_end_time_s * 1000 * 1000 * 1000
    dynamic_state_update_interval_ns = dynamic_state_update_interval_ms * 1000 * 1000
    max_gsl_length_m = exputil.parse_positive_float(description.get_property_or_fail("max_gsl_length_m"))
    max_isl_length_m = exputil.parse_positive_float(description.get_property_or_fail("max_isl_length_m"))

    # Write data file

    data_path_filename = data_dir + "/networkx_path_" + str(src) + "_to_" + str(dst) + ".txt"
    with open(data_path_filename, "w+") as data_path_file:

        # For each time moment
        fstate = {}
        current_path = []
        rtt_ns_list = []
        for t in range(0, simulation_end_time_ns, dynamic_state_update_interval_ns):

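            # Load the forwarding state at time t: each line of fstate_<t>.txt reads
            # "current,destination,next_hop" (only the first three columns are used here)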
            with open(satellite_network_dynamic_state_dir + "/fstate_" + str(t) + ".txt", "r") as f_in:
                for line in f_in:
                    spl = line.split(",")
                    current = int(spl[0])
                    destination = int(spl[1])
                    next_hop = int(spl[2])
                    fstate[(current, destination)] = next_hop

                # Calculate path length
                path_there = get_path(src, dst, fstate)
                path_back = get_path(dst, src, fstate)
                if path_there is not None and path_back is not None:
                    length_src_to_dst_m = compute_path_length_without_graph(path_there, epoch, t, satellites,
                                                                            ground_stations, list_isls,
                                                                            max_gsl_length_m, max_isl_length_m)
                    length_dst_to_src_m = compute_path_length_without_graph(path_back, epoch, t,
                                                                            satellites, ground_stations, list_isls,
                                                                            max_gsl_length_m, max_isl_length_m)
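                    # Propagation delay only: total there-and-back length in meters,
                    # divided by the speed of light (299792458 m/s), expressed in ns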
                    rtt_ns = (length_src_to_dst_m + length_dst_to_src_m) * 1000000000.0 / 299792458.0
                else:
                    length_src_to_dst_m = 0.0
                    length_dst_to_src_m = 0.0
                    rtt_ns = 0.0

                # Add to RTT list
                rtt_ns_list.append((t, rtt_ns))

                # Print the path only when it changes
                new_path = path_there  # Same as the get_path(src, dst, fstate) computed above
                if current_path != new_path:

                    # This is the new path
                    current_path = new_path

                    # Write change nicely to the console
                    print("Change at t=" + str(t) + " ns (= " + str(t / 1e9) + " seconds)")
                    print("  > Path..... " + (" -- ".join(list(map(lambda x: str(x), current_path)))
                                              if current_path is not None else "Unreachable"))
                    print("  > Length... " + str(length_src_to_dst_m + length_dst_to_src_m) + " m")
                    print("  > RTT...... %.2f ms" % (rtt_ns / 1e6))
                    print("")

                    # Write to path file
                    data_path_file.write(str(t) + "," + ("-".join(list(map(lambda x: str(x), current_path)))
                                                         if current_path is not None else "Unreachable") + "\n")

        # Write data file
        data_filename = data_dir + "/networkx_rtt_" + str(src) + "_to_" + str(dst) + ".txt"
        with open(data_filename, "w+") as data_file:
            for i in range(len(rtt_ns_list)):
                data_file.write("%d,%.10f\n" % (rtt_ns_list[i][0], rtt_ns_list[i][1]))

        # Make plot
        pdf_filename = pdf_dir + "/time_vs_networkx_rtt_" + str(src) + "_to_" + str(dst) + ".pdf"
        tf = tempfile.NamedTemporaryFile(delete=False)
        tf.close()
        local_shell.copy_file(satgenpy_dir_with_ending_slash + "plot/plot_time_vs_networkx_rtt.plt", tf.name)
        local_shell.sed_replace_in_file_plain(tf.name, "[OUTPUT-FILE]", pdf_filename)
        local_shell.sed_replace_in_file_plain(tf.name, "[DATA-FILE]", data_filename)
        local_shell.perfect_exec("gnuplot " + tf.name)
        print("Produced plot: " + pdf_filename)
        local_shell.remove(tf.name)
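
A minimal usage sketch for print_routes_and_rtt (hypothetical directory names; the
argument values mirror the test invocation further below):

    print_routes_and_rtt(
        "temp_analysis_data/reduced_kuiper_630",  # base output dir (data/ and pdf/ are created inside)
        "temp_gen_data/reduced_kuiper_630",       # satellite network dir (tles.txt, isls.txt, ...)
        100,                                      # dynamic state update interval in ms
        200,                                      # simulation end time in s
        12, 13,                                   # src and dst node identifiers
        ""                                        # satgenpy dir with ending slash
    )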
Example 3

    def test_end_to_end(self):
        local_shell = exputil.LocalShell()

        # Clean slate start
        local_shell.remove_force_recursive("temp_gen_data")
        local_shell.make_full_dir("temp_gen_data")

        # Both dynamic state algorithms should yield the same path and RTT
        for dynamic_state_algorithm in [
            "algorithm_free_one_only_over_isls",
            "algorithm_free_gs_one_sat_many_only_over_isls"
        ]:

            # Specific outcomes
            output_generated_data_dir = "temp_gen_data"
            num_threads = 1
            default_time_step_ms = 100
            all_time_step_ms = [50, 100, 1000, 10000]
            duration_s = 200

            # Add base name to setting
            name = "reduced_kuiper_630_" + dynamic_state_algorithm

            # Path trace we base this test on:
            # 0,1173-184-183-217-1241
            # 18000000000,1173-218-217-1241
            # 27600000000,1173-648-649-650-616-1241
            # 74300000000,1173-218-217-216-250-1241
            # 125900000000,1173-647-648-649-650-616-1241
            # 128700000000,1173-647-648-649-615-1241

            # Create output directories
            if not os.path.isdir(output_generated_data_dir):
                os.makedirs(output_generated_data_dir)
            if not os.path.isdir(output_generated_data_dir + "/" + name):
                os.makedirs(output_generated_data_dir + "/" + name)

            # Ground stations
            print("Generating ground stations...")
            with open(output_generated_data_dir + "/" + name + "/ground_stations.basic.txt", "w+") as f_out:
                f_out.write("0,Manila,14.6042,120.9822,0\n")  # Originally no. 17
                f_out.write("1,Dalian,38.913811,121.602322,0\n")  # Originally no. 85
            satgen.extend_ground_stations(
                output_generated_data_dir + "/" + name + "/ground_stations.basic.txt",
                output_generated_data_dir + "/" + name + "/ground_stations.txt"
            )

            # TLEs (taken from Kuiper-610 first shell)
            print("Generating TLEs...")
            with open(output_generated_data_dir + "/" + name + "/tles.txt", "w+") as f_out:
                f_out.write("1 12\n")  # Pretend it's one orbit with 12 satellites
                f_out.write("Kuiper-630 0\n")  # 183
                f_out.write("1 00184U 00000ABC 00001.00000000  .00000000  00000-0  00000+0 0    06\n")
                f_out.write("2 00184  51.9000  52.9412 0000001   0.0000 142.9412 14.80000000    00\n")
                f_out.write("Kuiper-630 1\n")  # 184
                f_out.write("1 00185U 00000ABC 00001.00000000  .00000000  00000-0  00000+0 0    07\n")
                f_out.write("2 00185  51.9000  52.9412 0000001   0.0000 153.5294 14.80000000    07\n")
                f_out.write("Kuiper-630 2\n")  # 216
                f_out.write("1 00217U 00000ABC 00001.00000000  .00000000  00000-0  00000+0 0    03\n")
                f_out.write("2 00217  51.9000  63.5294 0000001   0.0000 127.0588 14.80000000    01\n")
                f_out.write("Kuiper-630 3\n")  # 217
                f_out.write("1 00218U 00000ABC 00001.00000000  .00000000  00000-0  00000+0 0    04\n")
                f_out.write("2 00218  51.9000  63.5294 0000001   0.0000 137.6471 14.80000000    00\n")
                f_out.write("Kuiper-630 4\n")  # 218
                f_out.write("1 00219U 00000ABC 00001.00000000  .00000000  00000-0  00000+0 0    05\n")
                f_out.write("2 00219  51.9000  63.5294 0000001   0.0000 148.2353 14.80000000    08\n")
                f_out.write("Kuiper-630 5\n")  # 250
                f_out.write("1 00251U 00000ABC 00001.00000000  .00000000  00000-0  00000+0 0    01\n")
                f_out.write("2 00251  51.9000  74.1176 0000001   0.0000 132.3529 14.80000000    00\n")
                f_out.write("Kuiper-630 6\n")  # 615
                f_out.write("1 00616U 00000ABC 00001.00000000  .00000000  00000-0  00000+0 0    06\n")
                f_out.write("2 00616  51.9000 190.5882 0000001   0.0000  31.7647 14.80000000    05\n")
                f_out.write("Kuiper-630 7\n")  # 616
                f_out.write("1 00617U 00000ABC 00001.00000000  .00000000  00000-0  00000+0 0    07\n")
                f_out.write("2 00617  51.9000 190.5882 0000001   0.0000  42.3529 14.80000000    03\n")
                f_out.write("Kuiper-630 8\n")  # 647
                f_out.write("1 00648U 00000ABC 00001.00000000  .00000000  00000-0  00000+0 0    01\n")
                f_out.write("2 00648  51.9000 201.1765 0000001   0.0000  15.8824 14.80000000    09\n")
                f_out.write("Kuiper-630 9\n")  # 648
                f_out.write("1 00649U 00000ABC 00001.00000000  .00000000  00000-0  00000+0 0    02\n")
                f_out.write("2 00649  51.9000 201.1765 0000001   0.0000  26.4706 14.80000000    07\n")
                f_out.write("Kuiper-630 10\n")  # 649
                f_out.write("1 00650U 00000ABC 00001.00000000  .00000000  00000-0  00000+0 0    04\n")
                f_out.write("2 00650  51.9000 201.1765 0000001   0.0000  37.0588 14.80000000    05\n")
                f_out.write("Kuiper-630 11\n")  # 650
                f_out.write("1 00651U 00000ABC 00001.00000000  .00000000  00000-0  00000+0 0    05\n")
                f_out.write("2 00651  51.9000 201.1765 0000001   0.0000  47.6471 14.80000000    04\n")

            # Nodes
            #
            # Original ID   Test ID
            # 183           0
            # 184           1
            # 216           2
            # 217           3
            # 218           4
            # 250           5
            # 615           6
            # 616           7
            # 647           8
            # 648           9
            # 649           10
            # 650           11
            #
            # ISLs
            #
            # Original      Test
            # 183-184       0-1
            # 183-217       0-3
            # 216-217       2-3
            # 216-250       2-5
            # 217-218       3-4
            # 615-649       6-10
            # 616-650       7-11
            # 647-648       8-9
            # 648-649       9-10
            # 649-650       10-11
            #
            # Necessary ISLs (above) inferred from trace:
            #
            # 0,1173-184-183-217-1241
            # 18000000000,1173-218-217-1241
            # 27600000000,1173-648-649-650-616-1241
            # 74300000000,1173-218-217-216-250-1241
            # 125900000000,1173-647-648-649-650-616-1241
            # 128700000000,1173-647-648-649-615-1241
            #
            print("Generating ISLs...")
            with open(output_generated_data_dir + "/" + name + "/isls.txt", "w+") as f_out:
                f_out.write("0 1\n")
                f_out.write("0 3\n")
                f_out.write("2 3\n")
                f_out.write("2 5\n")
                f_out.write("3 4\n")
                f_out.write("6 10\n")
                f_out.write("7 11\n")
                f_out.write("8 9\n")
                f_out.write("9 10\n")
                f_out.write("10 11\n")

            # Description
            print("Generating description...")
            satgen.generate_description(
                output_generated_data_dir + "/" + name + "/description.txt",
                MAX_GSL_LENGTH_M,
                MAX_ISL_LENGTH_M
            )

            # Extended ground stations
            ground_stations = satgen.read_ground_stations_extended(
                output_generated_data_dir + "/" + name + "/ground_stations.txt"
            )

            # GSL interfaces
            if dynamic_state_algorithm == "algorithm_free_one_only_over_isls":
                gsl_interfaces_per_satellite = 1
                gsl_satellite_max_agg_bandwidth = 1.0
            elif dynamic_state_algorithm == "algorithm_free_gs_one_sat_many_only_over_isls":
                gsl_interfaces_per_satellite = len(ground_stations)
                gsl_satellite_max_agg_bandwidth = len(ground_stations)
            else:
                raise ValueError("Unknown dynamic state algorithm: " + dynamic_state_algorithm)
            print("Generating GSL interfaces info..")
            satgen.generate_simple_gsl_interfaces_info(
                output_generated_data_dir + "/" + name + "/gsl_interfaces_info.txt",
                12,  # 12 satellites
                len(ground_stations),
                gsl_interfaces_per_satellite,  # GSL interfaces per satellite
                1,  # (GSL) Interfaces per ground station
                gsl_satellite_max_agg_bandwidth,  # Aggregate max. bandwidth satellite (unit unspecified)
                1   # Aggregate max. bandwidth ground station (same unspecified unit)
            )

            # Forwarding state
            for time_step_ms in all_time_step_ms:
                print("Generating forwarding state...")
                satgen.help_dynamic_state(
                    output_generated_data_dir,
                    num_threads,
                    name,
                    time_step_ms,
                    duration_s,
                    MAX_GSL_LENGTH_M,
                    MAX_ISL_LENGTH_M,
                    dynamic_state_algorithm,
                    False
                )

            # Clean slate start
            local_shell.remove_force_recursive("temp_analysis_data")
            local_shell.make_full_dir("temp_analysis_data")
            output_analysis_data_dir = "temp_analysis_data"
            satgen.post_analysis.print_routes_and_rtt(
                output_analysis_data_dir + "/" + name,
                output_generated_data_dir + "/" + name,
                default_time_step_ms,
                duration_s,
                12,
                13,
                ""
            )
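            # Node ids 12 and 13 are the two ground stations: they are numbered after
            # the 12 satellites, so 12 = Manila and 13 = Dalian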

            # Now, we just want to see that the output path matches
            with open(output_analysis_data_dir + "/" + name + "/data/networkx_path_12_to_13.txt", "r") as f_in:
                i = 0
                for line in f_in:
                    line = line.strip()
                    if i == 0:
                        self.assertEqual(line, "0,12-1-0-3-13")
                    elif i == 1:
                        self.assertEqual(line, "18000000000,12-4-3-13")
                    elif i == 2:
                        self.assertEqual(line, "27600000000,12-9-10-11-7-13")
                    elif i == 3:
                        self.assertEqual(line, "74300000000,12-4-3-2-5-13")
                    elif i == 4:
                        self.assertEqual(line, "125900000000,12-8-9-10-11-7-13")
                    elif i == 5:
                        self.assertEqual(line, "128700000000,12-8-9-10-6-13")
                    else:
                        self.fail()
                    i += 1

            # ... and the RTT
            with open(output_analysis_data_dir + "/" + name + "/data/networkx_rtt_12_to_13.txt", "r") as f_in1:
                with open("tests/data_to_match/kuiper_630/networkx_rtt_1173_to_1241.txt", "r") as f_in2:
                    lines1 = []
                    for line in f_in1:
                        lines1.append(line.strip())
                    lines2 = []
                    for line in f_in2:
                        lines2.append(line.strip())

                    # self.assertEqual(lines1, lines2) is too computationally costly here;
                    # the per-line comparison below is equivalent
                    self.assertEqual(len(lines1), len(lines2))
                    for i in range(len(lines1)):
                        a_spl = lines1[i].split(",")
                        b_spl = lines2[i].split(",")
                        self.assertEqual(len(a_spl), len(b_spl))
                        self.assertEqual(len(a_spl), 2)
                        a_time = int(a_spl[0])
                        b_time = int(b_spl[0])
                        a_rtt = float(a_spl[1])
                        b_rtt = float(b_spl[1])
                        self.assertEqual(a_time, b_time)
                        self.assertAlmostEqual(a_rtt, b_rtt, places=6)

            # Now let's run all analyses available

            # TODO: Disabled because it requires downloading files from CDNs, which can take too long
            # # Print graphically
            #
            # satgen.post_analysis.print_graphical_routes_and_rtt(
            #     output_analysis_data_dir + "/" + name,
            #     output_generated_data_dir + "/" + name,
            #     default_time_step_ms,
            #     duration_s,
            #     12,
            #     13
            # )

            # Analyze paths
            satgen.post_analysis.analyze_path(
                output_analysis_data_dir + "/" + name,
                output_generated_data_dir + "/" + name,
                default_time_step_ms,
                duration_s,
                ""
            )

            # Number of path changes per pair
            columns = exputil.read_csv_direct_in_columns(
                output_analysis_data_dir + "/" + name +
                "/" + name + "/100ms_for_200s/path/data/ecdf_pairs_num_path_changes.txt",
                "float,pos_float"
            )
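            # ECDF data files hold two columns, "value,cumulative_fraction"; the first
            # row is a (-inf, 0) anchor, which is why index 0 is checked separately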
            for i in range(len(columns[0])):

                # Cumulative y-axis check
                if i == 0:
                    self.assertEqual(columns[1][i], 0)
                else:
                    self.assertEqual(columns[1][i], 1.0)

                # Only one pair with 5 path changes
                if i == 0:
                    self.assertEqual(columns[0][i], float("-inf"))
                else:
                    self.assertEqual(columns[0][i], 5)

            # Max minus min hop count per pair
            columns = exputil.read_csv_direct_in_columns(
                output_analysis_data_dir + "/" + name +
                "/" + name + "/100ms_for_200s/path/data/ecdf_pairs_max_minus_min_hop_count.txt",
                "float,pos_float"
            )
            for i in range(len(columns[0])):

                # Cumulative y-axis check
                if i == 0:
                    self.assertEqual(columns[1][i], 0)
                else:
                    self.assertEqual(columns[1][i], 1.0)

                # Shortest is 3 hops, longest is 6 hops, max delta is 3
                if i == 0:
                    self.assertEqual(columns[0][i], float("-inf"))
                else:
                    self.assertEqual(columns[0][i], 3)

            # Max divided by min hop count per pair
            columns = exputil.read_csv_direct_in_columns(
                output_analysis_data_dir + "/" + name +
                "/" + name + "/100ms_for_200s/path/data/ecdf_pairs_max_hop_count_to_min_hop_count.txt",
                "float,pos_float"
            )
            for i in range(len(columns[0])):

                # Cumulative y-axis check
                if i == 0:
                    self.assertEqual(columns[1][i], 0)
                else:
                    self.assertEqual(columns[1][i], 1.0)

                # Shortest is 3 hops, longest is 6 hops, max/min division is 2.0
                if i == 0:
                    self.assertEqual(columns[0][i], float("-inf"))
                else:
                    self.assertEqual(columns[0][i], 2.0)

            # For all pairs, the distribution how many times they changed path
            columns = exputil.read_csv_direct_in_columns(
                output_analysis_data_dir + "/" + name +
                "/" + name + "/100ms_for_200s/path/data/ecdf_time_step_num_path_changes.txt",
                "float,pos_float"
            )
            start_cumulative = 0.0
            for i in range(len(columns[0])):

                # Cumulative y-axis check
                if i == 0:
                    self.assertEqual(columns[1][i], start_cumulative)
                else:
                    self.assertGreater(columns[1][i], start_cumulative)
                if i == len(columns[0]) - 1:
                    self.assertEqual(columns[1][i], 1.0)

                # Only 5 time steps contain a path change (none coincide), so the 5
                # largest values are 1 and all the rest are 0
                if i == 0:
                    self.assertEqual(columns[0][i], float("-inf"))
                elif i > 2000 - 6:
                    self.assertEqual(columns[0][i], 1.0)
                else:
                    self.assertEqual(columns[0][i], 0)

            # Analyze RTTs
            satgen.post_analysis.analyze_rtt(
                output_analysis_data_dir + "/" + name,
                output_generated_data_dir + "/" + name,
                default_time_step_ms,
                duration_s,
                ""
            )

            # Min. RTT
            columns = exputil.read_csv_direct_in_columns(
                output_analysis_data_dir + "/" + name +
                "/" + name + "/100ms_for_200s/rtt/data/ecdf_pairs_min_rtt_ns.txt",
                "float,pos_float"
            )
            for i in range(len(columns[0])):

                # Cumulative y-axis check
                if i == 0:
                    self.assertEqual(columns[1][i], 0)
                else:
                    self.assertEqual(columns[1][i], 1.0)

                # Only one pair with minimum RTT 25ish
                if i == 0:
                    self.assertEqual(columns[0][i], float("-inf"))
                else:
                    self.assertAlmostEqual(columns[0][i], 25229775.250687573, delta=100)

            # Max. RTT
            columns = exputil.read_csv_direct_in_columns(
                output_analysis_data_dir + "/" + name +
                "/" + name + "/100ms_for_200s/rtt/data/ecdf_pairs_max_rtt_ns.txt",
                "float,pos_float"
            )
            for i in range(len(columns[0])):

                # Cumulative y-axis check
                if i == 0:
                    self.assertEqual(columns[1][i], 0)
                else:
                    self.assertEqual(columns[1][i], 1.0)

                # Only one pair with max. RTT 48ish
                if i == 0:
                    self.assertEqual(columns[0][i], float("-inf"))
                else:
                    self.assertAlmostEqual(columns[0][i], 48165140.010532916, delta=100)

            # Max. - Min. RTT
            columns = exputil.read_csv_direct_in_columns(
                output_analysis_data_dir + "/" + name +
                "/" + name + "/100ms_for_200s/rtt/data/ecdf_pairs_max_minus_min_rtt_ns.txt",
                "float,pos_float"
            )
            for i in range(len(columns[0])):

                # Cumulative y-axis check
                if i == 0:
                    self.assertEqual(columns[1][i], 0)
                else:
                    self.assertEqual(columns[1][i], 1.0)

                # Only one pair with minimum RTT of 25ish, max. RTT is 48ish
                if i == 0:
                    self.assertEqual(columns[0][i], float("-inf"))
                else:
                    self.assertAlmostEqual(columns[0][i], 48165140.010532916 - 25229775.250687573, delta=100)

            # Max. / Min. RTT
            columns = exputil.read_csv_direct_in_columns(
                output_analysis_data_dir + "/" + name +
                "/" + name + "/100ms_for_200s/rtt/data/ecdf_pairs_max_rtt_to_min_rtt_slowdown.txt",
                "float,pos_float"
            )
            for i in range(len(columns[0])):

                # Cumulative y-axis check
                if i == 0:
                    self.assertEqual(columns[1][i], 0)
                else:
                    self.assertEqual(columns[1][i], 1.0)

                # Only one pair with minimum RTT of 25ish, max. RTT is 48ish
                if i == 0:
                    self.assertEqual(columns[0][i], float("-inf"))
                else:
                    self.assertAlmostEqual(columns[0][i], 48165140.010532916 / 25229775.250687573, delta=0.01)

            # Geodesic slowdown
            columns = exputil.read_csv_direct_in_columns(
                output_analysis_data_dir + "/" + name +
                "/" + name + "/100ms_for_200s/rtt/data/ecdf_pairs_max_rtt_to_geodesic_slowdown.txt",
                "float,pos_float"
            )
            for i in range(len(columns[0])):

                # Cumulative y-axis check
                if i == 0:
                    self.assertEqual(columns[1][i], 0)
                else:
                    self.assertEqual(columns[1][i], 1.0)

                # Distance Manila to Dalian is 2,703 km according to Google Maps;
                # geodesic RTT = 2*D/c = 2 * 2,703,000 m / 0.299792 m/ns ~= 18.0e6 ns,
                # so the expected slowdown is ~= 48.17e6 / 18.03e6 ~= 2.67
                if i == 0:
                    self.assertEqual(columns[0][i], float("-inf"))
                else:
                    self.assertAlmostEqual(columns[0][i], 48165140.010532916 / (2 * 2703000 / 0.299792), delta=0.01)

            # Analyze time step paths
            satgen.post_analysis.analyze_time_step_path(
                output_analysis_data_dir + "/" + name,
                output_generated_data_dir + "/" + name,
                all_time_step_ms,
                duration_s
            )

            # Missed path changes
            for time_step_ms in all_time_step_ms:
                columns = exputil.read_csv_direct_in_columns(
                    output_analysis_data_dir + "/" + name +
                    "/" + name + "/200s/path/data/"
                    + "ecdf_pairs_" + str(time_step_ms) + "ms_missed_path_changes.txt",
                    "float,pos_float"
                )
                for i in range(len(columns[0])):

                    # Cumulative y-axis check
                    if i == 0:
                        self.assertEqual(columns[1][i], 0)
                    else:
                        self.assertEqual(columns[1][i], 1.0)

                    # Only one should have missed for the 10s one
                    if i == 0:
                        self.assertEqual(columns[0][i], float("-inf"))
                    else:
                        if time_step_ms == 10000:
                            self.assertEqual(columns[0][i], 1)
                        else:
                            self.assertEqual(columns[0][i], 0)

            # Time between path changes
            columns = exputil.read_csv_direct_in_columns(
                output_analysis_data_dir + "/" + name +
                "/" + name + "/200s/path/data/"
                + "ecdf_overall_time_between_path_change.txt",
                "float,pos_float"
            )
            self.assertEqual(len(columns[0]), 5)  # 5 rows: the -inf anchor plus 4 gaps between the 5 path changes
            for i in range(len(columns[0])):

                # Cumulative y-axis check
                if i == 0:
                    self.assertEqual(columns[1][i], 0)
                else:
                    if i == 1:
                        self.assertEqual(columns[1][i], 0.25)
                    elif i == 2:
                        self.assertEqual(columns[1][i], 0.5)
                    elif i == 3:
                        self.assertEqual(columns[1][i], 0.75)
                    elif i == 4:
                        self.assertEqual(columns[1][i], 1.0)

                # Gap values
                if i == 0:
                    self.assertEqual(columns[0][i], float("-inf"))
                else:
                    if i == 1:
                        self.assertEqual(columns[0][i], 2750000000)
                    elif i == 2:
                        self.assertEqual(columns[0][i], 9600000000)
                    elif i == 3:
                        self.assertEqual(columns[0][i], 46700000000)
                    elif i == 4:
                        self.assertEqual(columns[0][i], 51650000000)

            # Clean up
            local_shell.remove_force_recursive("temp_gen_data")
            local_shell.remove_force_recursive("temp_analysis_data")
Example 4
def plot_pair_path_max_utilization(path_networkx_data, run_name, src_node_id,
                                   dst_node_id, is_static):

    # Read in the paths (list of: (time, path as a node list))
    paths = []
    with open(path_networkx_data + "/networkx_path_%d_to_%d.txt" % (src_node_id, dst_node_id), "r") as f_path:
        for line in f_path:
            spl = line.split(",")
            if spl[1].strip() == "Unreachable":
                paths.append((int(spl[0]), []))
            else:
                paths.append(
                    (int(spl[0]), list(map(lambda x: int(x),
                                           spl[1].split("-")))))
            if is_static:
                break
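    # networkx_path files hold lines "time_ns,node-node-...-node" (or "time_ns,Unreachable");
    # for static runs only the first (t=0) path is kept due to the break above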

    # Read in the utilization file
    link_to_utilization = {}
    with open("runs/" + run_name + "/logs_ns3/isl_utilization.csv",
              "r") as f_utilization_in:
        for line in f_utilization_in:
            spl = line.split(",")
            from_id = int(spl[0])
            to_id = int(spl[1])
            from_time_ns = int(spl[2])
            till_time_ns = int(spl[3])
            utilization = float(spl[4])
            if from_time_ns == 0:
                link_to_utilization[(from_id, to_id)] = []
            link_to_utilization[(from_id, to_id)].append(
                (from_time_ns, till_time_ns, utilization))
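    # isl_utilization.csv lines are "from_id,to_id,from_time_ns,till_time_ns,utilization",
    # i.e. piecewise-constant utilization intervals per (from, to) link pair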

    # Create data and pdf filenames
    exputil.LocalShell().make_full_dir("data/" + run_name)
    exputil.LocalShell().make_full_dir("pdf/" + run_name)

    # The three variables we keep track of
    number_of_intervals_total = 0
    number_of_intervals_with_a_path = 0
    number_of_intervals_with_at_least_a_third_unused_bandwidth = 0
    all_intervals = []
    data_filename_at_100ms = "data/%s/pair_path_utilization_at_100ms_%d_to_%d.txt" % (
        run_name, src_node_id, dst_node_id)
    with open(data_filename_at_100ms, "w+") as f_out:
        path_idx = -1
        for t in range(0, 200 * 1000 * 1000 * 1000, 100 * 1000 * 1000):

            # If the next path takes over
            if path_idx != len(paths) - 1 and paths[path_idx + 1][0] == t:
                path_idx += 1

            # Fetch the utilization of all the ISL links on the path
            utilization_list = []
            for i in range(2, len(paths[path_idx][1]) - 1):
                pair = paths[path_idx][1][i - 1], paths[path_idx][1][i]
                for (from_time_ns, till_time_ns,
                     utilization) in link_to_utilization[pair]:
                    if from_time_ns <= t < till_time_ns:
                        utilization_list.append(utilization)

            # And finally write the result
            max_utilization = max(utilization_list) if len(utilization_list) > 0 else 0
            f_out.write(str(t) + "," + str(max_utilization) + "\n")
            all_intervals.append((t, max_utilization))

            # If there was a path, find what the max utilization was,
            # and then count if the utilization was less than 2/3rds
            if len(utilization_list) > 0:
                if max(utilization_list) < 2.0 / 3.0:
                    number_of_intervals_with_at_least_a_third_unused_bandwidth += 1
                number_of_intervals_with_a_path += 1
            number_of_intervals_total += 1

    # Print the statistics
    data_filename_utilization_information = "data/%s/utilization_information_%d_to_%d.txt" % (
        run_name, src_node_id, dst_node_id)
    with open(data_filename_utilization_information, "w+") as f_out:
        s = "Total intervals.............................. %d" % number_of_intervals_total
        f_out.write(s + "\n")
        print(s)

        s = "Intervals with a path........................ %d" % number_of_intervals_with_a_path
        f_out.write(s + "\n")
        print(s)

        s = "Intervals (w/path) with utilization <= 2/3... %d" % (
            number_of_intervals_with_at_least_a_third_unused_bandwidth)
        f_out.write(s + "\n")
        print(s)

        s = "%.2f%% of the intervals have at least 33%% unused bandwidth" % (
            float(number_of_intervals_with_at_least_a_third_unused_bandwidth) /
            float(number_of_intervals_with_a_path) * 100.0)
        f_out.write(s + "\n")
        print(s)

    # Write the pair path utilization at 1 second granularity
    data_filename_at_1s = "data/%s/pair_path_utilization_at_1s_%d_to_%d.txt" % (
        run_name, src_node_id, dst_node_id)
    with open(data_filename_at_1s, "w+") as f_out_1s:
        second_utilization_sum = 0.0
        for i in range(1, len(all_intervals) + 1):
            second_utilization_sum += all_intervals[i - 1][1]
            if i % 10 == 0:
                f_out_1s.write("%d,%.20f\n" %
                               ((i / 10 - 1) * 1000 * 1000 * 1000,
                                second_utilization_sum / 10.0))
                second_utilization_sum = 0.0
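    # (each 1 s value is the mean of ten consecutive 100 ms max-utilization samples)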

    # Perform the final plot
    pdf_filename = "pdf/%s/pair_available_bandwidth_%d_to_%d.pdf" % (
        run_name, src_node_id, dst_node_id)
    local_shell = exputil.LocalShell()
    if is_static:
        local_shell.copy_file(
            "plots/plot_pair_path_available_bandwidth_no_red_box.plt",
            "temp.plt")
    else:
        local_shell.copy_file("plots/plot_pair_path_available_bandwidth.plt",
                              "temp.plt")
    local_shell.sed_replace_in_file_plain("temp.plt", "[DATA-FILE]",
                                          data_filename_at_1s)
    local_shell.sed_replace_in_file_plain("temp.plt", "[OUTPUT-FILE]",
                                          pdf_filename)
    local_shell.perfect_exec("gnuplot temp.plt")
    local_shell.remove("temp.plt")
Example 5
def main():

    # Create the data file
    local_shell = exputil.LocalShell()
    local_shell.remove_force_recursive("data")
    local_shell.make_full_dir("data")
    local_shell.remove_force_recursive("pdf")
    local_shell.make_full_dir("pdf")

    # Rio de Janeiro to St. Petersburg with only ISLs on Kuiper
    local_shell.perfect_exec(
        "cd ../../../satgenpy; python -m satgen.post_analysis.main_print_routes_and_rtt "
        "../paper/ns3_experiments/two_compete/extra_satgenpy_analysis_data ../paper/satellite_networks_state/gen_data/"
        "kuiper_630_isls_plus_grid_ground_stations_top_100_algorithm_free_one_only_over_isls "
        "100 200 1174 1229 "
        "> ../paper/ns3_experiments/two_compete/extra_satgenpy_analysis_data/manual_kuiper_isls_1174_to_1229.log 2>&1"
    )

    # Fortaleza to Tehran with only ISLs on Kuiper
    local_shell.perfect_exec(
        "cd ../../../satgenpy; python -m satgen.post_analysis.main_print_routes_and_rtt "
        "../paper/ns3_experiments/two_compete/extra_satgenpy_analysis_data ../paper/satellite_networks_state/gen_data/"
        "kuiper_630_isls_plus_grid_ground_stations_top_100_algorithm_free_one_only_over_isls "
        "100 200 1254 1195 "
        "> ../paper/ns3_experiments/two_compete/extra_satgenpy_analysis_data/manual_kuiper_isls_1254_to_1195.log 2>&1"
    )
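    # The module above is invoked as:
    #   python -m satgen.post_analysis.main_print_routes_and_rtt \
    #       <output_dir> <satellite_network_dir> <update_interval_ms> <duration_s> <src> <dst>
    # (argument order inferred from the two commands; stdout/stderr go to the log files)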

    # Plot all the pair path utilization
    for movement in ["static", "moving"]:

        # Pair path max utilization
        plot_pair_path_max_utilization(
            "extra_satgenpy_analysis_data/"
            "kuiper_630_isls_plus_grid_ground_stations_top_100_algorithm_free_one_only_over_isls/100ms_for_200s"
            "/manual/data", "run_two_kuiper_isls_%s" % movement, 1174, 1229,
            movement == "static")
        plot_pair_path_max_utilization(
            "extra_satgenpy_analysis_data/"
            "kuiper_630_isls_plus_grid_ground_stations_top_100_algorithm_free_one_only_over_isls/100ms_for_200s"
            "/manual/data", "run_two_kuiper_isls_%s" % movement, 1254, 1195,
            movement == "static")

        # Perform simple flow plot for debugging purposes
        run_name = "run_two_kuiper_isls_%s" % movement
        local_shell.make_full_dir("pdf/" + run_name)
        local_shell.make_full_dir("data/" + run_name)
        local_shell.perfect_exec(
            "cd ../../../ns3-sat-sim/simulator/contrib/basic-sim/tools/plotting/plot_tcp_flow; "
            "python plot_tcp_flow.py "
            "../../../../../../../paper/ns3_experiments/two_compete/runs/" +
            run_name + "/logs_ns3 "
            "../../../../../../../paper/ns3_experiments/two_compete/data/" +
            run_name + " "
            "../../../../../../../paper/ns3_experiments/two_compete/pdf/" +
            run_name + " " + "0 " +
            str(1 * 1000 * 1000 * 1000),  # Flow id = 0, 1 * 1000 * 1000 * 1000 ns = 1s interval
            output_redirect=exputil.OutputRedirect.CONSOLE)
        local_shell.perfect_exec(
            "cd ../../../ns3-sat-sim/simulator/contrib/basic-sim/tools/plotting/plot_tcp_flow; "
            "python plot_tcp_flow.py "
            "../../../../../../../paper/ns3_experiments/two_compete/runs/" +
            run_name + "/logs_ns3 "
            "../../../../../../../paper/ns3_experiments/two_compete/data/" +
            run_name + " "
            "../../../../../../../paper/ns3_experiments/two_compete/pdf/" +
            run_name + " " + "1 " +
            str(1 * 1000 * 1000 * 1000),  # Flow id = 1, 1 * 1000 * 1000 * 1000 ns = 1s interval
            output_redirect=exputil.OutputRedirect.CONSOLE)
Example 6

    def test_end_to_end(self):
        local_shell = exputil.LocalShell()

        # Clean slate start
        local_shell.remove_force_recursive("temp_gen_data")
        local_shell.make_full_dir("temp_gen_data")

        # Both dynamic state algorithms should yield the same path and RTT
        for dynamic_state_algorithm in [
                "algorithm_free_one_only_over_isls",
                "algorithm_free_gs_one_sat_many_only_over_isls"
        ]:

            # Specific outcomes
            output_generated_data_dir = "temp_gen_data"
            num_threads = 1
            default_time_step_ms = 100
            all_time_step_ms = [50, 100, 1000, 10000, 20000]
            duration_s = 200

            # Add base name to setting
            name = "triangle_reduced_kuiper_630_" + dynamic_state_algorithm

            # Create output directories
            if not os.path.isdir(output_generated_data_dir):
                os.makedirs(output_generated_data_dir)
            if not os.path.isdir(output_generated_data_dir + "/" + name):
                os.makedirs(output_generated_data_dir + "/" + name)

            # Ground stations
            print("Generating ground stations...")
            with open(output_generated_data_dir + "/" + name + "/ground_stations.basic.txt", "w+") as f_out:
                f_out.write("0,Manila,14.6042,120.9822,0\n")  # Originally no. 17
                f_out.write("1,Dalian,38.913811,121.602322,0\n")  # Originally no. 85
                f_out.write("2,Sankt-Peterburg-(Saint-Petersburg),59.929858,30.326228,0\n")  # Originally no. 73
            satgen.extend_ground_stations(
                output_generated_data_dir + "/" + name + "/ground_stations.basic.txt",
                output_generated_data_dir + "/" + name + "/ground_stations.txt"
            )

            # Path trace we base this test on:

            # (1) 1173 -> 1241
            # 0,1173-184-183-217-1241
            # 18000000000,1173-218-217-1241
            # 27600000000,1173-648-649-650-616-1241
            # 74300000000,1173-218-217-216-250-1241
            # 125900000000,1173-647-648-649-650-616-1241
            # 128700000000,1173-647-648-649-615-1241

            # (2) 1229 -> 1241
            # 0,1229-144-178-212-246-280-281-282-283-1241
            # 3300000000,1229-177-178-212-246-280-281-282-283-1241
            # 10100000000,1229-177-178-212-246-247-248-249-1241
            # 128700000000,1229-177-211-245-246-247-248-249-1241
            # 139500000000,1229-144-178-212-246-247-248-249-1241
            # 155400000000,Unreachable
            # 165200000000,1229-143-177-211-245-279-280-281-282-1241
            # 178800000000,1229-176-177-211-245-279-280-281-282-1241

            # (3) 1229 -> 1173
            # 0,1229-144-178-179-180-181-182-183-184-1173
            # 3300000000,1229-177-178-179-180-181-182-183-184-1173
            # 139500000000,1229-144-178-179-180-181-182-183-184-1173
            # 150100000000,1229-144-178-179-180-181-182-183-1173
            # 155400000000,Unreachable
            # 165200000000,1229-143-177-178-179-180-181-182-183-1173
            # 178800000000,1229-176-177-178-179-180-181-182-183-1173

            # Select all satellite IDs
            subset_of_satellites = set()
            for path_filename in [
                    "tests/data_to_match/kuiper_630/networkx_path_1173_to_1241.txt",
                    "tests/data_to_match/kuiper_630/networkx_path_1229_to_1173.txt",
                    "tests/data_to_match/kuiper_630/networkx_path_1229_to_1241.txt",
            ]:
                columns = exputil.read_csv_direct_in_columns(
                    path_filename, "pos_int,string")
                for path in columns[1]:
                    if path != "Unreachable":
                        for sat_id in map(int, path.split("-")[1:-1]):
                            subset_of_satellites.add(sat_id)
            list_of_satellites = sorted(list(subset_of_satellites))
            original_sat_id_to_new_sat_id = {}
            for i in range(len(list_of_satellites)):
                original_sat_id_to_new_sat_id[list_of_satellites[i]] = i
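            # original_sat_id_to_new_sat_id now maps each original Kuiper satellite id
            # (e.g. 184) to a compact test id 0..N-1, assigned in ascending order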

            # Generate normal TLEs and then only filter out the limited satellite list
            print("Generating TLEs...")
            satgen.generate_tles_from_scratch_manual(
                output_generated_data_dir + "/" + name + "/tles_complete.txt",
                NICE_NAME, NUM_ORBS, NUM_SATS_PER_ORB, PHASE_DIFF,
                INCLINATION_DEGREE, ECCENTRICITY, ARG_OF_PERIGEE_DEGREE,
                MEAN_MOTION_REV_PER_DAY)
            with open(output_generated_data_dir + "/" + name + "/tles_complete.txt", "r") as f_in:
                with open(output_generated_data_dir + "/" + name + "/tles.txt", "w+") as f_out:
                    # Pretend it's simply one orbit with N satellites
                    f_out.write("1 %d\n" % len(list_of_satellites))
                    i = 0
                    for line in f_in:
                        line = line.strip()
                        if int(math.floor((i - 1) / 3.0)) in list_of_satellites:
                            if (i - 1) % 3 == 0:
                                # Name line: remap the original satellite id to the test id
                                f_out.write("%s %d\n" % (
                                    line.split(" ")[0],
                                    original_sat_id_to_new_sat_id[int(line.split(" ")[1])]
                                ))
                            else:
                                f_out.write("%s\n" % line)
                        i += 1

            # ISLs
            print("Generating ISLs...")
            complete_list_isls = satgen.generate_plus_grid_isls(
                output_generated_data_dir + "/" + name +
                "/isls_complete.temp.txt",
                NUM_ORBS,
                NUM_SATS_PER_ORB,
                isl_shift=0,
                idx_offset=0)
            with open(output_generated_data_dir + "/" + name + "/isls.txt", "w+") as f_out:
                for isl in complete_list_isls:
                    if isl[0] in list_of_satellites and isl[1] in list_of_satellites:
                        f_out.write("%d %d\n" % (
                            original_sat_id_to_new_sat_id[isl[0]],
                            original_sat_id_to_new_sat_id[isl[1]]
                        ))

            # Description
            print("Generating description...")
            satgen.generate_description(
                output_generated_data_dir + "/" + name + "/description.txt",
                MAX_GSL_LENGTH_M, MAX_ISL_LENGTH_M)

            # Extended ground stations
            ground_stations = satgen.read_ground_stations_extended(
                output_generated_data_dir + "/" + name +
                "/ground_stations.txt")

            # GSL interfaces
            if dynamic_state_algorithm == "algorithm_free_one_only_over_isls":
                gsl_interfaces_per_satellite = 1
                gsl_satellite_max_agg_bandwidth = 1.0
            elif dynamic_state_algorithm == "algorithm_free_gs_one_sat_many_only_over_isls":
                gsl_interfaces_per_satellite = len(ground_stations)
                gsl_satellite_max_agg_bandwidth = len(ground_stations)
            else:
                raise ValueError("Unknown dynamic state algorithm: " +
                                 dynamic_state_algorithm)
            print("Generating GSL interfaces info..")
            satgen.generate_simple_gsl_interfaces_info(
                output_generated_data_dir + "/" + name +
                "/gsl_interfaces_info.txt",
                len(list_of_satellites),  # N satellites
                len(ground_stations),
                gsl_interfaces_per_satellite,  # GSL interfaces per satellite
                1,  # (GSL) Interfaces per ground station
                gsl_satellite_max_agg_bandwidth,  # Aggregate max. bandwidth satellite (unit unspecified)
                1  # Aggregate max. bandwidth ground station (same unspecified unit)
            )

            # Forwarding state
            for time_step_ms in all_time_step_ms:
                print("Generating forwarding state...")
                satgen.help_dynamic_state(output_generated_data_dir,
                                          num_threads, name, time_step_ms,
                                          duration_s, MAX_GSL_LENGTH_M,
                                          MAX_ISL_LENGTH_M,
                                          dynamic_state_algorithm, False)

            # Clean slate start
            local_shell.remove_force_recursive("temp_analysis_data")
            local_shell.make_full_dir("temp_analysis_data")
            output_analysis_data_dir = "temp_analysis_data"

            # Check the path and RTT for each pair
            new_gs_id_to_old_node_id = {0: 1173, 1: 1241, 2: 1229}
            old_node_id_to_new_node_id = {
                1173: len(list_of_satellites) + 0,
                1241: len(list_of_satellites) + 1,
                1229: len(list_of_satellites) + 2,
            }
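            # Ground stations are numbered after the satellites, hence the node id of
            # ground station gs_id in the reduced network is len(list_of_satellites) + gs_id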
            min_rtts = []
            max_rtts = []
            for (src, dst) in [(0, 1), (0, 2), (1, 0), (1, 2), (2, 0), (2, 1)]:

                # Find node identifiers
                src_node_id = len(list_of_satellites) + src
                dst_node_id = len(list_of_satellites) + dst
                old_src_node_id = new_gs_id_to_old_node_id[src]
                old_dst_node_id = new_gs_id_to_old_node_id[dst]

                # Print the routes
                satgen.post_analysis.print_routes_and_rtt(
                    output_analysis_data_dir + "/" + name,
                    output_generated_data_dir + "/" + name,
                    default_time_step_ms, duration_s, src_node_id, dst_node_id,
                    "")

                # Now, we just want to see that the output path matches
                with open(output_analysis_data_dir + "/" + name +
                          "/data/networkx_path_%d_to_%d.txt" % (src_node_id, dst_node_id), "r") as f_in1:
                    with open("tests/data_to_match/kuiper_630/networkx_path_%d_to_%d.txt"
                              % (old_src_node_id, old_dst_node_id), "r") as f_in2:
                        lines1 = []
                        for line in f_in1:
                            lines1.append(line.strip())
                        lines2 = []
                        for line in f_in2:
                            lines2.append(line.strip())
                        self.assertEqual(len(lines1), len(lines2))
                        for i in range(len(lines1)):
                            spl1 = lines1[i].split(",")
                            spl2 = lines2[i].split(",")

                            # Time must be equal
                            self.assertEqual(spl1[0], spl2[0])

                            # Path must be equal
                            if spl1[1] == "Unreachable" or spl2[
                                    1] == "Unreachable":
                                self.assertEqual(spl1[1], spl2[1])
                            else:
                                node_list1 = list(map(int, spl1[1].split("-")))
                                node_list2 = list(map(int, spl2[1].split("-")))
                                new_node_list2 = []
                                for j in range(len(node_list2)):
                                    if j == 0 or j == len(node_list2) - 1:
                                        new_node_list2.append(old_node_id_to_new_node_id[node_list2[j]])
                                    else:
                                        new_node_list2.append(original_sat_id_to_new_sat_id[node_list2[j]])
                                self.assertEqual(node_list1, new_node_list2)

                # ... and the RTT
                lowest_rtt_ns = 100000000000
                highest_rtt_ns = 0
                with open(output_analysis_data_dir + "/" + name +
                          "/data/networkx_rtt_%d_to_%d.txt" % (src_node_id, dst_node_id), "r") as f_in1:
                    with open("tests/data_to_match/kuiper_630/networkx_rtt_%d_to_%d.txt"
                              % (old_src_node_id, old_dst_node_id), "r") as f_in2:
                        lines1 = []
                        for line in f_in1:
                            lines1.append(line.strip())
                        lines2 = []
                        for line in f_in2:
                            lines2.append(line.strip())

                        # self.assertEqual(lines1, lines2) is too computationally costly
                        # here; the per-line comparison below is equivalent
                        self.assertEqual(len(lines1), len(lines2))
                        for i in range(len(lines1)):
                            a_spl = lines1[i].split(",")
                            b_spl = lines2[i].split(",")
                            self.assertEqual(len(a_spl), len(b_spl))
                            self.assertEqual(len(a_spl), 2)
                            a_time = int(a_spl[0])
                            b_time = int(b_spl[0])
                            a_rtt = float(a_spl[1])
                            b_rtt = float(b_spl[1])
                            if a_rtt != 0:
                                lowest_rtt_ns = min(a_rtt, lowest_rtt_ns)
                                highest_rtt_ns = max(a_rtt, highest_rtt_ns)
                            self.assertEqual(a_time, b_time)
                            self.assertAlmostEqual(a_rtt, b_rtt, places=5)

                # Save RTTs
                if src < dst:
                    min_rtts.append(lowest_rtt_ns)
                    max_rtts.append(highest_rtt_ns)

            # Now let's run all analyses available

            # TODO: Disabled because it requires downloading files from CDNs, which can take too long
            # # Print graphically
            #
            # satgen.post_analysis.print_graphical_routes_and_rtt(
            #     output_analysis_data_dir + "/" + name,
            #     output_generated_data_dir + "/" + name,
            #     default_time_step_ms,
            #     duration_s,
            #     12,
            #     13
            # )

            # Analyze paths
            satgen.post_analysis.analyze_path(
                output_analysis_data_dir + "/" + name,
                output_generated_data_dir + "/" + name, default_time_step_ms,
                duration_s, "")

            # Number of path changes per pair
            columns = exputil.read_csv_direct_in_columns(
                output_analysis_data_dir + "/" + name + "/" + name +
                "/100ms_for_200s/path/data/ecdf_pairs_num_path_changes.txt",
                "float,pos_float")
            self.assertEqual(4, len(columns[0]))
            for i in range(len(columns[0])):

                # Cumulative y-axis check
                if i == 0:
                    self.assertEqual(columns[1][i], 0)
                elif i == 1:
                    self.assertAlmostEqual(columns[1][i],
                                           1.0 / 3.0,
                                           delta=0.0001)
                elif i == 2:
                    self.assertAlmostEqual(columns[1][i],
                                           2.0 / 3.0,
                                           delta=0.0001)
                else:
                    self.assertEqual(columns[1][i], 1.0)

                # There are three pairs, with 5, 6 and 7 path changes
                if i == 0:
                    self.assertEqual(columns[0][i], float("-inf"))
                elif i == 1:
                    self.assertEqual(columns[0][i], 5.0)
                elif i == 2:
                    self.assertEqual(columns[0][i], 6.0)
                else:
                    self.assertEqual(columns[0][i], 7.0)
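
            # Note on the ECDF file layout, inferred from the checks above:
            # each ecdf_*.txt row is "x,y" with y the cumulative fraction; the
            # first row is (-inf, 0.0) so the curve starts at zero, and with
            # three pairs the remaining y-values step through 1/3, 2/3 and 1.0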

            # Max minus min hop count per pair
            columns = exputil.read_csv_direct_in_columns(
                output_analysis_data_dir + "/" + name + "/" + name +
                "/100ms_for_200s/path/data/ecdf_pairs_max_minus_min_hop_count.txt",
                "float,pos_float")
            self.assertEqual(4, len(columns[0]))
            for i in range(len(columns[0])):

                # Cumulative y-axis check
                if i == 0:
                    self.assertEqual(columns[1][i], 0)
                elif i == 1:
                    self.assertAlmostEqual(columns[1][i],
                                           1.0 / 3.0,
                                           delta=0.0001)
                elif i == 2:
                    self.assertAlmostEqual(columns[1][i],
                                           2.0 / 3.0,
                                           delta=0.0001)
                else:
                    self.assertEqual(columns[1][i], 1.0)

                # One with 3 vs. 6, and two with 8 vs. 9
                if i == 0:
                    self.assertEqual(columns[0][i], float("-inf"))
                elif i == 1:
                    self.assertEqual(columns[0][i], 1)
                elif i == 2:
                    self.assertEqual(columns[0][i], 1)
                else:
                    self.assertEqual(columns[0][i], 3)

            # Max divided by min hop count per pair
            columns = exputil.read_csv_direct_in_columns(
                output_analysis_data_dir + "/" + name + "/" + name +
                "/100ms_for_200s/path/data/ecdf_pairs_max_hop_count_to_min_hop_count.txt",
                "float,pos_float")
            self.assertEqual(4, len(columns[0]))
            for i in range(len(columns[0])):

                # Cumulative y-axis check
                if i == 0:
                    self.assertEqual(columns[1][i], 0)
                elif i == 1:
                    self.assertAlmostEqual(columns[1][i],
                                           1.0 / 3.0,
                                           delta=0.0001)
                elif i == 2:
                    self.assertAlmostEqual(columns[1][i],
                                           2.0 / 3.0,
                                           delta=0.0001)
                else:
                    self.assertEqual(columns[1][i], 1.0)

                # One with 3 vs. 6, and two with 8 vs. 9
                if i == 0:
                    self.assertEqual(columns[0][i], float("-inf"))
                elif i == 1:
                    self.assertAlmostEqual(columns[0][i],
                                           9.0 / 8.0,
                                           delta=0.0001)
                elif i == 2:
                    self.assertAlmostEqual(columns[0][i],
                                           9.0 / 8.0,
                                           delta=0.0001)
                else:
                    self.assertEqual(columns[0][i], 2.0)

            # These are the path changes

            # 18000000000,1173-218-217-1241

            # 27600000000,1173-648-649-650-616-1241

            # 3300000000,1229-177-178-179-180-181-182-183-184-1173
            # 3300000000,1229-177-178-212-246-280-281-282-283-1241

            # 74300000000,1173-218-217-216-250-1241

            # 10100000000,1229-177-178-212-246-247-248-249-1241

            # 125900000000,1173-647-648-649-650-616-1241

            # 128700000000,1229-177-211-245-246-247-248-249-1241
            # 128700000000,1173-647-648-649-615-1241

            # 139500000000,1229-144-178-179-180-181-182-183-184-1173
            # 139500000000,1229-144-178-212-246-247-248-249-1241

            # 150100000000,1229-144-178-179-180-181-182-183-1173

            # 155400000000,Unreachable
            # 155400000000,Unreachable

            # 165200000000,1229-143-177-211-245-279-280-281-282-1241
            # 165200000000,1229-143-177-178-179-180-181-182-183-1173

            # 178800000000,1229-176-177-211-245-279-280-281-282-1241
            # 178800000000,1229-176-177-178-179-180-181-182-183-1173

            # For all pairs, the distribution of how many times they changed
            # path within a time step
            columns = exputil.read_csv_direct_in_columns(
                output_analysis_data_dir + "/" + name + "/" + name +
                "/100ms_for_200s/path/data/ecdf_time_step_num_path_changes.txt",
                "float,pos_float")
            start_cumulative = 0.0
            for i in range(len(columns[0])):

                # Cumulative y-axis check
                if i == 0:
                    self.assertEqual(columns[1][i], start_cumulative)
                else:
                    self.assertGreater(columns[1][i], start_cumulative)
                    start_cumulative = columns[1][i]
                # The final entry of the cumulative y-axis must reach 1.0
                if i == len(columns[0]) - 1:
                    self.assertEqual(columns[1][i], 1.0)

                # Of the 2000 time steps, 12 contain a path change: 6 steps
                # with 2 changes and 6 steps with 1 change
                if i == 0:
                    self.assertEqual(columns[0][i], float("-inf"))
                elif i > 2000 - 7:
                    self.assertEqual(columns[0][i], 2.0)
                elif i > 2000 - 13:
                    self.assertEqual(columns[0][i], 1.0)
                else:
                    self.assertEqual(columns[0][i], 0)
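
            # The 2000 in the index checks above is the number of 100 ms time
            # steps in the 200 s run (hence the "100ms_for_200s" directory);
            # only the last twelve ECDF entries have non-zero path changes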

            # Analyze RTTs
            satgen.post_analysis.analyze_rtt(
                output_analysis_data_dir + "/" + name,
                output_generated_data_dir + "/" + name, default_time_step_ms,
                duration_s, "")

            # Min. RTT
            columns = exputil.read_csv_direct_in_columns(
                output_analysis_data_dir + "/" + name + "/" + name +
                "/100ms_for_200s/rtt/data/ecdf_pairs_min_rtt_ns.txt",
                "float,pos_float")
            self.assertEqual(4, len(columns[0]))
            sorted_min_rtts = sorted(min_rtts)
            for i in range(len(columns[0])):

                # Cumulative y-axis check
                if i == 0:
                    self.assertEqual(columns[1][i], 0)
                elif i == 1:
                    self.assertAlmostEqual(columns[1][i],
                                           1.0 / 3.0,
                                           delta=0.0001)
                elif i == 2:
                    self.assertAlmostEqual(columns[1][i],
                                           2.0 / 3.0,
                                           delta=0.0001)
                else:
                    self.assertEqual(columns[1][i], 1.0)

                # RTT
                if i == 0:
                    self.assertEqual(columns[0][i], float("-inf"))
                else:
                    self.assertAlmostEqual(columns[0][i],
                                           sorted_min_rtts[i - 1],
                                           delta=100)

            # Max. RTT
            columns = exputil.read_csv_direct_in_columns(
                output_analysis_data_dir + "/" + name + "/" + name +
                "/100ms_for_200s/rtt/data/ecdf_pairs_max_rtt_ns.txt",
                "float,pos_float")
            self.assertEqual(4, len(columns[0]))
            sorted_max_rtts = sorted(max_rtts)
            for i in range(len(columns[0])):

                # Cumulative y-axis check
                if i == 0:
                    self.assertEqual(columns[1][i], 0)
                elif i == 1:
                    self.assertAlmostEqual(columns[1][i],
                                           1.0 / 3.0,
                                           delta=0.0001)
                elif i == 2:
                    self.assertAlmostEqual(columns[1][i],
                                           2.0 / 3.0,
                                           delta=0.0001)
                else:
                    self.assertEqual(columns[1][i], 1.0)

                # RTT
                if i == 0:
                    self.assertEqual(columns[0][i], float("-inf"))
                else:
                    self.assertAlmostEqual(columns[0][i],
                                           sorted_max_rtts[i - 1],
                                           delta=100)

            # Max. - Min. RTT
            columns = exputil.read_csv_direct_in_columns(
                output_analysis_data_dir + "/" + name + "/" + name +
                "/100ms_for_200s/rtt/data/ecdf_pairs_max_minus_min_rtt_ns.txt",
                "float,pos_float")
            self.assertEqual(4, len(columns[0]))
            sorted_max_minus_min_rtts = sorted(
                list(map(lambda x: max_rtts[x] - min_rtts[x], list(range(3)))))
            for i in range(len(columns[0])):

                # Cumulative y-axis check
                if i == 0:
                    self.assertEqual(columns[1][i], 0)
                elif i == 1:
                    self.assertAlmostEqual(columns[1][i],
                                           1.0 / 3.0,
                                           delta=0.0001)
                elif i == 2:
                    self.assertAlmostEqual(columns[1][i],
                                           2.0 / 3.0,
                                           delta=0.0001)
                else:
                    self.assertEqual(columns[1][i], 1.0)

                # RTT
                if i == 0:
                    self.assertEqual(columns[0][i], float("-inf"))
                else:
                    self.assertAlmostEqual(columns[0][i],
                                           sorted_max_minus_min_rtts[i - 1],
                                           delta=100)

            # Max. / Min. RTT
            columns = exputil.read_csv_direct_in_columns(
                output_analysis_data_dir + "/" + name + "/" + name +
                "/100ms_for_200s/rtt/data/ecdf_pairs_max_rtt_to_min_rtt_slowdown.txt",
                "float,pos_float")
            self.assertEqual(4, len(columns[0]))
            sorted_max_divided_min_rtts = sorted(
                list(map(lambda x: max_rtts[x] / min_rtts[x], list(range(3)))))
            for i in range(len(columns[0])):

                # Cumulative y-axis check
                if i == 0:
                    self.assertEqual(columns[1][i], 0)
                elif i == 1:
                    self.assertAlmostEqual(columns[1][i],
                                           1.0 / 3.0,
                                           delta=0.0001)
                elif i == 2:
                    self.assertAlmostEqual(columns[1][i],
                                           2.0 / 3.0,
                                           delta=0.0001)
                else:
                    self.assertEqual(columns[1][i], 1.0)

                # RTT
                if i == 0:
                    self.assertEqual(columns[0][i], float("-inf"))
                else:
                    self.assertAlmostEqual(columns[0][i],
                                           sorted_max_divided_min_rtts[i - 1],
                                           delta=0.01)

            # Geodesic slowdown
            columns = exputil.read_csv_direct_in_columns(
                output_analysis_data_dir + "/" + name + "/" + name +
                "/100ms_for_200s/rtt/data/ecdf_pairs_max_rtt_to_geodesic_slowdown.txt",
                "float,pos_float")
            # Expected geodesic distances, according to Google Maps:
            # - Manila to Dalian: 2,703 km
            # - St. Petersburg to Manila: 8,635 km
            # - St. Petersburg to Dalian: 6,406 km
            self.assertEqual(4, len(columns[0]))
            geodesic_expected_distance = [2703, 8635, 6406]
            sorted_max_divided_geodesic_rtts = sorted(
                list(
                    map(
                        lambda x: max_rtts[x] /
                        (2 * geodesic_expected_distance[x] * 1000.0 / 0.299792
                         ), list(range(3)))))
            for i in range(len(columns[0])):

                # Cumulative y-axis check
                if i == 0:
                    self.assertEqual(columns[1][i], 0)
                elif i == 1:
                    self.assertAlmostEqual(columns[1][i],
                                           1.0 / 3.0,
                                           delta=0.0001)
                elif i == 2:
                    self.assertAlmostEqual(columns[1][i],
                                           2.0 / 3.0,
                                           delta=0.0001)
                else:
                    self.assertEqual(columns[1][i], 1.0)

                # Geodesic RTT = 2*D / c
                if i == 0:
                    self.assertEqual(columns[0][i], float("-inf"))
                else:
                    self.assertAlmostEqual(columns[0][i],
                                           sorted_max_divided_geodesic_rtts[i -
                                                                            1],
                                           delta=0.01)
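
            # Worked example, for illustration only: Manila to Dalian is
            # ~2703 km, so its geodesic RTT is 2 * 2,703,000 m / 299,792,458 m/s
            # ~= 18.03 ms (~18,032,000 ns); the slowdown above is the measured
            # max. RTT divided by that value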

            # Analyze time step paths
            satgen.post_analysis.analyze_time_step_path(
                output_analysis_data_dir + "/" + name,
                output_generated_data_dir + "/" + name, all_time_step_ms,
                duration_s)

            # Missed path changes
            for time_step_ms in all_time_step_ms:
                columns = exputil.read_csv_direct_in_columns(
                    output_analysis_data_dir + "/" + name + "/" + name +
                    "/200s/path/data/" + "ecdf_pairs_" + str(time_step_ms) +
                    "ms_missed_path_changes.txt", "float,pos_float")
                for i in range(len(columns[0])):

                    # Cumulative y-axis check
                    if i == 0:
                        self.assertEqual(columns[1][i], 0)
                    elif i == 1:
                        self.assertAlmostEqual(columns[1][i],
                                               1.0 / 3.0,
                                               delta=0.0001)
                    elif i == 2:
                        self.assertAlmostEqual(columns[1][i],
                                               2.0 / 3.0,
                                               delta=0.0001)
                    else:
                        self.assertEqual(columns[1][i], 1.0)

                    # With the 10 s time step, two of the three pairs miss one
                    # path change each; with the 20 s time step, the pairs miss
                    # 1, 2 and 3 path changes respectively; smaller time steps
                    # miss none
                    if i == 0:
                        self.assertEqual(columns[0][i], float("-inf"))
                    else:
                        if time_step_ms == 10000:
                            if i == 1:
                                self.assertEqual(columns[0][i], 0)
                            if i == 2:
                                self.assertEqual(columns[0][i], 1)
                            if i == 3:
                                self.assertEqual(columns[0][i], 1)
                        elif time_step_ms == 20000:
                            if i == 1:
                                self.assertEqual(columns[0][i], 1)
                            if i == 2:
                                self.assertEqual(columns[0][i], 2)
                            if i == 3:
                                self.assertEqual(columns[0][i], 3)
                        else:
                            self.assertEqual(columns[0][i], 0)

            # Time between path changes
            columns = exputil.read_csv_direct_in_columns(
                output_analysis_data_dir + "/" + name + "/" + name +
                "/200s/path/data/" +
                "ecdf_overall_time_between_path_change.txt", "float,pos_float")
            # There are 18 path changes in total, of which 15 do not occur at
            # the epoch; together with the (-inf, 0) starting entry, that
            # yields 16 lines
            self.assertEqual(len(columns[0]), 16)
            for i in range(len(columns[0])):

                # Cumulative y-axis check
                if i == 0:
                    self.assertEqual(columns[1][i], 0)
                else:
                    self.assertAlmostEqual(columns[1][i],
                                           i / float(len(columns[0]) - 1),
                                           delta=0.00001)

                # Gap values
                if i == 0:
                    self.assertEqual(columns[0][i], float("-inf"))
                else:
                    if i == 1:
                        self.assertEqual(columns[0][i], 2750000000)
                    elif i == 2:
                        self.assertEqual(columns[0][i], 5350000000)
                    elif i == 3:
                        self.assertEqual(columns[0][i], 6800000000)
                    elif i == 4:
                        self.assertEqual(columns[0][i], 9600000000)
                    elif i == 5:
                        self.assertEqual(columns[0][i], 9750000000)
                    elif i == 6:
                        self.assertEqual(columns[0][i], 9750000000)
                    elif i == 7:
                        self.assertEqual(columns[0][i], 10550000000)
                    elif i == 8:
                        self.assertEqual(columns[0][i], 10800000000)
                    elif i == 9:
                        self.assertEqual(columns[0][i], 13600000000)
                    elif i == 10:
                        self.assertEqual(columns[0][i], 13600000000)
                    elif i == 11:
                        self.assertEqual(columns[0][i], 15900000000)
                    elif i == 12:
                        self.assertEqual(columns[0][i], 46700000000)
                    elif i == 13:
                        self.assertEqual(columns[0][i], 51650000000)
                    elif i == 14:
                        self.assertEqual(columns[0][i], 118650000000)
                    elif i == 15:
                        self.assertEqual(columns[0][i], 136250000000)

            # Clean up
            local_shell.remove_force_recursive("temp_gen_data")
            local_shell.remove_force_recursive("temp_analysis_data")
Example no. 7
def plot_tcp_flow(logs_ns3_dir, data_out_dir, pdf_out_dir, tcp_flow_id,
                  interval_ns):
    local_shell = exputil.LocalShell()

    # Check that all plotting files are available
    if not local_shell.file_exists("plot_tcp_flow_time_vs_cwnd.plt") or \
       not local_shell.file_exists("plot_tcp_flow_time_vs_progress.plt") or \
       not local_shell.file_exists("plot_tcp_flow_time_vs_rtt.plt") or \
       not local_shell.file_exists("plot_tcp_flow_time_vs_rate.plt"):
        print("The gnuplot files are not present.")
        print(
            "Are you executing this python file inside the plot_tcp_flow directory?"
        )
        exit(1)

    # Create the output directories if they don't exist yet
    local_shell.make_full_dir(data_out_dir)
    local_shell.make_full_dir(pdf_out_dir)

    # Create rate file
    generate_tcp_flow_rate_csv(logs_ns3_dir, data_out_dir, tcp_flow_id,
                               interval_ns)

    # Plot time vs. rate
    data_filename = data_out_dir + "/tcp_flow_" + str(
        tcp_flow_id) + "_rate_in_intervals.csv"
    pdf_filename = pdf_out_dir + "/plot_tcp_flow_time_vs_rate_" + str(
        tcp_flow_id) + ".pdf"
    plt_filename = "plot_tcp_flow_time_vs_rate.plt"
    local_shell.copy_file(plt_filename, "temp.plt")
    local_shell.sed_replace_in_file_plain("temp.plt", "[OUTPUT-FILE]",
                                          pdf_filename)
    local_shell.sed_replace_in_file_plain("temp.plt", "[DATA-FILE]",
                                          data_filename)
    local_shell.perfect_exec("gnuplot temp.plt")
    print("Produced plot: " + pdf_filename)
    local_shell.remove("temp.plt")

    # Plot time vs. progress
    data_filename = logs_ns3_dir + "/tcp_flow_" + str(
        tcp_flow_id) + "_progress.csv"
    local_shell.copy_file(
        data_filename,
        data_out_dir + "/tcp_flow_" + str(tcp_flow_id) + "_progress.csv")
    pdf_filename = pdf_out_dir + "/plot_tcp_flow_time_vs_progress_" + str(
        tcp_flow_id) + ".pdf"
    plt_filename = "plot_tcp_flow_time_vs_progress.plt"
    local_shell.copy_file(plt_filename, "temp.plt")
    local_shell.sed_replace_in_file_plain("temp.plt", "[OUTPUT-FILE]",
                                          pdf_filename)
    local_shell.sed_replace_in_file_plain("temp.plt", "[DATA-FILE]",
                                          data_filename)
    local_shell.perfect_exec("gnuplot temp.plt")
    print("Produced plot: " + pdf_filename)
    local_shell.remove("temp.plt")

    # Plot time vs. rtt
    data_filename = logs_ns3_dir + "/tcp_flow_" + str(tcp_flow_id) + "_rtt.csv"
    local_shell.copy_file(
        data_filename,
        data_out_dir + "/tcp_flow_" + str(tcp_flow_id) + "_rtt.csv")
    pdf_filename = pdf_out_dir + "/plot_tcp_flow_time_vs_rtt_" + str(
        tcp_flow_id) + ".pdf"
    plt_filename = "plot_tcp_flow_time_vs_rtt.plt"
    local_shell.copy_file(plt_filename, "temp.plt")
    local_shell.sed_replace_in_file_plain("temp.plt", "[OUTPUT-FILE]",
                                          pdf_filename)
    local_shell.sed_replace_in_file_plain("temp.plt", "[DATA-FILE]",
                                          data_filename)
    local_shell.perfect_exec("gnuplot temp.plt")
    print("Produced plot: " + pdf_filename)
    local_shell.remove("temp.plt")

    # Plot time vs. rto
    data_filename = logs_ns3_dir + "/tcp_flow_" + str(tcp_flow_id) + "_rto.csv"
    local_shell.copy_file(
        data_filename,
        data_out_dir + "/tcp_flow_" + str(tcp_flow_id) + "_rto.csv")
    pdf_filename = pdf_out_dir + "/plot_tcp_flow_time_vs_rto_" + str(
        tcp_flow_id) + ".pdf"
    plt_filename = "plot_tcp_flow_time_vs_rto.plt"
    local_shell.copy_file(plt_filename, "temp.plt")
    local_shell.sed_replace_in_file_plain("temp.plt", "[OUTPUT-FILE]",
                                          pdf_filename)
    local_shell.sed_replace_in_file_plain("temp.plt", "[DATA-FILE]",
                                          data_filename)
    local_shell.perfect_exec("gnuplot temp.plt")
    print("Produced plot: " + pdf_filename)
    local_shell.remove("temp.plt")

    # Plot time vs. cwnd
    data_filename = logs_ns3_dir + "/tcp_flow_" + str(
        tcp_flow_id) + "_cwnd.csv"
    local_shell.copy_file(
        data_filename,
        data_out_dir + "/tcp_flow_" + str(tcp_flow_id) + "_cwnd.csv")
    pdf_filename = pdf_out_dir + "/plot_tcp_flow_time_vs_cwnd_" + str(
        tcp_flow_id) + ".pdf"
    plt_filename = "plot_tcp_flow_time_vs_cwnd.plt"
    local_shell.copy_file(plt_filename, "temp.plt")
    local_shell.sed_replace_in_file_plain("temp.plt", "[OUTPUT-FILE]",
                                          pdf_filename)
    local_shell.sed_replace_in_file_plain("temp.plt", "[DATA-FILE]",
                                          data_filename)
    local_shell.perfect_exec("gnuplot temp.plt")
    print("Produced plot: " + pdf_filename)
    local_shell.remove("temp.plt")

    # Plot time vs. cwnd_inflated
    data_filename = logs_ns3_dir + "/tcp_flow_" + str(
        tcp_flow_id) + "_cwnd_inflated.csv"
    local_shell.copy_file(
        data_filename,
        data_out_dir + "/tcp_flow_" + str(tcp_flow_id) + "_cwnd_inflated.csv")
    pdf_filename = pdf_out_dir + "/plot_tcp_flow_time_vs_cwnd_inflated_" + str(
        tcp_flow_id) + ".pdf"
    plt_filename = "plot_tcp_flow_time_vs_cwnd_inflated.plt"
    local_shell.copy_file(plt_filename, "temp.plt")
    local_shell.sed_replace_in_file_plain("temp.plt", "[OUTPUT-FILE]",
                                          pdf_filename)
    local_shell.sed_replace_in_file_plain("temp.plt", "[DATA-FILE]",
                                          data_filename)
    local_shell.perfect_exec("gnuplot temp.plt")
    print("Produced plot: " + pdf_filename)
    local_shell.remove("temp.plt")

    # Plot time vs. ssthresh
    data_filename = logs_ns3_dir + "/tcp_flow_" + str(
        tcp_flow_id) + "_ssthresh.csv"

    # Retrieve the highest ssthresh which is not a max. integer
    ssthresh_values = exputil.read_csv_direct_in_columns(
        data_filename, "pos_int,pos_int,pos_int")[2]
    max_ssthresh = 0
    for ssthresh in ssthresh_values:
        if ssthresh > max_ssthresh and ssthresh != 4294967295:
            max_ssthresh = ssthresh
    if max_ssthresh == 0:  # If it never got out of initial slow-start, we just set it to 1 for the plot
        max_ssthresh = 1.0
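
    # Context (assumptions, not stated in this file): 4294967295 = 2^32 - 1 is
    # the sentinel ns-3 uses for a not-yet-set ssthresh, and 1380 bytes appears
    # to be the segment size used to convert bytes into packets for the y-axis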

    # Execute ssthresh plotting
    local_shell.copy_file(
        data_filename,
        data_out_dir + "/tcp_flow_" + str(tcp_flow_id) + "_ssthresh.csv")
    pdf_filename = pdf_out_dir + "/plot_tcp_flow_time_vs_ssthresh_" + str(
        tcp_flow_id) + ".pdf"
    plt_filename = "plot_tcp_flow_time_vs_ssthresh.plt"
    local_shell.copy_file(plt_filename, "temp.plt")
    local_shell.sed_replace_in_file_plain(
        "temp.plt", "[MAX-Y]", str(math.ceil(max_ssthresh / 1380.0)))
    local_shell.sed_replace_in_file_plain("temp.plt", "[OUTPUT-FILE]",
                                          pdf_filename)
    local_shell.sed_replace_in_file_plain("temp.plt", "[DATA-FILE]",
                                          data_filename)
    local_shell.perfect_exec("gnuplot temp.plt")
    print("Produced plot: " + pdf_filename)
    local_shell.remove("temp.plt")

    # Plot time vs. inflight
    data_filename = logs_ns3_dir + "/tcp_flow_" + str(
        tcp_flow_id) + "_inflight.csv"
    local_shell.copy_file(
        data_filename,
        data_out_dir + "/tcp_flow_" + str(tcp_flow_id) + "_inflight.csv")
    pdf_filename = pdf_out_dir + "/plot_tcp_flow_time_vs_inflight_" + str(
        tcp_flow_id) + ".pdf"
    plt_filename = "plot_tcp_flow_time_vs_inflight.plt"
    local_shell.copy_file(plt_filename, "temp.plt")
    local_shell.sed_replace_in_file_plain("temp.plt", "[OUTPUT-FILE]",
                                          pdf_filename)
    local_shell.sed_replace_in_file_plain("temp.plt", "[DATA-FILE]",
                                          data_filename)
    local_shell.perfect_exec("gnuplot temp.plt")
    print("Produced plot: " + pdf_filename)
    local_shell.remove("temp.plt")

    # Plot time vs. together (cwnd, cwnd_inflated, ssthresh, inflight)
    cwnd_values = exputil.read_csv_direct_in_columns(
        logs_ns3_dir + "/tcp_flow_" + str(tcp_flow_id) + "_cwnd.csv",
        "pos_int,pos_int,pos_int")[2]
    cwnd_inflated_values = exputil.read_csv_direct_in_columns(
        logs_ns3_dir + "/tcp_flow_" + str(tcp_flow_id) + "_cwnd_inflated.csv",
        "pos_int,pos_int,pos_int")[2]
    inflight_values = exputil.read_csv_direct_in_columns(
        logs_ns3_dir + "/tcp_flow_" + str(tcp_flow_id) + "_inflight.csv",
        "pos_int,pos_int,pos_int")[2]
    pdf_filename = pdf_out_dir + "/plot_tcp_flow_time_vs_together_" + str(
        tcp_flow_id) + ".pdf"
    plt_filename = "plot_tcp_flow_time_vs_together.plt"
    local_shell.copy_file(plt_filename, "temp.plt")
    local_shell.sed_replace_in_file_plain(
        "temp.plt", "[MAX-Y]",
        str(
            max(math.ceil(max_ssthresh / 1380.0),
                math.ceil(np.max(cwnd_values) / 1380.0),
                math.ceil(np.max(cwnd_inflated_values) / 1380.0),
                math.ceil(np.max(inflight_values) / 1380.0))))
    local_shell.sed_replace_in_file_plain("temp.plt", "[OUTPUT-FILE]",
                                          pdf_filename)
    local_shell.sed_replace_in_file_plain(
        "temp.plt", "[DATA-FILE-CWND]",
        logs_ns3_dir + "/tcp_flow_" + str(tcp_flow_id) + "_cwnd.csv")
    local_shell.sed_replace_in_file_plain(
        "temp.plt", "[DATA-FILE-CWND-INFLATED]",
        logs_ns3_dir + "/tcp_flow_" + str(tcp_flow_id) + "_cwnd_inflated.csv")
    local_shell.sed_replace_in_file_plain(
        "temp.plt", "[DATA-FILE-SSTHRESH]",
        logs_ns3_dir + "/tcp_flow_" + str(tcp_flow_id) + "_ssthresh.csv")
    local_shell.sed_replace_in_file_plain(
        "temp.plt", "[DATA-FILE-INFLIGHT]",
        logs_ns3_dir + "/tcp_flow_" + str(tcp_flow_id) + "_inflight.csv")
    local_shell.perfect_exec("gnuplot temp.plt")
    print("Produced plot: " + pdf_filename)
    local_shell.remove("temp.plt")
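
# The copy-template / substitute / gnuplot / clean-up sequence above repeats
# for every metric. A minimal refactoring sketch, assuming only the
# exputil.LocalShell calls already used in this example (the helper name
# render_gnuplot is hypothetical):
def render_gnuplot(local_shell, plt_filename, pdf_filename, data_filename):
    # Copy the gnuplot template, fill in its placeholders, render the PDF,
    # and remove the temporary file again
    local_shell.copy_file(plt_filename, "temp.plt")
    local_shell.sed_replace_in_file_plain("temp.plt", "[OUTPUT-FILE]",
                                          pdf_filename)
    local_shell.sed_replace_in_file_plain("temp.plt", "[DATA-FILE]",
                                          data_filename)
    local_shell.perfect_exec("gnuplot temp.plt")
    print("Produced plot: " + pdf_filename)
    local_shell.remove("temp.plt")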
Example no. 8
import exputil
from networkload import *

local_shell = exputil.LocalShell()

# Create one central pdf directory
local_shell.make_full_dir("pdf")

for utilization_interval_ns in [("1s", 1000000000), ("100ms", 100000000),
                                ("10ms", 10000000)]:
    for expected_flows_per_s in [10, 50, 100, 200, 300, 400, 500, 600, 700]:
        run_dir = "runs/util_interval_" + utilization_interval_ns[
            0] + "_arrival_" + str(expected_flows_per_s)

        # Create utilization plots
        local_shell.perfect_exec(
            "cd ../plot_help/utilization; python utilization_plot.py " +
            "../../util_example_runs/" + run_dir + "/logs_ns3 " +
            "../../util_example_runs/" + run_dir + "/data " +
            "../../util_example_runs/" + run_dir + "/pdf ./ 0 1",
            output_redirect=exputil.OutputRedirect.CONSOLE)

        # Copy to one central directory
        local_shell.copy_file(
            run_dir + "/pdf/plot_utilization_0_to_1.pdf",
            "pdf/util_interval_" + utilization_interval_ns[0] + "_load_" +
            str(expected_flows_per_s) + ".pdf")

# Print table for intended utilization
print("Load (flows/s)  Load (Gbit/s)   Expected utilization")
for expected_flows_per_s in [10, 50, 100, 200, 300, 400, 500, 600, 700]:

import exputil
import numpy as np

local_shell = exputil.LocalShell()

# Create the data files
local_shell.remove_force_recursive("data")
local_shell.make_full_dir("data")
with open("data/traffic_goodput_total_data_sent_vs_runtime.csv", "w+") \
     as f_out_data_sent_vs_runtime, \
     open("data/traffic_goodput_rate_vs_slowdown.csv", "w+") \
     as f_out_rate_vs_slowdown, \
     open("data/run_dirs.csv", "w+") \
     as f_out_run_dirs:

    for protocol_chosen in ["tcp", "udp"]:

        for run_dir_details in [
            ("runs/run_loaded_tm_pairing_1_Mbps_for_10s_with_" +
Example no. 10
    def test_distance_between_ground_stations(self):
        local_shell = exputil.LocalShell()

        # Create some ground stations
        with open("ground_stations.temp.txt", "w+") as f_out:
            f_out.write("0,Amsterdam,52.379189,4.899431,0\n")
            f_out.write("1,Paris,48.864716,2.349014,0\n")
            f_out.write("2,Rio de Janeiro,-22.970722,-43.182365,0\n")
            f_out.write("3,Manila,14.599512,120.984222,0\n")
            f_out.write("4,Perth,-31.953512,115.857048,0\n")
            f_out.write("5,Some place on Antarctica,-72.927148,33.450844,0\n")
            f_out.write("6,New York,40.730610,-73.935242,0\n")
            f_out.write("7,Some place in Greenland,79.741382,-53.143087,0")
        ground_stations = read_ground_stations_basic("ground_stations.temp.txt")

        # Distance to itself is always 0
        for i in range(8):
            self.assertEqual(
                geodesic_distance_m_between_ground_stations(ground_stations[i], ground_stations[i]),
                0
            )
            self.assertEqual(
                straight_distance_m_between_ground_stations(ground_stations[i], ground_stations[i]),
                0
            )

        # Direction does not matter
        for i in range(8):
            for j in range(8):
                self.assertAlmostEqual(
                    geodesic_distance_m_between_ground_stations(ground_stations[i], ground_stations[j]),
                    geodesic_distance_m_between_ground_stations(ground_stations[j], ground_stations[i]),
                    delta=0.00001
                )
                self.assertAlmostEqual(
                    straight_distance_m_between_ground_stations(ground_stations[i], ground_stations[j]),
                    straight_distance_m_between_ground_stations(ground_stations[j], ground_stations[i]),
                    delta=0.00001
                )

                # The geodesic follows the Earth's surface, whereas the
                # straight distance is the chord through it, so for distinct
                # points the geodesic is always strictly greater
                if i != j:
                    self.assertGreater(
                        geodesic_distance_m_between_ground_stations(ground_stations[i], ground_stations[j]),
                        straight_distance_m_between_ground_stations(ground_stations[i], ground_stations[j])
                    )

        # Amsterdam to Paris
        self.assertAlmostEqual(
            geodesic_distance_m_between_ground_stations(ground_stations[0], ground_stations[1]),
            430000,  # 430 km
            delta=1000.0
        )

        # Amsterdam to New York
        self.assertAlmostEqual(
            geodesic_distance_m_between_ground_stations(ground_stations[0], ground_stations[6]),
            5861000,  # 5861 km
            delta=5000.0
        )

        # New York to Antarctica
        self.assertAlmostEqual(
            geodesic_distance_m_between_ground_stations(ground_stations[6], ground_stations[5]),
            14861000,  # 14861 km
            delta=20000.0
        )

        # Clean up
        local_shell.remove("ground_stations.temp.txt")
Example no. 11
    def test_around_equator_connectivity_with_starlink(self):
        local_shell = exputil.LocalShell()

        # Output directory
        temp_gen_data = "temp_dynamic_state_gen_data"
        name = "small_equator_constellation"
        local_shell.make_full_dir(temp_gen_data + "/" + name)

        # At t=0
        # epoch = Time("2000-01-01 00:00:00", scale="tdb")
        # time_since_epoch_ns = 0
        time_step_ms = 1000
        duration_s = 1

        # Ground stations
        local_shell.write_file(
            temp_gen_data + "/" + name + "/ground_stations.txt",
            ("0,Luanda,-8.836820,13.234320,0.000000,6135530.183815,1442953.502786,-973332.344974\n"
             "1,Lagos,6.453060,3.395830,0.000000,6326864.177950,375422.898833,712064.787620\n"
             "2,Kinshasa,-4.327580,15.313570,0.000000,6134256.671861,1679704.404461,-478073.165313\n"
             "3,Ar-Riyadh-(Riyadh),24.690466,46.709566,0.000000,3975957.341095,4220595.030186,2647959.980346"
             ))

        # Satellites (TLEs)
        local_shell.write_file(temp_gen_data + "/" + name + "/tles.txt", (
            "1 4\n"
            "Starlink-550 0\n"
            "1 01308U 00000ABC 00001.00000000  .00000000  00000-0  00000+0 0    05\n"
            "2 01308  53.0000 295.0000 0000001   0.0000 155.4545 15.19000000    04\n"
            "Starlink-550 1\n"
            "1 01309U 00000ABC 00001.00000000  .00000000  00000-0  00000+0 0    06\n"
            "2 01309  53.0000 295.0000 0000001   0.0000 171.8182 15.19000000    04\n"
            "Starlink-550 2\n"
            "1 01310U 00000ABC 00001.00000000  .00000000  00000-0  00000+0 0    08\n"
            "2 01310  53.0000 295.0000 0000001   0.0000 188.1818 15.19000000    03\n"
            "Starlink-550 3\n"
            "1 01311U 00000ABC 00001.00000000  .00000000  00000-0  00000+0 0    09\n"
            "2 01311  53.0000 295.0000 0000001   0.0000 204.5455 15.19000000    04"
        ))

        # ISLs
        local_shell.write_file(temp_gen_data + "/" + name + "/isls.txt",
                               ("0 1\n"
                                "1 2\n"
                                "2 3"))

        # GSL interfaces info
        local_shell.write_file(
            temp_gen_data + "/" + name + "/gsl_interfaces_info.txt",
            ("0,1,1.0\n"
             "1,1,1.0\n"
             "2,1,1.0\n"
             "3,1,1.0\n"
             "4,1,1.0\n"
             "5,1,1.0\n"
             "6,1,1.0\n"
             "7,1,1.0"))

        # Maximum GSL / ISL length
        max_gsl_length_m = 1089686.4181956202
        max_isl_length_m = 5016591.2330984278

        # Algorithm
        dynamic_state_algorithm = "algorithm_free_one_only_over_isls"

        # Call the helper
        help_dynamic_state(temp_gen_data, 1, name, time_step_ms, duration_s,
                           max_gsl_length_m, max_isl_length_m,
                           dynamic_state_algorithm, True)

        # Now we are going to compare the generated fstate_0.txt and gsl_if_bandwidth_0.txt
        # against the expected outcome.

        # Forwarding state
        fstate = {}
        with open(
                temp_gen_data + "/" + name +
                "/dynamic_state_1000ms_for_1s/fstate_0.txt", "r") as f_in:
            for line in f_in:
                spl = line.split(",")
                self.assertEqual(len(spl), 5)
                fstate[(int(spl[0]), int(spl[1]))] = (int(spl[2]), int(spl[3]),
                                                      int(spl[4]))
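
        # (Inferred from the checks below) Each fstate entry maps
        # (current node, destination node) to a triple of
        # (next-hop node id, own interface id, next-hop interface id);
        # (-1, -1, -1) marks an unreachable destination.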

        # Check forwarding state content
        self.assertEqual(len(fstate.keys()), 8 * 4 - 4)

        # Satellite 0 always forwards to satellite 1 as it is out of range of all others
        self.assertEqual(fstate[(0, 4)], (1, 0, 0))
        self.assertEqual(fstate[(0, 5)], (1, 0, 0))
        self.assertEqual(fstate[(0, 6)], (1, 0, 0))
        self.assertEqual(fstate[(0, 7)], (-1, -1, -1))

        # Satellite 1 has Lagos (5) in range, but not the others
        self.assertEqual(fstate[(1, 4)], (2, 1, 0))
        self.assertEqual(fstate[(1, 5)], (5, 2, 0))
        self.assertEqual(fstate[(1, 6)], (2, 1, 0))
        self.assertEqual(fstate[(1, 7)], (-1, -1, -1))

        # Satellite 2 has (4, 6) in range, but not the others
        self.assertEqual(fstate[(2, 4)], (4, 2, 0))
        self.assertEqual(fstate[(2, 5)], (1, 0, 1))
        self.assertEqual(fstate[(2, 6)], (6, 2, 0))
        self.assertEqual(fstate[(2, 7)], (-1, -1, -1))

        # Satellite 3 has none in range
        self.assertEqual(fstate[(3, 4)], (2, 0, 1))
        self.assertEqual(fstate[(3, 5)], (2, 0, 1))
        self.assertEqual(fstate[(3, 6)], (2, 0, 1))
        self.assertEqual(fstate[(3, 7)], (-1, -1, -1))

        # Ground station 0 (id: 4) has satellite 2 in range
        self.assertEqual(fstate[(4, 5)], (2, 0, 2))
        self.assertEqual(fstate[(4, 6)], (2, 0, 2))
        self.assertEqual(fstate[(4, 7)], (-1, -1, -1))

        # Ground station 1 (id: 5) has satellite 1 in range
        self.assertEqual(fstate[(5, 4)], (1, 0, 2))
        self.assertEqual(fstate[(5, 6)], (1, 0, 2))
        self.assertEqual(fstate[(5, 7)], (-1, -1, -1))

        # Ground station 2 (id: 6) has satellite 2 in range
        self.assertEqual(fstate[(6, 4)], (2, 0, 2))
        self.assertEqual(fstate[(6, 5)], (2, 0, 2))
        self.assertEqual(fstate[(6, 7)], (-1, -1, -1))

        # Ground station 3 (id: 7) has no satellites in range
        self.assertEqual(fstate[(7, 4)], (-1, -1, -1))
        self.assertEqual(fstate[(7, 5)], (-1, -1, -1))
        self.assertEqual(fstate[(7, 6)], (-1, -1, -1))

        # GSL interface bandwidth
        gsl_if_bandwidth = {}
        with open(
                temp_gen_data + "/" + name +
                "/dynamic_state_1000ms_for_1s/gsl_if_bandwidth_0.txt",
                "r") as f_in:
            for line in f_in:
                spl = line.split(",")
                self.assertEqual(len(spl), 3)
                gsl_if_bandwidth[(int(spl[0]), int(spl[1]))] = float(spl[2])

        # Check GSL interface content
        self.assertEqual(len(gsl_if_bandwidth.keys()), 8)
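        # Interface numbering (inferred from the ISLs above): ISL interfaces
        # come first, so satellites 1 and 2 (two ISLs each) get their GSL
        # interface at index 2, satellites 0 and 3 (one ISL each) at index 1,
        # and ground stations (no ISLs) at index 0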
        for node_id in range(8):
            if node_id == 1 or node_id == 2:
                self.assertEqual(gsl_if_bandwidth[(node_id, 2)], 1.0)
            elif node_id == 0 or node_id == 3:
                self.assertEqual(gsl_if_bandwidth[(node_id, 1)], 1.0)
            else:
                self.assertEqual(gsl_if_bandwidth[(node_id, 0)], 1.0)

        # Clean up
        local_shell.remove_force_recursive(temp_gen_data)
Example no. 12
    def test_isls_invalid_file(self):
        local_shell = exputil.LocalShell()

        # Invalid left index
        local_shell.write_file("isls.txt.tmp", "2 3\n5 6\n9 0")
        try:
            satgen.read_isls("isls.txt.tmp", 9)
            self.fail()
        except ValueError:
            self.assertTrue(True)
        os.remove("isls.txt.tmp")

        # Invalid right index
        local_shell.write_file("isls.txt.tmp", "2 3\n5 6\n6 9\n3 99")
        try:
            satgen.read_isls("isls.txt.tmp", 50)
            self.fail()
        except ValueError:
            self.assertTrue(True)
        os.remove("isls.txt.tmp")

        # Invalid left index
        local_shell.write_file("isls.txt.tmp", "2 3\n5 6\n6 8\n-3 3")
        try:
            satgen.read_isls("isls.txt.tmp", 50)
            self.fail()
        except ValueError:
            self.assertTrue(True)
        os.remove("isls.txt.tmp")

        # Invalid right index
        local_shell.write_file("isls.txt.tmp", "2 3\n5 6\n1 -3\n6 8")
        try:
            satgen.read_isls("isls.txt.tmp", 50)
            self.fail()
        except ValueError:
            self.assertTrue(True)
        os.remove("isls.txt.tmp")

        # Left is larger than right
        local_shell.write_file("isls.txt.tmp", "6 5")
        try:
            satgen.read_isls("isls.txt.tmp", 10)
            self.fail()
        except ValueError:
            self.assertTrue(True)
        os.remove("isls.txt.tmp")

        # Left is equal to right
        local_shell.write_file("isls.txt.tmp", "5 5")
        try:
            satgen.read_isls("isls.txt.tmp", 10)
            self.fail()
        except ValueError:
            self.assertTrue(True)
        os.remove("isls.txt.tmp")

        # Duplicate
        local_shell.write_file("isls.txt.tmp", "2 3\n5 6\n3 9\n5 6\n2 9")
        try:
            satgen.read_isls("isls.txt.tmp", 10)
            self.fail()
        except ValueError:
            self.assertTrue(True)
        os.remove("isls.txt.tmp")
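
        # Note: each try/fail/except block above could be written more
        # idiomatically with unittest's assertRaises context manager, e.g.
        # for the duplicate case:
        #
        #     local_shell.write_file("isls.txt.tmp", "2 3\n5 6\n3 9\n5 6\n2 9")
        #     with self.assertRaises(ValueError):
        #         satgen.read_isls("isls.txt.tmp", 10)
        #     os.remove("isls.txt.tmp")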
Example no. 13
def plot_udp_burst(logs_ns3_dir, data_out_dir, pdf_out_dir, udp_burst_id,
                   interval_ns):
    local_shell = exputil.LocalShell()

    # Check that all plotting files are available
    if (not local_shell.file_exists("plot_udp_burst_time_vs_amount_sent.plt")
            or not local_shell.file_exists(
                "plot_udp_burst_time_vs_amount_arrived.plt") or
            not local_shell.file_exists("plot_udp_burst_time_vs_sent_rate.plt")
            or not local_shell.file_exists(
                "plot_udp_burst_time_vs_arrived_rate.plt")
            or not local_shell.file_exists(
                "plot_udp_burst_time_vs_one_way_latency.plt")):
        print("The gnuplot files are not present.")
        print(
            "Are you executing this python file inside the plot_udp_burst directory?"
        )
        exit(1)

    # Create the output directories if they don't exist yet
    local_shell.make_full_dir(data_out_dir)
    local_shell.make_full_dir(pdf_out_dir)

    # Read in CSV of the outgoing packets
    outgoing_csv_columns = exputil.read_csv_direct_in_columns(
        logs_ns3_dir + "/udp_burst_" + str(udp_burst_id) + "_outgoing.csv",
        "pos_int,pos_int,pos_int")
    outgoing_num_entries = len(outgoing_csv_columns[0])
    outgoing_udp_burst_id_list = outgoing_csv_columns[0]
    if outgoing_udp_burst_id_list != [udp_burst_id] * outgoing_num_entries:
        raise ValueError("Mismatched UDP burst ID in outgoing data")
    outgoing_seq_no_list = outgoing_csv_columns[1]
    if outgoing_seq_no_list != list(range(outgoing_num_entries)):
        raise ValueError("Not all outgoing sequence numbers are incremented")
    outgoing_time_ns_list = outgoing_csv_columns[2]

    # Read in CSV of the incoming packets
    incoming_csv_columns = exputil.read_csv_direct_in_columns(
        logs_ns3_dir + "/udp_burst_" + str(udp_burst_id) + "_incoming.csv",
        "pos_int,pos_int,pos_int")
    incoming_num_entries = len(incoming_csv_columns[0])
    incoming_udp_burst_id_list = incoming_csv_columns[0]
    if incoming_udp_burst_id_list != [udp_burst_id] * incoming_num_entries:
        raise ValueError("Mismatched UDP burst ID in incoming data")
    incoming_seq_no_list = incoming_csv_columns[1]
    incoming_time_ns_list = incoming_csv_columns[2]
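
    # Outgoing packets must form a gapless, strictly increasing sequence
    # (checked above); the incoming trace may miss or reorder packets, so its
    # sequence numbers are only used to match packets against their send
    # times when computing one-way latency below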

    # Generate the data files
    filename_sent_amount_byte = generate_udp_burst_sent_amount_csv(
        data_out_dir, udp_burst_id, outgoing_time_ns_list)
    filename_arrived_amount_byte = generate_udp_burst_arrived_amount_csv(
        data_out_dir, udp_burst_id, incoming_time_ns_list)
    filename_sent_rate_megabit_per_s = generate_udp_burst_sent_rate_csv(
        data_out_dir, udp_burst_id, outgoing_time_ns_list, interval_ns)
    filename_arrived_rate_megabit_per_s = generate_udp_burst_arrived_rate_csv(
        data_out_dir, udp_burst_id, incoming_time_ns_list, interval_ns)
    filename_latency_ns = generate_udp_burst_latency_csv(
        data_out_dir, udp_burst_id, outgoing_time_ns_list,
        incoming_seq_no_list, incoming_time_ns_list)

    # Plot time vs. amount sent
    pdf_filename = pdf_out_dir + "/plot_udp_burst_time_vs_amount_sent_" + str(
        udp_burst_id) + ".pdf"
    plt_filename = "plot_udp_burst_time_vs_amount_sent.plt"
    local_shell.copy_file(plt_filename, "temp.plt")
    local_shell.sed_replace_in_file_plain("temp.plt", "[OUTPUT-FILE]",
                                          pdf_filename)
    local_shell.sed_replace_in_file_plain("temp.plt", "[DATA-FILE]",
                                          filename_sent_amount_byte)
    local_shell.perfect_exec("gnuplot temp.plt")
    print("Produced plot: " + pdf_filename)
    local_shell.remove("temp.plt")

    # Plot time vs. amount arrived
    pdf_filename = pdf_out_dir + "/plot_udp_burst_time_vs_amount_arrived_" + str(
        udp_burst_id) + ".pdf"
    plt_filename = "plot_udp_burst_time_vs_amount_arrived.plt"
    local_shell.copy_file(plt_filename, "temp.plt")
    local_shell.sed_replace_in_file_plain("temp.plt", "[OUTPUT-FILE]",
                                          pdf_filename)
    local_shell.sed_replace_in_file_plain("temp.plt", "[DATA-FILE]",
                                          filename_arrived_amount_byte)
    local_shell.perfect_exec("gnuplot temp.plt")
    print("Produced plot: " + pdf_filename)
    local_shell.remove("temp.plt")

    # Plot time vs. sent rate
    pdf_filename = pdf_out_dir + "/plot_udp_burst_time_vs_sent_rate_" + str(
        udp_burst_id) + ".pdf"
    plt_filename = "plot_udp_burst_time_vs_sent_rate.plt"
    local_shell.copy_file(plt_filename, "temp.plt")
    local_shell.sed_replace_in_file_plain("temp.plt", "[OUTPUT-FILE]",
                                          pdf_filename)
    local_shell.sed_replace_in_file_plain("temp.plt", "[DATA-FILE]",
                                          filename_sent_rate_megabit_per_s)
    local_shell.perfect_exec("gnuplot temp.plt")
    print("Produced plot: " + pdf_filename)
    local_shell.remove("temp.plt")

    # Plot time vs. arrived rate
    pdf_filename = pdf_out_dir + "/plot_udp_burst_time_vs_arrived_rate_" + str(
        udp_burst_id) + ".pdf"
    plt_filename = "plot_udp_burst_time_vs_arrived_rate.plt"
    local_shell.copy_file(plt_filename, "temp.plt")
    local_shell.sed_replace_in_file_plain("temp.plt", "[OUTPUT-FILE]",
                                          pdf_filename)
    local_shell.sed_replace_in_file_plain("temp.plt", "[DATA-FILE]",
                                          filename_arrived_rate_megabit_per_s)
    local_shell.perfect_exec("gnuplot temp.plt")
    print("Produced plot: " + pdf_filename)
    local_shell.remove("temp.plt")

    # Plot time vs. latency
    pdf_filename = pdf_out_dir + "/plot_udp_burst_time_vs_one_way_latency_" + str(
        udp_burst_id) + ".pdf"
    plt_filename = "plot_udp_burst_time_vs_one_way_latency.plt"
    local_shell.copy_file(plt_filename, "temp.plt")
    local_shell.sed_replace_in_file_plain("temp.plt", "[OUTPUT-FILE]",
                                          pdf_filename)
    local_shell.sed_replace_in_file_plain("temp.plt", "[DATA-FILE]",
                                          filename_latency_ns)
    local_shell.perfect_exec("gnuplot temp.plt")
    print("Produced plot: " + pdf_filename)
    local_shell.remove("temp.plt")
Example no. 14
def analyze_rtt(output_data_dir, satellite_network_dir,
                dynamic_state_update_interval_ms, simulation_end_time_s,
                satgenpy_dir_with_ending_slash):

    # Dynamic state directory
    satellite_network_dynamic_state_dir = "%s/dynamic_state_%dms_for_%ds" % (
        satellite_network_dir, dynamic_state_update_interval_ms,
        simulation_end_time_s)

    # Local shell
    local_shell = exputil.LocalShell()
    core_network_folder_name = satellite_network_dir.split("/")[-1]
    base_output_dir = "%s/%s/%dms_for_%ds/rtt" % (
        output_data_dir, core_network_folder_name,
        dynamic_state_update_interval_ms, simulation_end_time_s)
    pdf_dir = base_output_dir + "/pdf"
    data_dir = base_output_dir + "/data"
    local_shell.remove_force_recursive(pdf_dir)
    local_shell.remove_force_recursive(data_dir)
    local_shell.make_full_dir(pdf_dir)
    local_shell.make_full_dir(data_dir)

    # Variables (load in for each thread such that they don't interfere)
    ground_stations = read_ground_stations_extended(satellite_network_dir +
                                                    "/ground_stations.txt")
    tles = read_tles(satellite_network_dir + "/tles.txt")
    list_isls = read_isls(satellite_network_dir + "/isls.txt")
    satellites = tles["satellites"]
    epoch = tles["epoch"]
    description = exputil.PropertiesConfig(satellite_network_dir +
                                           "/description.txt")

    # Derivatives
    simulation_end_time_ns = simulation_end_time_s * 1000 * 1000 * 1000
    dynamic_state_update_interval_ns = dynamic_state_update_interval_ms * 1000 * 1000
    max_gsl_length_m = exputil.parse_positive_float(
        description.get_property_or_fail("max_gsl_length_m"))
    max_isl_length_m = exputil.parse_positive_float(
        description.get_property_or_fail("max_isl_length_m"))

    # Analysis
    rtt_list_per_pair = []
    for i in range(len(ground_stations)):
        temp_list = []
        for j in range(len(ground_stations)):
            temp_list.append([])
        rtt_list_per_pair.append(temp_list)
    unreachable_per_pair = np.zeros(
        (len(ground_stations), len(ground_stations)))

    # For each time moment
    fstate = {}
    num_iterations = simulation_end_time_ns // dynamic_state_update_interval_ns
    it = 1
    for t in range(0, simulation_end_time_ns,
                   dynamic_state_update_interval_ns):

        # Read in forwarding state
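        # (Assumption based on how fstate persists across iterations here:
        # fstate files after t=0 contain only the entries that changed, so
        # the dictionary is updated incrementally rather than rebuilt.)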
        with open(
                satellite_network_dynamic_state_dir + "/fstate_" + str(t) +
                ".txt", "r") as f_in:
            for line in f_in:
                spl = line.split(",")
                current = int(spl[0])
                destination = int(spl[1])
                next_hop = int(spl[2])
                fstate[(current, destination)] = next_hop

            # Given we are going to graph often, we can pre-compute the edge lengths
            graph_with_distance = construct_graph_with_distances(
                epoch, t, satellites, ground_stations, list_isls,
                max_gsl_length_m, max_isl_length_m)

            # Go over each pair of ground stations and calculate the length
            for src in range(len(ground_stations)):
                for dst in range(src + 1, len(ground_stations)):
                    src_node_id = len(satellites) + src
                    dst_node_id = len(satellites) + dst
                    path = get_path(src_node_id, dst_node_id, fstate)
                    if path is None:
                        unreachable_per_pair[(src, dst)] += 1
                    else:
                        length_path_m = compute_path_length_with_graph(
                            path, graph_with_distance)
                        rtt_list_per_pair[src][dst].append(
                            (2 * length_path_m) * 1000000000.0 /
                            SPEED_OF_LIGHT_M_PER_S)

        # Show progress a bit
        print("%d / %d" % (it, num_iterations))
        it += 1
    print("")

    #################################################

    # ECDF stuff, which is quick, so we do that first

    # Find all the lists
    list_min_rtt_ns = []
    list_max_rtt_ns = []
    list_max_minus_min_rtt_ns = []
    list_max_rtt_to_min_rtt_slowdown = []
    list_max_rtt_to_geodesic_slowdown = []
    for src in range(len(ground_stations)):
        for dst in range(src + 1, len(ground_stations)):
            min_rtt_ns = np.min(rtt_list_per_pair[src][dst])
            max_rtt_ns = np.max(rtt_list_per_pair[src][dst])
            max_rtt_slowdown = float(max_rtt_ns) / float(min_rtt_ns)
            list_min_rtt_ns.append(min_rtt_ns)
            list_max_rtt_ns.append(max_rtt_ns)
            list_max_minus_min_rtt_ns.append(max_rtt_ns - min_rtt_ns)
            list_max_rtt_to_min_rtt_slowdown.append(max_rtt_slowdown)

            geodesic_distance_m = great_circle(
                (ground_stations[src]["latitude"],
                 ground_stations[src]["longitude"]),
                (ground_stations[dst]["latitude"],
                 ground_stations[dst]["longitude"]),
                radius=EARTH_RADIUS_KM).m
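            # Skip pairs below the geodesic distance cutoff: for short
            # distances the geodesic RTT approaches zero and the slowdown
            # ratio degenerates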
            if geodesic_distance_m >= GEODESIC_ECDF_PLOT_CUTOFF_KM * 1000:
                geodesic_rtt_ns = geodesic_distance_m * 2 * 1000000000.0 / SPEED_OF_LIGHT_M_PER_S
                list_max_rtt_to_geodesic_slowdown.append(
                    float(max_rtt_ns) / float(geodesic_rtt_ns))

    # Write and plot ECDFs
    for element in [
        ("ecdf_pairs_min_rtt_ns", ECDF(list_min_rtt_ns)),
        ("ecdf_pairs_max_rtt_ns", ECDF(list_max_rtt_ns)),
        ("ecdf_pairs_max_minus_min_rtt_ns", ECDF(list_max_minus_min_rtt_ns)),
        ("ecdf_pairs_max_rtt_to_min_rtt_slowdown",
         ECDF(list_max_rtt_to_min_rtt_slowdown)),
        ("ecdf_pairs_max_rtt_to_geodesic_slowdown",
         ECDF(list_max_rtt_to_geodesic_slowdown)),
    ]:
        name = element[0]
        ecdf = element[1]
        with open(data_dir + "/" + name + ".txt", "w+") as f_out:
            for i in range(len(ecdf.x)):
                f_out.write(str(ecdf.x[i]) + "," + str(ecdf.y[i]) + "\n")
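
    # Note: statsmodels' ECDF exposes ecdf.x (starting at -inf) and ecdf.y
    # (cumulative fractions starting at 0), which is why each written file
    # begins with a "-inf,0.0" row, matching the (-inf, 0) checks in the
    # tests earlier in this document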

    #################################################

    # Largest RTT delta
    with open(data_dir + "/top_10_largest_rtt_delta.txt", "w+") as f_out:
        largest_rtt_delta_list = []
        for src in range(len(ground_stations)):
            for dst in range(src + 1, len(ground_stations)):
                min_rtt_ns = np.min(rtt_list_per_pair[src][dst])
                max_rtt_ns = np.max(rtt_list_per_pair[src][dst])
                largest_rtt_delta_list.append(
                    (max_rtt_ns - min_rtt_ns, min_rtt_ns, max_rtt_ns, src,
                     dst))
        largest_rtt_delta_list = sorted(largest_rtt_delta_list, reverse=True)
        f_out.write("LARGEST RTT DELTA TOP-10 WITHOUT DUPLICATE NODES\n")
        f_out.write(
            "---------------------------------------------------------------\n"
        )
        f_out.write(
            "#      Pair           Delta (ms)   Min. RTT (ms)   Max. RTT (ms)\n"
        )
        already_plotted_nodes = set()
        num_plotted = 0
        for i in range(len(largest_rtt_delta_list)):
            if largest_rtt_delta_list[i][3] not in already_plotted_nodes \
                    and largest_rtt_delta_list[i][4] not in already_plotted_nodes:
                f_out.write(
                    "%-3d    %-4d -> %4d   %-8.2f     %-8.2f        %-8.2f\n" %
                    (
                        i + 1,
                        len(satellites) + largest_rtt_delta_list[i][3],
                        len(satellites) + largest_rtt_delta_list[i][4],
                        largest_rtt_delta_list[i][0] / 1e6,
                        largest_rtt_delta_list[i][1] / 1e6,
                        largest_rtt_delta_list[i][2] / 1e6,
                    ))
                print_routes_and_rtt(
                    base_output_dir, satellite_network_dir,
                    dynamic_state_update_interval_ms, simulation_end_time_s,
                    len(satellites) + largest_rtt_delta_list[i][3],
                    len(satellites) + largest_rtt_delta_list[i][4],
                    satgenpy_dir_with_ending_slash)
                already_plotted_nodes.add(largest_rtt_delta_list[i][3])
                already_plotted_nodes.add(largest_rtt_delta_list[i][4])
                num_plotted += 1
                if num_plotted >= 10:
                    break
        f_out.write(
            "---------------------------------------------------------------\n"
        )
        f_out.write("\n")

    # Most unreachable
    with open(data_dir + "/top_10_most_unreachable.txt", "w+") as f_out:
        most_unreachable_list = []
        for src in range(len(ground_stations)):
            for dst in range(src + 1, len(ground_stations)):
                most_unreachable_list.append(
                    (unreachable_per_pair[(src, dst)], src, dst))
        most_unreachable_list = sorted(most_unreachable_list, reverse=True)
        f_out.write("MOST UNREACHABLE DELTA TOP-10 WITHOUT DUPLICATE NODES\n")
        f_out.write("---------------------------------------\n")
        f_out.write("#      Pair           Times unreachable\n")
        already_plotted_nodes = set()
        num_plotted = 0
        for i in range(len(most_unreachable_list)):
            if most_unreachable_list[i][1] not in already_plotted_nodes \
                    and most_unreachable_list[i][2] not in already_plotted_nodes:
                f_out.write(
                    "%-3d    %-4d -> %4d   %d\n" %
                    (i + 1, len(satellites) + most_unreachable_list[i][1],
                     len(satellites) + most_unreachable_list[i][2],
                     most_unreachable_list[i][0]))
                print_routes_and_rtt(
                    base_output_dir, satellite_network_dir,
                    dynamic_state_update_interval_ms, simulation_end_time_s,
                    len(satellites) + most_unreachable_list[i][1],
                    len(satellites) + most_unreachable_list[i][2],
                    satgenpy_dir_with_ending_slash)
                already_plotted_nodes.add(most_unreachable_list[i][1])
                already_plotted_nodes.add(most_unreachable_list[i][2])
                num_plotted += 1
                if num_plotted >= 10:
                    break
        f_out.write("---------------------------------------\n")
        f_out.write("\n")

    print("Done")
def calculate_fstate_for(num_satellites, num_ground_stations, edges):
    local_shell = exputil.LocalShell()

    sat_net_graph_only_isls = nx.Graph()
    sat_net_graph_only_gsls = nx.Graph()
    sat_net_graph_complete = nx.Graph()

    # Nodes
    ground_station_satellites_in_range = []
    for i in range(num_satellites):
        sat_net_graph_only_isls.add_node(i)
        sat_net_graph_only_gsls.add_node(i)
        sat_net_graph_complete.add_node(i)
    for i in range(num_satellites, num_satellites + num_ground_stations):
        sat_net_graph_only_gsls.add_node(i)
        sat_net_graph_complete.add_node(i)
        ground_station_satellites_in_range.append([])

    # Edges
    num_isls_per_sat = [0] * num_satellites
    sat_neighbor_to_if = {}
    for e in edges:
        if e[0] < num_satellites and e[1] < num_satellites:
            sat_net_graph_only_isls.add_edge(e[0], e[1], weight=e[2])
            sat_net_graph_complete.add_edge(e[0], e[1], weight=e[2])
            sat_neighbor_to_if[(e[0], e[1])] = num_isls_per_sat[e[0]]
            sat_neighbor_to_if[(e[1], e[0])] = num_isls_per_sat[e[1]]
            num_isls_per_sat[e[0]] += 1
            num_isls_per_sat[e[1]] += 1
        if e[0] >= num_satellites or e[1] >= num_satellites:
            sat_net_graph_only_gsls.add_edge(e[0], e[1], weight=e[2])
            sat_net_graph_complete.add_edge(e[0], e[1], weight=e[2])
            gid = max(e[0], e[1]) - num_satellites
            ground_station_satellites_in_range[gid].append(
                (e[2], min(e[0], e[1])))

    # The GS-relays-only variant has no ISLs
    num_isls_per_sat_for_only_gs_relays = [0] * num_satellites

    # Finally, map each ground station ID (GID) to the GSL interface
    # index it communicates with on each satellite
    gid_to_sat_gsl_if_idx = list(range(num_ground_stations))

    # Output directory
    temp_dir = "temp_fstate_calculation_test"
    local_shell.make_full_dir(temp_dir)
    output_dynamic_state_dir = temp_dir
    time_since_epoch_ns = 0

    # Now let's call it
    prev_fstate = None
    enable_verbose_logs = True

    # Return all three
    result = {
        "without_gs_relays":
        calculate_fstate_shortest_path_without_gs_relaying(
            output_dynamic_state_dir, time_since_epoch_ns, num_satellites,
            num_ground_stations, sat_net_graph_only_isls, num_isls_per_sat,
            gid_to_sat_gsl_if_idx, ground_station_satellites_in_range,
            sat_neighbor_to_if, prev_fstate, enable_verbose_logs),
        "only_gs_relays":
        calculate_fstate_shortest_path_with_gs_relaying(
            output_dynamic_state_dir, time_since_epoch_ns, num_satellites,
            num_ground_stations, sat_net_graph_only_gsls,
            num_isls_per_sat_for_only_gs_relays, gid_to_sat_gsl_if_idx,
            sat_neighbor_to_if, prev_fstate, enable_verbose_logs),
        "combined":
        calculate_fstate_shortest_path_with_gs_relaying(
            output_dynamic_state_dir, time_since_epoch_ns, num_satellites,
            num_ground_stations, sat_net_graph_complete, num_isls_per_sat,
            gid_to_sat_gsl_if_idx, sat_neighbor_to_if, prev_fstate,
            enable_verbose_logs)
    }

    # Remove the temporary directory afterwards
    local_shell.remove_force_recursive(temp_dir)

    return result
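
A minimal usage sketch for calculate_fstate_for, with a toy topology whose node IDs and distances are purely illustrative (it also assumes the satgen helpers it calls return plain forwarding-state dictionaries):

# Toy topology: nodes 0 and 1 are satellites, nodes 2 and 3 are ground
# stations; each edge is (node_a, node_b, weight), with illustrative
# distances in meters
edges = [
    (0, 1, 2000e3),  # ISL between the two satellites
    (0, 2, 600e3),   # GSL: ground station 2 (GID 0) in range of satellite 0
    (1, 3, 700e3),   # GSL: ground station 3 (GID 1) in range of satellite 1
]
result = calculate_fstate_for(num_satellites=2, num_ground_stations=2,
                              edges=edges)
for variant, fstate in result.items():
    print(variant, fstate)
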
Example no. 16
def analyze_path(output_data_dir, satellite_network_dir,
                 dynamic_state_update_interval_ms, simulation_end_time_s,
                 satgenpy_dir_with_ending_slash):

    # Variables (load in for each thread such that they don't interfere)
    satellite_network_dynamic_state_dir = "%s/dynamic_state_%dms_for_%ds" % (
        satellite_network_dir, dynamic_state_update_interval_ms,
        simulation_end_time_s)
    ground_stations = read_ground_stations_extended(satellite_network_dir +
                                                    "/ground_stations.txt")
    tles = read_tles(satellite_network_dir + "/tles.txt")
    satellites = tles["satellites"]

    # Local shell
    local_shell = exputil.LocalShell()
    core_network_folder_name = satellite_network_dir.split("/")[-1]
    base_output_dir = "%s/%s/%dms_for_%ds/path" % (
        output_data_dir, core_network_folder_name,
        dynamic_state_update_interval_ms, simulation_end_time_s)
    pdf_dir = base_output_dir + "/pdf"
    data_dir = base_output_dir + "/data"
    local_shell.remove_force_recursive(pdf_dir)
    local_shell.remove_force_recursive(data_dir)
    local_shell.make_full_dir(pdf_dir)
    local_shell.make_full_dir(data_dir)

    # Derivatives
    simulation_end_time_ns = simulation_end_time_s * 1000 * 1000 * 1000
    dynamic_state_update_interval_ns = dynamic_state_update_interval_ms * 1000 * 1000

    # Analysis
    path_list_per_pair = []
    for i in range(len(ground_stations)):
        temp_list = []
        for j in range(len(ground_stations)):
            temp_list.append([])
        path_list_per_pair.append(temp_list)

    # Time step analysis
    time_step_num_path_changes = []
    time_step_num_fstate_updates = []

    # For each time moment
    fstate = {}
    num_iterations = simulation_end_time_ns // dynamic_state_update_interval_ns
    it = 1
    for t in range(0, simulation_end_time_ns,
                   dynamic_state_update_interval_ns):
        num_path_changes = 0
        num_fstate_updates = 0

        # Read in forwarding state
        with open(
                satellite_network_dynamic_state_dir + "/fstate_" + str(t) +
                ".txt", "r") as f_in:
            for line in f_in:
                spl = line.split(",")
                current = int(spl[0])
                destination = int(spl[1])
                next_hop = int(spl[2])
                fstate[(current, destination)] = next_hop
                num_fstate_updates += 1

            # Go over each pair of ground stations and calculate the length
            for src in range(len(ground_stations)):
                for dst in range(src + 1, len(ground_stations)):
                    src_node_id = len(satellites) + src
                    dst_node_id = len(satellites) + dst
                    path = get_path(src_node_id, dst_node_id, fstate)
                    if path is None:
                        if len(path_list_per_pair[src][dst]) == 0 \
                                or path_list_per_pair[src][dst][-1] != []:
                            path_list_per_pair[src][dst].append([])
                            num_path_changes += 1
                    else:
                        if len(path_list_per_pair[src][dst]) == 0 \
                                or path != path_list_per_pair[src][dst][-1]:
                            path_list_per_pair[src][dst].append(path)
                            num_path_changes += 1

        # First iteration has an update for all, which is not interesting
        # to show in the ECDF and is not really a "change" / "update"
        if it != 1:
            time_step_num_path_changes.append(num_path_changes)
            time_step_num_fstate_updates.append(num_fstate_updates)

        # Show progress a bit
        print("%d / %d" % (it, num_iterations))
        it += 1
    print("")

    # Calculate hop count list
    hop_count_list_per_pair = []
    for src in range(len(ground_stations)):
        temp_list = []
        for dst in range(len(ground_stations)):
            # Entries with dst <= src stay empty, but those are ignored later
            r = []
            for x in path_list_per_pair[src][dst]:
                if len(x) != 0:
                    if len(x) < 2:
                        raise ValueError(
                            "Path must have 0 or at least 2 nodes")
                    # Number of nodes minus 1 is the hop count
                    r.append(len(x) - 1)
            temp_list.append(r)
        hop_count_list_per_pair.append(temp_list)

    #################################################

    # ECDF stuff, which is quick, so we do that first

    # Find all the lists
    list_max_minus_min_hop_count = []
    list_max_hop_count_to_min_hop_count = []
    list_num_path_changes = []
    for src in range(len(ground_stations)):
        for dst in range(src + 1, len(ground_stations)):
            min_hop_count = np.min(hop_count_list_per_pair[src][dst])
            max_hop_count = np.max(hop_count_list_per_pair[src][dst])
            list_max_hop_count_to_min_hop_count.append(
                float(max_hop_count) / float(min_hop_count))
            list_max_minus_min_hop_count.append(max_hop_count - min_hop_count)
            list_num_path_changes.append(
                len(path_list_per_pair[src][dst]) -
                1)  # First path is not a change, so - 1

    # Write and plot ECDFs
    for element in [
        ("ecdf_pairs_max_minus_min_hop_count",
         ECDF(list_max_minus_min_hop_count)),
        ("ecdf_pairs_max_hop_count_to_min_hop_count",
         ECDF(list_max_hop_count_to_min_hop_count)),
        ("ecdf_pairs_num_path_changes", ECDF(list_num_path_changes)),
        ("ecdf_time_step_num_path_changes", ECDF(time_step_num_path_changes)),
        ("ecdf_time_step_num_fstate_updates",
         ECDF(time_step_num_fstate_updates)),
    ]:
        name = element[0]
        ecdf = element[1]
        with open(data_dir + "/" + name + ".txt", "w+") as f_out:
            for i in range(len(ecdf.x)):
                f_out.write(str(ecdf.x[i]) + "," + str(ecdf.y[i]) + "\n")

    #################################################

    # Largest hop count delta
    with open(data_dir + "/top_10_largest_hop_count_delta.txt", "w+") as f_out:
        largest_hop_count_delta_list = []
        for src in range(len(ground_stations)):
            for dst in range(src + 1, len(ground_stations)):
                min_hop_count = np.min(hop_count_list_per_pair[src][dst])
                max_hop_count = np.max(hop_count_list_per_pair[src][dst])
                largest_hop_count_delta_list.append(
                    (max_hop_count - min_hop_count, min_hop_count,
                     max_hop_count, src, dst))
        largest_hop_count_delta_list = sorted(largest_hop_count_delta_list,
                                              reverse=True)
        f_out.write(
            "LARGEST HOP-COUNT DELTA TOP-10 WITHOUT DUPLICATE NODES (EXCL. UNREACHABLE)\n"
        )
        f_out.write(
            "------------------------------------------------------------------\n"
        )
        f_out.write(
            "#      Pair              Delta         Min. hop count    Max. hop count\n"
        )
        already_plotted_nodes = set()
        num_plotted = 0
        for i in range(len(largest_hop_count_delta_list)):
            if largest_hop_count_delta_list[i][3] not in already_plotted_nodes \
                    and largest_hop_count_delta_list[i][4] not in already_plotted_nodes:
                f_out.write(
                    "%-3d    %-4d -> %4d       %8d     %-8d          %-8d\n" %
                    (
                        i + 1,
                        len(satellites) + largest_hop_count_delta_list[i][3],
                        len(satellites) + largest_hop_count_delta_list[i][4],
                        largest_hop_count_delta_list[i][0],
                        largest_hop_count_delta_list[i][1],
                        largest_hop_count_delta_list[i][2],
                    ))
                print_routes_and_rtt(
                    base_output_dir, satellite_network_dir,
                    dynamic_state_update_interval_ms, simulation_end_time_s,
                    len(satellites) + largest_hop_count_delta_list[i][3],
                    len(satellites) + largest_hop_count_delta_list[i][4],
                    satgenpy_dir_with_ending_slash)
                already_plotted_nodes.add(largest_hop_count_delta_list[i][3])
                already_plotted_nodes.add(largest_hop_count_delta_list[i][4])
                num_plotted += 1
                if num_plotted >= 10:
                    break
        f_out.write(
            "------------------------------------------------------------------\n"
        )
        f_out.write("\n")

    # Number of path changes
    with open(data_dir + "/top_10_most_path_changes.txt", "w+") as f_out:
        most_path_changes_list = []
        for src in range(len(ground_stations)):
            for dst in range(src + 1, len(ground_stations)):
                most_path_changes_list.append(
                    (len(path_list_per_pair[src][dst]) - 1, src, dst))
        most_path_changes_list = sorted(most_path_changes_list, reverse=True)
        f_out.write("MOST PATH CHANGES TOP-10 WITHOUT DUPLICATE NODES\n")
        f_out.write("-------------------------------------\n")
        f_out.write("#      Pair           Number of path changes\n")
        already_plotted_nodes = set()
        num_plotted = 0
        for i in range(len(most_path_changes_list)):
            if most_path_changes_list[i][1] not in already_plotted_nodes \
                    and most_path_changes_list[i][2] not in already_plotted_nodes:
                f_out.write(
                    "%-3d    %-4d -> %4d   %d\n" %
                    (i + 1, len(satellites) + most_path_changes_list[i][1],
                     len(satellites) + most_path_changes_list[i][2],
                     most_path_changes_list[i][0]))
                print_routes_and_rtt(
                    base_output_dir, satellite_network_dir,
                    dynamic_state_update_interval_ms, simulation_end_time_s,
                    len(satellites) + most_path_changes_list[i][1],
                    len(satellites) + most_path_changes_list[i][2],
                    satgenpy_dir_with_ending_slash)
                already_plotted_nodes.add(most_path_changes_list[i][1])
                already_plotted_nodes.add(most_path_changes_list[i][2])
                num_plotted += 1
                if num_plotted >= 10:
                    break
        f_out.write("---------------------------------------\n")
        f_out.write("\n")

    print("Done")
Example no. 17
def analyze_time_step_path(output_data_dir, satellite_network_dir,
                           multiple_dynamic_state_update_interval_ms,
                           simulation_end_time_s):

    # Variables (load in for each thread such that they don't interfere)
    ground_stations = read_ground_stations_extended(satellite_network_dir +
                                                    "/ground_stations.txt")
    tles = read_tles(satellite_network_dir + "/tles.txt")
    satellites = tles["satellites"]

    # Local shell
    local_shell = exputil.LocalShell()
    core_network_folder_name = satellite_network_dir.split("/")[-1]
    base_output_dir = "%s/%s/%ds/path" % (
        output_data_dir, core_network_folder_name, simulation_end_time_s)
    pdf_dir = base_output_dir + "/pdf"
    data_dir = base_output_dir + "/data"
    local_shell.remove_force_recursive(pdf_dir)
    local_shell.remove_force_recursive(data_dir)
    local_shell.make_full_dir(pdf_dir)
    local_shell.make_full_dir(data_dir)

    # Configs
    configs = []
    for i in range(len(multiple_dynamic_state_update_interval_ms)):
        configs.append((
            multiple_dynamic_state_update_interval_ms[i],
            "%s/dynamic_state_%dms_for_%ds" %
            (satellite_network_dir,
             multiple_dynamic_state_update_interval_ms[i],
             simulation_end_time_s),
        ))

    # Derivatives
    simulation_end_time_ns = simulation_end_time_s * 1000 * 1000 * 1000

    # Analysis
    per_dyn_state_path_list_per_pair = []
    for c in range(len(configs)):
        path_list_per_pair = []
        for i in range(len(ground_stations)):
            temp_list = []
            for j in range(len(ground_stations)):
                temp_list.append([])
            path_list_per_pair.append(temp_list)
        per_dyn_state_path_list_per_pair.append(path_list_per_pair)

    # For each time moment
    fstate = {}
    smallest_step_ns = min(
        multiple_dynamic_state_update_interval_ms) * 1000 * 1000
    num_iterations = simulation_end_time_ns // smallest_step_ns
    it = 1
    for t in range(0, simulation_end_time_ns, smallest_step_ns):

        c_idx = 0
        for c in configs:
            if t % (c[0] * 1000 * 1000) == 0:

                # Read in forwarding state
                with open(c[1] + "/fstate_" + str(t) + ".txt", "r") as f_in:
                    for line in f_in:
                        spl = line.split(",")
                        current = int(spl[0])
                        destination = int(spl[1])
                        next_hop = int(spl[2])
                        fstate[(current, destination)] = next_hop

                    # Go over each pair of ground stations and calculate the length
                    for src in range(len(ground_stations)):
                        for dst in range(src + 1, len(ground_stations)):
                            src_node_id = len(satellites) + src
                            dst_node_id = len(satellites) + dst
                            path = get_path(src_node_id, dst_node_id, fstate)
                            path_list_per_pair = \
                                per_dyn_state_path_list_per_pair[c_idx]
                            if path is None:
                                if len(path_list_per_pair[src][dst]) == 0 \
                                        or path_list_per_pair[src][dst][-1][0] != []:
                                    path_list_per_pair[src][dst].append(([], t))
                            else:
                                if len(path_list_per_pair[src][dst]) == 0 \
                                        or path != path_list_per_pair[src][dst][-1][0]:
                                    path_list_per_pair[src][dst].append((path, t))

            c_idx += 1

        # Show progress a bit
        print("%d / %d" % (it, num_iterations))
        it += 1
    print("")

    # Calculate path overlap
    time_between_path_change_ns_list = []
    per_config_pair_missed_path_changes_list = []
    for c_idx in range(len(configs)):
        per_config_pair_missed_path_changes_list.append([])
    for src in range(len(ground_stations)):
        for dst in range(src + 1, len(ground_stations)):
            base_path_list = per_dyn_state_path_list_per_pair[0][src][dst]
            # First change is from the epoch, which is not representative
            for j in range(2, len(base_path_list)):
                time_between_path_change_ns_list.append(
                    base_path_list[j][1] - base_path_list[j - 1][1])
            for c_idx in range(len(configs)):
                worse_path_list = \
                    per_dyn_state_path_list_per_pair[c_idx][src][dst]
                per_config_pair_missed_path_changes_list[c_idx].append(
                    len(base_path_list) - len(worse_path_list))

    #################################################

    # Write and plot ECDFs
    for element in [
        ("ecdf_overall_time_between_path_change",
         ECDF(time_between_path_change_ns_list)),
    ]:
        name = element[0]
        ecdf = element[1]
        with open(data_dir + "/" + name + ".txt", "w+") as f_out:
            for i in range(len(ecdf.x)):
                f_out.write(str(ecdf.x[i]) + "," + str(ecdf.y[i]) + "\n")

    # Find all the lists
    for c_idx in range(len(configs)):
        config = configs[c_idx]

        # Write and plot ECDFs
        for element in [
            ("ecdf_pairs_%dms_missed_path_changes" % config[0],
             ECDF(per_config_pair_missed_path_changes_list[c_idx])),
        ]:
            name = element[0]
            ecdf = element[1]
            with open(data_dir + "/" + name + ".txt", "w+") as f_out:
                for i in range(len(ecdf.x)):
                    f_out.write(str(ecdf.x[i]) + "," + str(ecdf.y[i]) + "\n")

    # Histograms
    with open(data_dir + "/histogram_missed_path_changes.txt", "w+") as f_out:
        f_out.write("Granularity ")
        for x in range(100):
            f_out.write(" " + str(x))
        f_out.write("\n")
        for c_idx in range(1, len(configs)):
            config = configs[c_idx]
            f_out.write(str(config[0]) + "ms")
            counter = [0] * 100
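            # Assumes no pair misses 100 or more path changes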
            for a in per_config_pair_missed_path_changes_list[c_idx]:
                counter[a] += 1
            for x in counter:
                f_out.write(" " + str(
                    x / len(per_config_pair_missed_path_changes_list[c_idx])))
            f_out.write("\n")

    print("Done")
Example no. 18
def print_graphical_routes_and_rtt(base_output_dir, satellite_network_dir,
                                   dynamic_state_update_interval_ms,
                                   simulation_end_time_s, src, dst):

    # Local shell
    local_shell = exputil.LocalShell()

    # Dynamic state dir can be inferred
    satellite_network_dynamic_state_dir = "%s/dynamic_state_%dms_for_%ds" % (
        satellite_network_dir, dynamic_state_update_interval_ms,
        simulation_end_time_s)

    # Default output dir assumes it is done manual
    pdf_dir = base_output_dir + "/pdf"
    data_dir = base_output_dir + "/data"
    local_shell.make_full_dir(pdf_dir)
    local_shell.make_full_dir(data_dir)

    # Variables (load in for each thread such that they don't interfere)
    ground_stations = read_ground_stations_extended(satellite_network_dir +
                                                    "/ground_stations.txt")
    tles = read_tles(satellite_network_dir + "/tles.txt")
    satellites = tles["satellites"]
    list_isls = read_isls(satellite_network_dir + "/isls.txt", len(satellites))
    epoch = tles["epoch"]
    description = exputil.PropertiesConfig(satellite_network_dir +
                                           "/description.txt")

    # Derivatives
    simulation_end_time_ns = simulation_end_time_s * 1000 * 1000 * 1000
    dynamic_state_update_interval_ns = dynamic_state_update_interval_ms * 1000 * 1000
    max_gsl_length_m = exputil.parse_positive_float(
        description.get_property_or_fail("max_gsl_length_m"))
    max_isl_length_m = exputil.parse_positive_float(
        description.get_property_or_fail("max_isl_length_m"))

    # For each time moment
    fstate = {}
    current_path = []
    rtt_ns_list = []
    for t in range(0, simulation_end_time_ns,
                   dynamic_state_update_interval_ns):
        with open(
                satellite_network_dynamic_state_dir + "/fstate_" + str(t) +
                ".txt", "r") as f_in:
            for line in f_in:
                spl = line.split(",")
                current = int(spl[0])
                destination = int(spl[1])
                next_hop = int(spl[2])
                fstate[(current, destination)] = next_hop

            # Calculate path length
            path_there = get_path(src, dst, fstate)
            path_back = get_path(dst, src, fstate)
            if path_there is not None and path_back is not None:
                length_src_to_dst_m = compute_path_length_without_graph(
                    path_there, epoch, t, satellites, ground_stations,
                    list_isls, max_gsl_length_m, max_isl_length_m)
                length_dst_to_src_m = compute_path_length_without_graph(
                    path_back, epoch, t, satellites, ground_stations,
                    list_isls, max_gsl_length_m, max_isl_length_m)
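                # Meters to nanoseconds at the speed of light (299792458 m/s)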
                rtt_ns = (length_src_to_dst_m +
                          length_dst_to_src_m) * 1000000000.0 / 299792458.0
            else:
                length_src_to_dst_m = 0.0
                length_dst_to_src_m = 0.0
                rtt_ns = 0.0

            # Add to RTT list
            rtt_ns_list.append((t, rtt_ns))

            # Only if the path changed, print the new path
            new_path = path_there  # Same value as get_path(src, dst, fstate)
            if current_path != new_path:

                # This is the new path
                current_path = new_path

                # Write change nicely to the console
                print("Change at t=" + str(t) + " ns (= " + str(t / 1e9) +
                      " seconds)")
                print("  > Path..... " +
                      (" -- ".join(list(map(lambda x: str(x), current_path)))
                       if current_path is not None else "Unreachable"))
                print("  > Length... " +
                      str(length_src_to_dst_m + length_dst_to_src_m) + " m")
                print("  > RTT...... %.2f ms" % (rtt_ns / 1e6))
                print("")

                # Now we make a pdf for it
                pdf_filename = pdf_dir + "/graphics_%d_to_%d_time_%dms.pdf" % (
                    src, dst, int(t / 1000000))
                f = plt.figure()

                # Projection
                ax = plt.axes(projection=ccrs.PlateCarree())

                # Background
                ax.add_feature(cartopy.feature.OCEAN, zorder=0)
                ax.add_feature(cartopy.feature.LAND,
                               zorder=0,
                               edgecolor='black',
                               linewidth=0.2)
                ax.add_feature(cartopy.feature.BORDERS,
                               edgecolor='gray',
                               linewidth=0.2)

                # Time moment
                time_moment_str = str(epoch + t * u.ns)

                # Other satellites
                for node_id in range(len(satellites)):
                    shadow_ground_station = create_basic_ground_station_for_satellite_shadow(
                        satellites[node_id], str(epoch), time_moment_str)
                    latitude_deg = float(
                        shadow_ground_station["latitude_degrees_str"])
                    longitude_deg = float(
                        shadow_ground_station["longitude_degrees_str"])

                    # Other satellite
                    plt.plot(
                        longitude_deg,
                        latitude_deg,
                        color=SATELLITE_UNUSED_COLOR,
                        fillstyle='none',
                        markeredgewidth=0.1,
                        markersize=0.5,
                        marker='^',
                    )
                    plt.text(longitude_deg + 0.5,
                             latitude_deg,
                             str(node_id),
                             color=SATELLITE_UNUSED_COLOR,
                             fontdict={"size": 1})

                # # ISLs
                # for isl in list_isls:
                #     ephem_body = satellites[isl[0]]
                #     ephem_body.compute(time_moment_str)
                #     from_latitude_deg = math.degrees(ephem_body.sublat)
                #     from_longitude_deg = math.degrees(ephem_body.sublong)
                #
                #     ephem_body = satellites[isl[1]]
                #     ephem_body.compute(time_moment_str)
                #     to_latitude_deg = math.degrees(ephem_body.sublat)
                #     to_longitude_deg = math.degrees(ephem_body.sublong)
                #
                #     # Plot the line
                #     if ground_stations[src - len(satellites)]["longitude_degrees_str"] <= \
                #        from_longitude_deg \
                #        <= ground_stations[dst - len(satellites)]["longitude_degrees_str"] \
                #        and \
                #        ground_stations[src - len(satellites)]["latitude_degrees_str"] <= \
                #        from_latitude_deg \
                #        <= ground_stations[dst - len(satellites)]["latitude_degrees_str"] \
                #        and \
                #        ground_stations[src - len(satellites)]["longitude_degrees_str"] <= \
                #        to_longitude_deg \
                #        <= ground_stations[dst - len(satellites)]["longitude_degrees_str"] \
                #        and \
                #        ground_stations[src - len(satellites)]["latitude_degrees_str"] <= \
                #        to_latitude_deg \
                #        <= ground_stations[dst - len(satellites)]["latitude_degrees_str"]:
                #             plt.plot(
                #         [from_longitude_deg, to_longitude_deg],
                #         [from_latitude_deg, to_latitude_deg],
                #         color='#eb6b38', linewidth=0.1, marker='',
                #         transform=ccrs.Geodetic(),
                #     )

                # Other ground stations
                for gid in range(len(ground_stations)):
                    latitude_deg = float(
                        ground_stations[gid]["latitude_degrees_str"])
                    longitude_deg = float(
                        ground_stations[gid]["longitude_degrees_str"])

                    # Other ground station
                    plt.plot(
                        longitude_deg,
                        latitude_deg,
                        color=GROUND_STATION_UNUSED_COLOR,
                        fillstyle='none',
                        markeredgewidth=0.2,
                        markersize=1.0,
                        marker='o',
                    )

                # Lines between
                if current_path is not None:
                    for v in range(1, len(current_path)):
                        from_node_id = current_path[v - 1]
                        to_node_id = current_path[v]

                        # From coordinates
                        if from_node_id < len(satellites):
                            shadow_ground_station = create_basic_ground_station_for_satellite_shadow(
                                satellites[from_node_id], str(epoch),
                                time_moment_str)
                            from_latitude_deg = float(
                                shadow_ground_station["latitude_degrees_str"])
                            from_longitude_deg = float(
                                shadow_ground_station["longitude_degrees_str"])
                        else:
                            from_latitude_deg = float(
                                ground_stations[from_node_id - len(satellites)]
                                ["latitude_degrees_str"])
                            from_longitude_deg = float(
                                ground_stations[from_node_id - len(satellites)]
                                ["longitude_degrees_str"])

                        # To coordinates
                        if to_node_id < len(satellites):
                            shadow_ground_station = create_basic_ground_station_for_satellite_shadow(
                                satellites[to_node_id], str(epoch),
                                time_moment_str)
                            to_latitude_deg = float(
                                shadow_ground_station["latitude_degrees_str"])
                            to_longitude_deg = float(
                                shadow_ground_station["longitude_degrees_str"])
                        else:
                            to_latitude_deg = float(
                                ground_stations[to_node_id - len(satellites)]
                                ["latitude_degrees_str"])
                            to_longitude_deg = float(
                                ground_stations[to_node_id - len(satellites)]
                                ["longitude_degrees_str"])

                        # Plot the line
                        plt.plot(
                            [from_longitude_deg, to_longitude_deg],
                            [from_latitude_deg, to_latitude_deg],
                            color=ISL_COLOR,
                            linewidth=0.5,
                            marker='',
                            transform=ccrs.Geodetic(),
                        )

                # Across all points, we need to find the latitude / longitude to zoom into
                # min_latitude = min(
                #     ground_stations[src - len(satellites)]["latitude_degrees_str"],
                #     ground_stations[dst - len(satellites)]["latitude_degrees_str"]
                # )
                # max_latitude = max(
                #     ground_stations[src - len(satellites)]["latitude_degrees_str"],
                #     ground_stations[dst - len(satellites)]["latitude_degrees_str"]
                # )
                # min_longitude = min(
                #     ground_stations[src - len(satellites)]["longitude_degrees_str"],
                #     ground_stations[dst - len(satellites)]["longitude_degrees_str"]
                # )
                # max_longitude = max(
                #     ground_stations[src - len(satellites)]["longitude_degrees_str"],
                #     ground_stations[dst - len(satellites)]["longitude_degrees_str"]
                # )

                # Points
                if current_path is not None:
                    for v in range(0, len(current_path)):
                        node_id = current_path[v]
                        if node_id < len(satellites):
                            shadow_ground_station = create_basic_ground_station_for_satellite_shadow(
                                satellites[node_id], str(epoch),
                                time_moment_str)
                            latitude_deg = float(
                                shadow_ground_station["latitude_degrees_str"])
                            longitude_deg = float(
                                shadow_ground_station["longitude_degrees_str"])
                            # min_latitude = min(min_latitude, latitude_deg)
                            # max_latitude = max(max_latitude, latitude_deg)
                            # min_longitude = min(min_longitude, longitude_deg)
                            # max_longitude = max(max_longitude, longitude_deg)
                            # Satellite
                            plt.plot(
                                longitude_deg,
                                latitude_deg,
                                color=SATELLITE_USED_COLOR,
                                marker='^',
                                markersize=0.65,
                            )
                            plt.text(longitude_deg + 0.9,
                                     latitude_deg,
                                     str(node_id),
                                     fontdict={
                                         "size": 2,
                                         "weight": "bold"
                                     })
                        else:
                            latitude_deg = float(
                                ground_stations[node_id - len(satellites)]
                                ["latitude_degrees_str"])
                            longitude_deg = float(
                                ground_stations[node_id - len(satellites)]
                                ["longitude_degrees_str"])
                            # min_latitude = min(min_latitude, latitude_deg)
                            # max_latitude = max(max_latitude, latitude_deg)
                            # min_longitude = min(min_longitude, longitude_deg)
                            # max_longitude = max(max_longitude, longitude_deg)
                            # Endpoint (start or finish) and intermediary
                            # ground stations are currently drawn identically
                            plt.plot(
                                longitude_deg,
                                latitude_deg,
                                color=GROUND_STATION_USED_COLOR,
                                marker='o',
                                markersize=0.9,
                            )

                # Zoom into region
                # ax.set_extent([
                #     min_longitude - 5,
                #     max_longitude + 5,
                #     min_latitude - 5,
                #     max_latitude + 5,
                # ])

                # Legend
                ax.legend(handles=(
                    Line2D([0], [0],
                           marker='o',
                           label="Ground station (used)",
                           linewidth=0,
                           color='#3b3b3b',
                           markersize=5),
                    Line2D([0], [0],
                           marker='o',
                           label="Ground station (unused)",
                           linewidth=0,
                           color='black',
                           markersize=5,
                           fillstyle='none',
                           markeredgewidth=0.5),
                    Line2D([0], [0],
                           marker='^',
                           label="Satellite (used)",
                           linewidth=0,
                           color='#a61111',
                           markersize=5),
                    Line2D([0], [0],
                           marker='^',
                           label="Satellite (unused)",
                           linewidth=0,
                           color='red',
                           markersize=5,
                           fillstyle='none',
                           markeredgewidth=0.5),
                ),
                          loc='lower left',
                          fontsize='xx-small')

                # Save final PDF figure
                f.savefig(pdf_filename, bbox_inches='tight')
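
For readers unfamiliar with cartopy, a minimal self-contained sketch of the map scaffolding used in this example (the coordinates and output filename are illustrative):

import cartopy.crs as ccrs
import cartopy.feature
import matplotlib.pyplot as plt

fig = plt.figure()
ax = plt.axes(projection=ccrs.PlateCarree())
ax.add_feature(cartopy.feature.OCEAN, zorder=0)
ax.add_feature(cartopy.feature.LAND, zorder=0, edgecolor='black',
               linewidth=0.2)
# One great-circle segment, e.g. Amsterdam to New York
ax.plot([4.9, -74.0], [52.4, 40.7], color='#a61111', linewidth=0.5,
        transform=ccrs.Geodetic())
fig.savefig("example_map.pdf", bbox_inches='tight')
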
Example no. 19
def plot_tcp_flows_ecdfs(logs_ns3_dir, data_out_dir, pdf_out_dir):
    local_shell = exputil.LocalShell()

    # Check that all plotting files are available
    if not local_shell.file_exists("plot_tcp_flows_ecdf_fct.plt") \
       or not local_shell.file_exists("plot_tcp_flows_ecdf_avg_throughput.plt"):
        print("The gnuplot files are not present.")
        print(
            "Are you executing this python file inside the plot_tcp_flows_ecdfs directory?"
        )
        exit(1)

    # Create the output directories if they don't exist yet
    local_shell.make_full_dir(data_out_dir)
    local_shell.make_full_dir(pdf_out_dir)

    # Create rate file
    tcp_flows_csv_columns = exputil.read_csv_direct_in_columns(
        logs_ns3_dir + "/tcp_flows.csv",
        "idx_int,pos_int,pos_int,pos_int,pos_int,pos_int,pos_int,pos_int,string,string"
    )
    num_flows = len(tcp_flows_csv_columns[0])
    # flow_id_list = tcp_flows_csv_columns[0]
    # from_node_id_list = tcp_flows_csv_columns[1]
    # to_node_id_list = tcp_flows_csv_columns[2]
    size_byte_list = tcp_flows_csv_columns[3]
    # start_time_ns_list = tcp_flows_csv_columns[4]
    # end_time_ns_list = tcp_flows_csv_columns[5]
    duration_ns_list = tcp_flows_csv_columns[6]
    # amount_sent_ns_list = tcp_flows_csv_columns[7]
    finished_list = tcp_flows_csv_columns[8]
    # metadata_list = tcp_flows_csv_columns[9]

    # Retrieve FCTs
    num_finished = 0
    num_unfinished = 0
    fct_ms_list = []
    avg_throughput_megabit_per_s_list = []
    for i in range(num_flows):
        if finished_list[i] == "YES":
            fct_ms_list.append(duration_ns_list[i] / 1e6)
            avg_throughput_megabit_per_s_list.append(
                float(size_byte_list[i]) / float(duration_ns_list[i]) * 8000.0)
            num_finished += 1
        else:
            num_unfinished += 1

    # Exit if no TCP flows finished
    if num_finished == 0:
        raise ValueError(
            "No TCP flows were finished so an ECDF could not be produced")

    # Now create ECDF for average throughput
    avg_throughput_megabit_per_s_ecdf = ECDF(avg_throughput_megabit_per_s_list)
    data_filename = data_out_dir + "/tcp_flows_ecdf_avg_throughput_megabit_per_s.csv"
    with open(data_filename, "w+") as f_out:
        for i in range(len(avg_throughput_megabit_per_s_ecdf.x)):
            f_out.write(
                str(avg_throughput_megabit_per_s_ecdf.x[i]) + "," +
                str(avg_throughput_megabit_per_s_ecdf.y[i]) + "\n")

    # Plot ECDF of average throughput of each TCP flow
    pdf_filename = pdf_out_dir + "/plot_tcp_flows_ecdf_avg_throughput.pdf"
    plt_filename = "plot_tcp_flows_ecdf_avg_throughput.plt"
    local_shell.copy_file(plt_filename, "temp.plt")
    local_shell.sed_replace_in_file_plain("temp.plt", "[OUTPUT-FILE]",
                                          pdf_filename)
    local_shell.sed_replace_in_file_plain("temp.plt", "[DATA-FILE]",
                                          data_filename)
    local_shell.perfect_exec("gnuplot temp.plt")
    local_shell.remove("temp.plt")

    # Show final result
    print("Average throughput statistics:")
    print("  > Included (finished)....... %.2f%% (%d out of %d)" %
          (float(num_finished) / float(num_flows) * 100.0, num_finished,
           num_flows))
    print("  > Average throughput........ %.2f Mbit/s" %
          (np.mean(avg_throughput_megabit_per_s_list)))
    print("  > Minimum throughput........ %.2f Mbit/s (slowest)" %
          (np.min(avg_throughput_megabit_per_s_list)))
    print("  > 1th %%-tile throughput..... %.2f Mbit/s" %
          (np.percentile(avg_throughput_megabit_per_s_list, 1.0)))
    print("  > 10th %%-tile throughput.... %.2f Mbit/s" %
          (np.percentile(avg_throughput_megabit_per_s_list, 10.0)))
    print("  > Median throughput......... %.2f Mbit/s" %
          (np.percentile(avg_throughput_megabit_per_s_list, 50.0)))
    print("  > Maximum throughput........ %.2f Mbit/s (fastest)" %
          (np.max(avg_throughput_megabit_per_s_list)))
    print("")
    print("Produced ECDF data: " + data_filename)
    print("Produced ECDF plot: " + pdf_filename)

    # Now create ECDF for FCTs
    fct_ms_ecdf = ECDF(fct_ms_list)
    data_filename = data_out_dir + "/tcp_flows_ecdf_fct_ms.csv"
    with open(data_filename, "w+") as f_out:
        for i in range(len(fct_ms_ecdf.x)):
            f_out.write(
                str(fct_ms_ecdf.x[i]) + "," + str(fct_ms_ecdf.y[i]) + "\n")

    # Plot ECDF of FCTs
    pdf_filename = pdf_out_dir + "/plot_tcp_flows_ecdf_fct.pdf"
    plt_filename = "plot_tcp_flows_ecdf_fct.plt"
    local_shell.copy_file(plt_filename, "temp.plt")
    local_shell.sed_replace_in_file_plain("temp.plt", "[OUTPUT-FILE]",
                                          pdf_filename)
    local_shell.sed_replace_in_file_plain("temp.plt", "[DATA-FILE]",
                                          data_filename)
    local_shell.perfect_exec("gnuplot temp.plt")
    local_shell.remove("temp.plt")

    # Show final result
    print("FCT statistics:")
    print("  > Included (finished)... %.2f%% (%d out of %d)" %
          (float(num_finished) / float(num_flows) * 100.0, num_finished,
           num_flows))
    print("  > Average FCT........... %.2f ms" % (np.mean(fct_ms_list)))
    print("  > Minimum FCT........... %.2f ms (fastest)" %
          (np.min(fct_ms_list)))
    print("  > Median FCT............ %.2f ms" %
          (np.percentile(fct_ms_list, 50.0)))
    print("  > 90th %%-tile FCT....... %.2f ms" %
          (np.percentile(fct_ms_list, 90.0)))
    print("  > 99th %%-tile FCT....... %.2f ms" %
          (np.percentile(fct_ms_list, 99.0)))
    print("  > Maximum FCT........... %.2f ms (slowest)" %
          (np.max(fct_ms_list)))
    print("")
    print("Produced ECDF data: " + data_filename)
    print("Produced ECDF plot: " + pdf_filename)