Example #1
File: analyze.py  Project: snkas/floodns

# Imports used by the analyze_* functions below
import exputil
import numpy as np


def analyze_link_info(logs_floodns_dir, analysis_folder_dir):

    # Read in all the columns
    link_info_csv_columns = exputil.read_csv_direct_in_columns(
        logs_floodns_dir + '/link_info.csv',
        "pos_int,pos_int,pos_int,pos_int,pos_int,pos_int,pos_float,pos_float,string"
    )
    link_id_list = link_info_csv_columns[0]
    source_id_list = link_info_csv_columns[1]
    target_id_list = link_info_csv_columns[2]
    # start_time_list = link_info_csv_columns[3]
    # end_time_list = link_info_csv_columns[4]
    # duration_list = link_info_csv_columns[5]
    avg_utilization_list = link_info_csv_columns[6]
    # avg_active_flows_list = link_info_csv_columns[7]
    # metadata_list = link_info_csv_columns[8]

    # Count how many links had utilization of zero
    num_link_inactive = 0
    num_link_active = 0
    for u in avg_utilization_list:
        if u == 0:
            num_link_inactive += 1
        else:
            num_link_active += 1

    # Calculate some statistics
    if len(link_id_list) == 0:
        statistics = {
            'all_num_links': len(link_id_list),
        }
    else:

        # General statistics
        statistics = {
            'all_num_links': len(link_id_list),
            'all_num_links_active': num_link_active,
            'all_num_links_inactive': num_link_inactive,
            'all_link_unique_sources': len(set(source_id_list)),
            'all_link_unique_targets': len(set(target_id_list)),

            'all_link_avg_utilization_min': np.min(avg_utilization_list),
            'all_link_avg_utilization_0.1th': np.percentile(avg_utilization_list, 0.1),
            'all_link_avg_utilization_1th': np.percentile(avg_utilization_list, 1),
            'all_link_avg_utilization_mean': np.mean(avg_utilization_list),
            'all_link_avg_utilization_median': np.median(avg_utilization_list),
            'all_link_avg_utilization_std': np.std(avg_utilization_list),
            'all_link_avg_utilization_99th': np.percentile(avg_utilization_list, 99),
            'all_link_avg_utilization_99.9th': np.percentile(avg_utilization_list, 99.9),
            'all_link_avg_utilization_max': np.max(avg_utilization_list),
        }

    # Print raw results
    output_filename = analysis_folder_dir + '/link_info.statistics'
    print('Writing link statistics: %s' % output_filename)
    with open(output_filename, 'w+') as outfile:
        for key, value in sorted(statistics.items()):
            outfile.write(str(key) + "=" + str(value) + "\n")
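
The statistics files produced by these analyze_* functions are plain key=value text files, one entry per line with keys sorted. As an illustrative sketch (not part of the project itself), such a file can be loaded back into a dictionary like this:

def read_statistics_file(filename):
    # Parses lines of the form "<key>=<value>" as written by the functions above
    statistics = {}
    with open(filename, "r") as f:
        for line in f:
            line = line.strip()
            if line:
                key, value = line.split("=", 1)
                statistics[key] = value  # values are kept as strings; cast to int/float as needed
    return statistics

# Hypothetical usage:
# stats = read_statistics_file(analysis_folder_dir + '/link_info.statistics')
# print(stats['all_num_links'])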
Example #2
File: analyze.py  Project: snkas/floodns
def analyze_flow_info(logs_floodns_dir, analysis_folder_dir):

    # Read in all the columns
    flows_info_csv_columns = exputil.read_csv_direct_in_columns(
        logs_floodns_dir + '/flow_info.csv',
        "pos_int,pos_int,pos_int,string,pos_int,pos_int,pos_int,pos_float,pos_float,string"
    )
    flow_id_list = flows_info_csv_columns[0]
    source_id_list = flows_info_csv_columns[1]
    target_id_list = flows_info_csv_columns[2]
    path_list = flows_info_csv_columns[3]
    path_length_list = list(map(lambda x: len(x.split(">")) - 1, path_list))
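    # Note: this assumes paths are recorded as ">"-separated node identifiers,
    # e.g. a hypothetical path string "3>7>5" gives len("3>7>5".split(">")) - 1 = 2 hops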
    # start_time_list = flows_info_csv_columns[4]
    # end_time_list = flows_info_csv_columns[5]
    # duration_list = flows_info_csv_columns[6]
    # total_sent_list = flows_info_csv_columns[7]
    avg_throughput_list = flows_info_csv_columns[8]
    # metadata_list = flows_info_csv_columns[9]

    # Calculate some statistics
    if len(flow_id_list) == 0:
        statistics = {
            'all_num_flows': len(flow_id_list)
        }
        
    else:
        statistics = {
            'all_num_flows': len(flow_id_list),
            'all_flow_num_unique_sources': len(set(source_id_list)),
            'all_flow_num_unique_targets': len(set(target_id_list)),
    
            'all_flow_avg_throughput_sum': sum(avg_throughput_list),
            'all_flow_avg_throughput_min': np.min(avg_throughput_list),
            'all_flow_avg_throughput_0.1th': np.percentile(avg_throughput_list, 0.1),
            'all_flow_avg_throughput_1th': np.percentile(avg_throughput_list, 1),
            'all_flow_avg_throughput_mean': np.mean(avg_throughput_list),
            'all_flow_avg_throughput_median': np.median(avg_throughput_list),
            'all_flow_avg_throughput_99th': np.percentile(avg_throughput_list, 99),
            'all_flow_avg_throughput_99.9th': np.percentile(avg_throughput_list, 99.9),
            'all_flow_avg_throughput_max': np.max(avg_throughput_list),
    
            'all_flow_path_length_min': np.min(path_length_list),
            'all_flow_path_length_0.1th': np.percentile(path_length_list, 0.1),
            'all_flow_path_length_1th': np.percentile(path_length_list, 1),
            'all_flow_path_length_mean': np.mean(path_length_list),
            'all_flow_path_length_median': np.median(path_length_list),
            'all_flow_path_length_99th': np.percentile(path_length_list, 99),
            'all_flow_path_length_99.9th': np.percentile(path_length_list, 99.9),
            'all_flow_path_length_max': np.max(path_length_list),
        }
    
    # Print results
    output_filename = analysis_folder_dir + '/flow_info.statistics'
    print('Writing flow statistics: ' + output_filename)
    with open(output_filename, 'w+') as outfile:
        for key, value in sorted(statistics.items()):
            outfile.write(str(key) + "=" + str(value) + "\n")
Example #3
File: analyze.py  Project: snkas/floodns
def analyze_node_info(logs_floodns_dir, analysis_folder_dir):

    # Read in all the columns
    link_info_csv_columns = exputil.read_csv_direct_in_columns(
        logs_floodns_dir + '/node_info.csv',
        "pos_int,pos_float,string"
    )
    node_id_list = link_info_csv_columns[0]
    avg_active_flows_list = link_info_csv_columns[1]
    # metadata_list = link_info_csv_columns[2]

    # Count how many nodes did not see any flows
    num_node_inactive = 0
    num_node_active = 0
    for a in avg_active_flows_list:
        if a == 0:
            num_node_inactive += 1
        else:
            num_node_active += 1

    # Calculate some statistics
    if len(node_id_list) == 0:
        statistics = {
            'all_num_nodes': len(node_id_list),
        }
    else:

        # General statistics
        statistics = {
            'all_num_nodes': len(node_id_list),
            'all_num_nodes_active': num_node_active,
            'all_num_nodes_inactive': num_node_inactive,

            'all_node_avg_num_active_flows_min': np.min(avg_active_flows_list),
            'all_node_avg_num_active_flows_1th': np.percentile(avg_active_flows_list, 1),
            'all_node_avg_num_active_flows_0.1th': np.percentile(avg_active_flows_list, 0.1),
            'all_node_avg_num_active_flows_mean': np.mean(avg_active_flows_list),
            'all_node_avg_num_active_flows_median': np.median(avg_active_flows_list),
            'all_node_avg_num_active_flows_std': np.std(avg_active_flows_list),
            'all_node_avg_num_active_flows_99th': np.percentile(avg_active_flows_list, 99),
            'all_node_avg_num_active_flows_99.9th': np.percentile(avg_active_flows_list, 99.9),
            'all_node_avg_num_active_flows_max': np.max(avg_active_flows_list),
        }

    # Print raw results
    output_filename = analysis_folder_dir + '/node_info.statistics'
    print('Writing node statistics: %s' % output_filename)
    with open(output_filename, 'w+') as outfile:
        for key, value in sorted(statistics.items()):
            outfile.write(str(key) + "=" + str(value) + "\n")

Example #4
Excerpt of a unittest.TestCase method from a separate test module; it relies on exputil, os, satgen, and the constants MAX_GSL_LENGTH_M / MAX_ISL_LENGTH_M defined elsewhere in that module.
    def test_end_to_end(self):
        local_shell = exputil.LocalShell()

        # Clean slate start
        local_shell.remove_force_recursive("temp_gen_data")
        local_shell.make_full_dir("temp_gen_data")

        # Both dynamic state algorithms should yield the same path and RTT
        for dynamic_state_algorithm in [
            "algorithm_free_one_only_over_isls",
            "algorithm_free_gs_one_sat_many_only_over_isls"
        ]:

            # Specific outcomes
            output_generated_data_dir = "temp_gen_data"
            num_threads = 1
            default_time_step_ms = 100
            all_time_step_ms = [50, 100, 1000, 10000]
            duration_s = 200

            # Add base name to setting
            name = "reduced_kuiper_630_" + dynamic_state_algorithm

            # Path trace we base this test on:
            # 0,1173-184-183-217-1241
            # 18000000000,1173-218-217-1241
            # 27600000000,1173-648-649-650-616-1241
            # 74300000000,1173-218-217-216-250-1241
            # 125900000000,1173-647-648-649-650-616-1241
            # 128700000000,1173-647-648-649-615-1241

            # Create output directories
            if not os.path.isdir(output_generated_data_dir):
                os.makedirs(output_generated_data_dir)
            if not os.path.isdir(output_generated_data_dir + "/" + name):
                os.makedirs(output_generated_data_dir + "/" + name)

            # Ground stations
            print("Generating ground stations...")
            with open(output_generated_data_dir + "/" + name + "/ground_stations.basic.txt", "w+") as f_out:
                f_out.write("0,Manila,14.6042,120.9822,0\n")  # Originally no. 17
                f_out.write("1,Dalian,38.913811,121.602322,0\n")  # Originally no. 85
            satgen.extend_ground_stations(
                output_generated_data_dir + "/" + name + "/ground_stations.basic.txt",
                output_generated_data_dir + "/" + name + "/ground_stations.txt"
            )

            # TLEs (taken from Kuiper-610 first shell)
            print("Generating TLEs...")
            with open(output_generated_data_dir + "/" + name + "/tles.txt", "w+") as f_out:
                f_out.write("1 12\n")  # Pretend it's one orbit with 12 satellites
                f_out.write("Kuiper-630 0\n")  # 183
                f_out.write("1 00184U 00000ABC 00001.00000000  .00000000  00000-0  00000+0 0    06\n")
                f_out.write("2 00184  51.9000  52.9412 0000001   0.0000 142.9412 14.80000000    00\n")
                f_out.write("Kuiper-630 1\n")  # 184
                f_out.write("1 00185U 00000ABC 00001.00000000  .00000000  00000-0  00000+0 0    07\n")
                f_out.write("2 00185  51.9000  52.9412 0000001   0.0000 153.5294 14.80000000    07\n")
                f_out.write("Kuiper-630 2\n")  # 216
                f_out.write("1 00217U 00000ABC 00001.00000000  .00000000  00000-0  00000+0 0    03\n")
                f_out.write("2 00217  51.9000  63.5294 0000001   0.0000 127.0588 14.80000000    01\n")
                f_out.write("Kuiper-630 3\n")  # 217
                f_out.write("1 00218U 00000ABC 00001.00000000  .00000000  00000-0  00000+0 0    04\n")
                f_out.write("2 00218  51.9000  63.5294 0000001   0.0000 137.6471 14.80000000    00\n")
                f_out.write("Kuiper-630 4\n")  # 218
                f_out.write("1 00219U 00000ABC 00001.00000000  .00000000  00000-0  00000+0 0    05\n")
                f_out.write("2 00219  51.9000  63.5294 0000001   0.0000 148.2353 14.80000000    08\n")
                f_out.write("Kuiper-630 5\n")  # 250
                f_out.write("1 00251U 00000ABC 00001.00000000  .00000000  00000-0  00000+0 0    01\n")
                f_out.write("2 00251  51.9000  74.1176 0000001   0.0000 132.3529 14.80000000    00\n")
                f_out.write("Kuiper-630 6\n")  # 615
                f_out.write("1 00616U 00000ABC 00001.00000000  .00000000  00000-0  00000+0 0    06\n")
                f_out.write("2 00616  51.9000 190.5882 0000001   0.0000  31.7647 14.80000000    05\n")
                f_out.write("Kuiper-630 7\n")  # 616
                f_out.write("1 00617U 00000ABC 00001.00000000  .00000000  00000-0  00000+0 0    07\n")
                f_out.write("2 00617  51.9000 190.5882 0000001   0.0000  42.3529 14.80000000    03\n")
                f_out.write("Kuiper-630 8\n")  # 647
                f_out.write("1 00648U 00000ABC 00001.00000000  .00000000  00000-0  00000+0 0    01\n")
                f_out.write("2 00648  51.9000 201.1765 0000001   0.0000  15.8824 14.80000000    09\n")
                f_out.write("Kuiper-630 9\n")  # 648
                f_out.write("1 00649U 00000ABC 00001.00000000  .00000000  00000-0  00000+0 0    02\n")
                f_out.write("2 00649  51.9000 201.1765 0000001   0.0000  26.4706 14.80000000    07\n")
                f_out.write("Kuiper-630 10\n")  # 649
                f_out.write("1 00650U 00000ABC 00001.00000000  .00000000  00000-0  00000+0 0    04\n")
                f_out.write("2 00650  51.9000 201.1765 0000001   0.0000  37.0588 14.80000000    05\n")
                f_out.write("Kuiper-630 11\n")  # 650
                f_out.write("1 00651U 00000ABC 00001.00000000  .00000000  00000-0  00000+0 0    05\n")
                f_out.write("2 00651  51.9000 201.1765 0000001   0.0000  47.6471 14.80000000    04\n")

            # Nodes
            #
            # Original ID   Test ID
            # 183           0
            # 184           1
            # 216           2
            # 217           3
            # 218           4
            # 250           5
            # 615           6
            # 616           7
            # 647           8
            # 648           9
            # 649           10
            # 650           11
            #
            # ISLs
            #
            # Original      Test
            # 183-184       0-1
            # 183-217       0-3
            # 216-217       2-3
            # 216-250       2-5
            # 217-218       3-4
            # 615-649       6-10
            # 616-650       7-11
            # 647-648       8-9
            # 648-649       9-10
            # 649-650       10-11
            #
            # Necessary ISLs (above) inferred from trace:
            #
            # 0,1173-184-183-217-1241
            # 18000000000,1173-218-217-1241
            # 27600000000,1173-648-649-650-616-1241
            # 74300000000,1173-218-217-216-250-1241
            # 125900000000,1173-647-648-649-650-616-1241
            # 128700000000,1173-647-648-649-615-1241
            #
            print("Generating ISLs...")
            with open(output_generated_data_dir + "/" + name + "/isls.txt", "w+") as f_out:
                f_out.write("0 1\n")
                f_out.write("0 3\n")
                f_out.write("2 3\n")
                f_out.write("2 5\n")
                f_out.write("3 4\n")
                f_out.write("6 10\n")
                f_out.write("7 11\n")
                f_out.write("8 9\n")
                f_out.write("9 10\n")
                f_out.write("10 11\n")

            # Description
            print("Generating description...")
            satgen.generate_description(
                output_generated_data_dir + "/" + name + "/description.txt",
                MAX_GSL_LENGTH_M,
                MAX_ISL_LENGTH_M
            )

            # Extended ground stations
            ground_stations = satgen.read_ground_stations_extended(
                output_generated_data_dir + "/" + name + "/ground_stations.txt"
            )

            # GSL interfaces
            if dynamic_state_algorithm == "algorithm_free_one_only_over_isls":
                gsl_interfaces_per_satellite = 1
                gsl_satellite_max_agg_bandwidth = 1.0
            elif dynamic_state_algorithm == "algorithm_free_gs_one_sat_many_only_over_isls":
                gsl_interfaces_per_satellite = len(ground_stations)
                gsl_satellite_max_agg_bandwidth = len(ground_stations)
            else:
                raise ValueError("Unknown dynamic state algorithm: " + dynamic_state_algorithm)
            print("Generating GSL interfaces info..")
            satgen.generate_simple_gsl_interfaces_info(
                output_generated_data_dir + "/" + name + "/gsl_interfaces_info.txt",
                12,  # 12 satellites
                len(ground_stations),
                gsl_interfaces_per_satellite,  # GSL interfaces per satellite
                1,  # (GSL) Interfaces per ground station
                gsl_satellite_max_agg_bandwidth,  # Aggregate max. bandwidth satellite (unit unspecified)
                1   # Aggregate max. bandwidth ground station (same unspecified unit)
            )

            # Forwarding state
            for time_step_ms in all_time_step_ms:
                print("Generating forwarding state...")
                satgen.help_dynamic_state(
                    output_generated_data_dir,
                    num_threads,
                    name,
                    time_step_ms,
                    duration_s,
                    MAX_GSL_LENGTH_M,
                    MAX_ISL_LENGTH_M,
                    dynamic_state_algorithm,
                    False
                )

            # Clean slate start
            local_shell.remove_force_recursive("temp_analysis_data")
            local_shell.make_full_dir("temp_analysis_data")
            output_analysis_data_dir = "temp_analysis_data"
            satgen.post_analysis.print_routes_and_rtt(
                output_analysis_data_dir + "/" + name,
                output_generated_data_dir + "/" + name,
                default_time_step_ms,
                duration_s,
                12,
                13,
                ""
            )

            # Now, we just want to see that the output path matches
            with open(output_analysis_data_dir + "/" + name + "/data/networkx_path_12_to_13.txt", "r") as f_in:
                i = 0
                for line in f_in:
                    line = line.strip()
                    if i == 0:
                        self.assertEqual(line, "0,12-1-0-3-13")
                    elif i == 1:
                        self.assertEqual(line, "18000000000,12-4-3-13")
                    elif i == 2:
                        self.assertEqual(line, "27600000000,12-9-10-11-7-13")
                    elif i == 3:
                        self.assertEqual(line, "74300000000,12-4-3-2-5-13")
                    elif i == 4:
                        self.assertEqual(line, "125900000000,12-8-9-10-11-7-13")
                    elif i == 5:
                        self.assertEqual(line, "128700000000,12-8-9-10-6-13")
                    else:
                        self.fail()
                    i += 1

            # ... and the RTT
            with open(output_analysis_data_dir + "/" + name + "/data/networkx_rtt_12_to_13.txt", "r") as f_in1:
                with open("tests/data_to_match/kuiper_630/networkx_rtt_1173_to_1241.txt", "r") as f_in2:
                    lines1 = []
                    for line in f_in1:
                        lines1.append(line.strip())
                    lines2 = []
                    for line in f_in2:
                        lines2.append(line.strip())

                    # Rather than self.assertEqual(lines1, lines2) (too costly, and exact float equality is not needed),
                    # compare element-wise: timestamps must match exactly, RTTs only up to a small tolerance
                    self.assertEqual(len(lines1), len(lines2))
                    for i in range(len(lines1)):
                        a_spl = lines1[i].split(",")
                        b_spl = lines2[i].split(",")
                        self.assertEqual(len(a_spl), len(b_spl))
                        self.assertEqual(len(a_spl), 2)
                        a_time = int(a_spl[0])
                        b_time = int(b_spl[0])
                        a_rtt = float(a_spl[1])
                        b_rtt = float(b_spl[1])
                        self.assertEqual(a_time, b_time)
                        self.assertAlmostEqual(a_rtt, b_rtt, places=6)

            # Now let's run all analyses available

            # TODO: Disabled because it requires downloading files from CDNs, which can take too long
            # # Print graphically
            #
            # satgen.post_analysis.print_graphical_routes_and_rtt(
            #     output_analysis_data_dir + "/" + name,
            #     output_generated_data_dir + "/" + name,
            #     default_time_step_ms,
            #     duration_s,
            #     12,
            #     13
            # )

            # Analyze paths
            satgen.post_analysis.analyze_path(
                output_analysis_data_dir + "/" + name,
                output_generated_data_dir + "/" + name,
                default_time_step_ms,
                duration_s,
                ""
            )

            # Number of path changes per pair
            columns = exputil.read_csv_direct_in_columns(
                output_analysis_data_dir + "/" + name +
                "/" + name + "/100ms_for_200s/path/data/ecdf_pairs_num_path_changes.txt",
                "float,pos_float"
            )
            for i in range(len(columns[0])):

                # Cumulative y-axis check
                if i == 0:
                    self.assertEqual(columns[1][i], 0)
                else:
                    self.assertEqual(columns[1][i], 1.0)

                # Only one pair with 5 path changes
                if i == 0:
                    self.assertEqual(columns[0][i], float("-inf"))
                else:
                    self.assertEqual(columns[0][i], 5)
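            # Note on the ecdf_*.txt layout assumed by these checks (inferred from the assertions, not stated
            # explicitly in the source): each line is "value,cumulative_probability", sorted by value, with a
            # synthetic first row of "-inf,0" so that the ECDF starts at probability 0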

            # Max minus min hop count per pair
            columns = exputil.read_csv_direct_in_columns(
                output_analysis_data_dir + "/" + name +
                "/" + name + "/100ms_for_200s/path/data/ecdf_pairs_max_minus_min_hop_count.txt",
                "float,pos_float"
            )
            for i in range(len(columns[0])):

                # Cumulative y-axis check
                if i == 0:
                    self.assertEqual(columns[1][i], 0)
                else:
                    self.assertEqual(columns[1][i], 1.0)

                # Shortest is 3 hops, longest is 6 hops, max delta is 3
                if i == 0:
                    self.assertEqual(columns[0][i], float("-inf"))
                else:
                    self.assertEqual(columns[0][i], 3)

            # Max divided by min hop count per pair
            columns = exputil.read_csv_direct_in_columns(
                output_analysis_data_dir + "/" + name +
                "/" + name + "/100ms_for_200s/path/data/ecdf_pairs_max_hop_count_to_min_hop_count.txt",
                "float,pos_float"
            )
            for i in range(len(columns[0])):

                # Cumulative y-axis check
                if i == 0:
                    self.assertEqual(columns[1][i], 0)
                else:
                    self.assertEqual(columns[1][i], 1.0)

                # Shortest is 3 hops, longest is 6 hops, max/min division is 2.0
                if i == 0:
                    self.assertEqual(columns[0][i], float("-inf"))
                else:
                    self.assertEqual(columns[0][i], 2.0)

            # For all pairs, the distribution of how many times they changed path in a time step
            columns = exputil.read_csv_direct_in_columns(
                output_analysis_data_dir + "/" + name +
                "/" + name + "/100ms_for_200s/path/data/ecdf_time_step_num_path_changes.txt",
                "float,pos_float"
            )
            start_cumulative = 0.0
            for i in range(len(columns[0])):

                # Cumulative y-axis check
                if i == 0:
                    self.assertEqual(columns[1][i], start_cumulative)
                else:
                    self.assertGreater(columns[1][i], start_cumulative)
                if i == len(columns[0]) - 1:
                    self.assertEqual(columns[1][i], 1.0)

                # There are only 5 time moments, none of which overlap, so this needs to be 5 times 1
                if i == 0:
                    self.assertEqual(columns[0][i], float("-inf"))
                elif i > 2000 - 6:
                    self.assertEqual(columns[0][i], 1.0)
                else:
                    self.assertEqual(columns[0][i], 0)

            # Analyze RTTs
            satgen.post_analysis.analyze_rtt(
                output_analysis_data_dir + "/" + name,
                output_generated_data_dir + "/" + name,
                default_time_step_ms,
                duration_s,
                ""
            )

            # Min. RTT
            columns = exputil.read_csv_direct_in_columns(
                output_analysis_data_dir + "/" + name +
                "/" + name + "/100ms_for_200s/rtt/data/ecdf_pairs_min_rtt_ns.txt",
                "float,pos_float"
            )
            for i in range(len(columns[0])):

                # Cumulative y-axis check
                if i == 0:
                    self.assertEqual(columns[1][i], 0)
                else:
                    self.assertEqual(columns[1][i], 1.0)

                # Only one pair with minimum RTT 25ish
                if i == 0:
                    self.assertEqual(columns[0][i], float("-inf"))
                else:
                    self.assertAlmostEqual(columns[0][i], 25229775.250687573, delta=100)

            # Max. RTT
            columns = exputil.read_csv_direct_in_columns(
                output_analysis_data_dir + "/" + name +
                "/" + name + "/100ms_for_200s/rtt/data/ecdf_pairs_max_rtt_ns.txt",
                "float,pos_float"
            )
            for i in range(len(columns[0])):

                # Cumulative y-axis check
                if i == 0:
                    self.assertEqual(columns[1][i], 0)
                else:
                    self.assertEqual(columns[1][i], 1.0)

                # Only one pair with max. RTT 48ish
                if i == 0:
                    self.assertEqual(columns[0][i], float("-inf"))
                else:
                    self.assertAlmostEqual(columns[0][i], 48165140.010532916, delta=100)

            # Max. - Min. RTT
            columns = exputil.read_csv_direct_in_columns(
                output_analysis_data_dir + "/" + name +
                "/" + name + "/100ms_for_200s/rtt/data/ecdf_pairs_max_minus_min_rtt_ns.txt",
                "float,pos_float"
            )
            for i in range(len(columns[0])):

                # Cumulative y-axis check
                if i == 0:
                    self.assertEqual(columns[1][i], 0)
                else:
                    self.assertEqual(columns[1][i], 1.0)

                # Only one pair with minimum RTT of 25ish, max. RTT is 48ish
                if i == 0:
                    self.assertEqual(columns[0][i], float("-inf"))
                else:
                    self.assertAlmostEqual(columns[0][i], 48165140.010532916 - 25229775.250687573, delta=100)

            # Max. / Min. RTT
            columns = exputil.read_csv_direct_in_columns(
                output_analysis_data_dir + "/" + name +
                "/" + name + "/100ms_for_200s/rtt/data/ecdf_pairs_max_rtt_to_min_rtt_slowdown.txt",
                "float,pos_float"
            )
            for i in range(len(columns[0])):

                # Cumulative y-axis check
                if i == 0:
                    self.assertEqual(columns[1][i], 0)
                else:
                    self.assertEqual(columns[1][i], 1.0)

                # Only one pair with minimum RTT of 25ish, max. RTT is 48ish
                if i == 0:
                    self.assertEqual(columns[0][i], float("-inf"))
                else:
                    self.assertAlmostEqual(columns[0][i], 48165140.010532916 / 25229775.250687573, delta=0.01)

            # Geodesic slowdown
            columns = exputil.read_csv_direct_in_columns(
                output_analysis_data_dir + "/" + name +
                "/" + name + "/100ms_for_200s/rtt/data/ecdf_pairs_max_rtt_to_geodesic_slowdown.txt",
                "float,pos_float"
            )
            for i in range(len(columns[0])):

                # Cumulative y-axis check
                if i == 0:
                    self.assertEqual(columns[1][i], 0)
                else:
                    self.assertEqual(columns[1][i], 1.0)

                # Distance Manila to Dalian is 2,703 km according to Google Maps, RTT = 2*D / c
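                # Illustrative arithmetic (c expressed in m/ns as 0.299792):
                #   geodesic RTT ~= 2 * 2,703,000 m / 0.299792 m/ns ~= 1.80e7 ns (~18.0 ms)
                #   so the expected slowdown is roughly 4.82e7 / 1.80e7 ~= 2.67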
                if i == 0:
                    self.assertEqual(columns[0][i], float("-inf"))
                else:
                    self.assertAlmostEqual(columns[0][i], 48165140.010532916 / (2 * 2703000 / 0.299792), delta=0.01)

            # Analyze time step paths
            satgen.post_analysis.analyze_time_step_path(
                output_analysis_data_dir + "/" + name,
                output_generated_data_dir + "/" + name,
                all_time_step_ms,
                duration_s
            )

            # Missed path changes
            for time_step_ms in all_time_step_ms:
                columns = exputil.read_csv_direct_in_columns(
                    output_analysis_data_dir + "/" + name +
                    "/" + name + "/200s/path/data/"
                    + "ecdf_pairs_" + str(time_step_ms) + "ms_missed_path_changes.txt",
                    "float,pos_float"
                )
                for i in range(len(columns[0])):

                    # Cumulative y-axis check
                    if i == 0:
                        self.assertEqual(columns[1][i], 0)
                    else:
                        self.assertEqual(columns[1][i], 1.0)

                    # Only one should have missed for the 10s one
                    if i == 0:
                        self.assertEqual(columns[0][i], float("-inf"))
                    else:
                        if time_step_ms == 10000:
                            self.assertEqual(columns[0][i], 1)
                        else:
                            self.assertEqual(columns[0][i], 0)

            # Time between path changes
            columns = exputil.read_csv_direct_in_columns(
                output_analysis_data_dir + "/" + name +
                "/" + name + "/200s/path/data/"
                + "ecdf_overall_time_between_path_change.txt",
                "float,pos_float"
            )
            self.assertEqual(len(columns[0]), 5)  # Total 5 path changes, but only 4 of them are not from epoch
            for i in range(len(columns[0])):

                # Cumulative y-axis check
                if i == 0:
                    self.assertEqual(columns[1][i], 0)
                else:
                    if i == 1:
                        self.assertEqual(columns[1][i], 0.25)
                    elif i == 2:
                        self.assertEqual(columns[1][i], 0.5)
                    elif i == 3:
                        self.assertEqual(columns[1][i], 0.75)
                    elif i == 4:
                        self.assertEqual(columns[1][i], 1.0)

                # Gap values
                if i == 0:
                    self.assertEqual(columns[0][i], float("-inf"))
                else:
                    if i == 1:
                        self.assertEqual(columns[0][i], 2750000000)
                    elif i == 2:
                        self.assertEqual(columns[0][i], 9600000000)
                    elif i == 3:
                        self.assertEqual(columns[0][i], 46700000000)
                    elif i == 4:
                        self.assertEqual(columns[0][i], 51650000000)

            # Clean up
            local_shell.remove_force_recursive("temp_gen_data")
            local_shell.remove_force_recursive("temp_analysis_data")

Example #5
A second end-to-end test excerpt with three ground stations; same dependencies as Example #4, plus math and constellation constants such as NICE_NAME, NUM_ORBS, NUM_SATS_PER_ORB, PHASE_DIFF, INCLINATION_DEGREE, ECCENTRICITY, ARG_OF_PERIGEE_DEGREE and MEAN_MOTION_REV_PER_DAY.
    def test_end_to_end(self):
        local_shell = exputil.LocalShell()

        # Clean slate start
        local_shell.remove_force_recursive("temp_gen_data")
        local_shell.make_full_dir("temp_gen_data")

        # Both dynamic state algorithms should yield the same path and RTT
        for dynamic_state_algorithm in [
                "algorithm_free_one_only_over_isls",
                "algorithm_free_gs_one_sat_many_only_over_isls"
        ]:

            # Specific outcomes
            output_generated_data_dir = "temp_gen_data"
            num_threads = 1
            default_time_step_ms = 100
            all_time_step_ms = [50, 100, 1000, 10000, 20000]
            duration_s = 200

            # Add base name to setting
            name = "triangle_reduced_kuiper_630_" + dynamic_state_algorithm

            # Create output directories
            if not os.path.isdir(output_generated_data_dir):
                os.makedirs(output_generated_data_dir)
            if not os.path.isdir(output_generated_data_dir + "/" + name):
                os.makedirs(output_generated_data_dir + "/" + name)

            # Ground stations
            print("Generating ground stations...")
            with open(output_generated_data_dir + "/" + name + "/ground_stations.basic.txt", "w+") as f_out:
                f_out.write("0,Manila,14.6042,120.9822,0\n")  # Originally no. 17
                f_out.write("1,Dalian,38.913811,121.602322,0\n")  # Originally no. 85
                f_out.write("2,Sankt-Peterburg-(Saint-Petersburg),59.929858,30.326228,0\n")  # Originally no. 73
            satgen.extend_ground_stations(
                output_generated_data_dir + "/" + name + "/ground_stations.basic.txt",
                output_generated_data_dir + "/" + name + "/ground_stations.txt"
            )

            # Path trace we base this test on:

            # (1) 1173 -> 1241
            # 0,1173-184-183-217-1241
            # 18000000000,1173-218-217-1241
            # 27600000000,1173-648-649-650-616-1241
            # 74300000000,1173-218-217-216-250-1241
            # 125900000000,1173-647-648-649-650-616-1241
            # 128700000000,1173-647-648-649-615-1241

            # (2) 1229 -> 1241
            # 0,1229-144-178-212-246-280-281-282-283-1241
            # 3300000000,1229-177-178-212-246-280-281-282-283-1241
            # 10100000000,1229-177-178-212-246-247-248-249-1241
            # 128700000000,1229-177-211-245-246-247-248-249-1241
            # 139500000000,1229-144-178-212-246-247-248-249-1241
            # 155400000000,Unreachable
            # 165200000000,1229-143-177-211-245-279-280-281-282-1241
            # 178800000000,1229-176-177-211-245-279-280-281-282-1241

            # (3) 1229 -> 1173
            # 0,1229-144-178-179-180-181-182-183-184-1173
            # 3300000000,1229-177-178-179-180-181-182-183-184-1173
            # 139500000000,1229-144-178-179-180-181-182-183-184-1173
            # 150100000000,1229-144-178-179-180-181-182-183-1173
            # 155400000000,Unreachable
            # 165200000000,1229-143-177-178-179-180-181-182-183-1173
            # 178800000000,1229-176-177-178-179-180-181-182-183-1173

            # Select all satellite IDs
            subset_of_satellites = set()
            for path_filename in [
                    "tests/data_to_match/kuiper_630/networkx_path_1173_to_1241.txt",
                    "tests/data_to_match/kuiper_630/networkx_path_1229_to_1173.txt",
                    "tests/data_to_match/kuiper_630/networkx_path_1229_to_1241.txt",
            ]:
                columns = exputil.read_csv_direct_in_columns(
                    path_filename, "pos_int,string")
                for path in columns[1]:
                    if path != "Unreachable":
                        for sat_id in map(int, path.split("-")[1:-1]):
                            subset_of_satellites.add(sat_id)
            list_of_satellites = sorted(list(subset_of_satellites))
            original_sat_id_to_new_sat_id = {}
            for i in range(len(list_of_satellites)):
                original_sat_id_to_new_sat_id[list_of_satellites[i]] = i
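            # Illustration: if list_of_satellites were e.g. [143, 144, 176, ...], this mapping would be
            # {143: 0, 144: 1, 176: 2, ...}, i.e. the original satellite IDs become consecutive test IDs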

            # Generate normal TLEs and then only filter out the limited satellite list
            print("Generating TLEs...")
            satgen.generate_tles_from_scratch_manual(
                output_generated_data_dir + "/" + name + "/tles_complete.txt",
                NICE_NAME, NUM_ORBS, NUM_SATS_PER_ORB, PHASE_DIFF,
                INCLINATION_DEGREE, ECCENTRICITY, ARG_OF_PERIGEE_DEGREE,
                MEAN_MOTION_REV_PER_DAY)
            with open(
                    output_generated_data_dir + "/" + name +
                    "/tles_complete.txt", "r") as f_in:
                with open(output_generated_data_dir + "/" + name + "/tles.txt",
                          "w+") as f_out:
                    f_out.write("1 %d\n" % len(list_of_satellites))  # Pretend it's a single orbit with N satellites
                    i = 0
                    for line in f_in:
                        line = line.strip()
                        if int(math.floor((i - 1) / 3.0)) in list_of_satellites:
                            if (i - 1) % 3 == 0:
                                f_out.write("%s %d\n" %
                                            (line.split(" ")[0],
                                             original_sat_id_to_new_sat_id[int(
                                                 line.split(" ")[1])]))
                            else:
                                f_out.write("%s\n" % line)
                        i += 1

            # ISLs
            print("Generating ISLs...")
            complete_list_isls = satgen.generate_plus_grid_isls(
                output_generated_data_dir + "/" + name +
                "/isls_complete.temp.txt",
                NUM_ORBS,
                NUM_SATS_PER_ORB,
                isl_shift=0,
                idx_offset=0)
            with open(output_generated_data_dir + "/" + name + "/isls.txt",
                      "w+") as f_out:
                for isl in complete_list_isls:
                    if isl[0] in list_of_satellites and isl[1] in list_of_satellites:
                        f_out.write("%d %d\n" % (original_sat_id_to_new_sat_id[isl[0]],
                                                 original_sat_id_to_new_sat_id[isl[1]]))

            # Description
            print("Generating description...")
            satgen.generate_description(
                output_generated_data_dir + "/" + name + "/description.txt",
                MAX_GSL_LENGTH_M, MAX_ISL_LENGTH_M)

            # Extended ground stations
            ground_stations = satgen.read_ground_stations_extended(
                output_generated_data_dir + "/" + name +
                "/ground_stations.txt")

            # GSL interfaces
            if dynamic_state_algorithm == "algorithm_free_one_only_over_isls":
                gsl_interfaces_per_satellite = 1
                gsl_satellite_max_agg_bandwidth = 1.0
            elif dynamic_state_algorithm == "algorithm_free_gs_one_sat_many_only_over_isls":
                gsl_interfaces_per_satellite = len(ground_stations)
                gsl_satellite_max_agg_bandwidth = len(ground_stations)
            else:
                raise ValueError("Unknown dynamic state algorithm: " +
                                 dynamic_state_algorithm)
            print("Generating GSL interfaces info..")
            satgen.generate_simple_gsl_interfaces_info(
                output_generated_data_dir + "/" + name +
                "/gsl_interfaces_info.txt",
                len(list_of_satellites),  # N satellites
                len(ground_stations),
                gsl_interfaces_per_satellite,  # GSL interfaces per satellite
                1,  # (GSL) Interfaces per ground station
                gsl_satellite_max_agg_bandwidth,  # Aggregate max. bandwidth satellite (unit unspecified)
                1  # Aggregate max. bandwidth ground station (same unspecified unit)
            )

            # Forwarding state
            for time_step_ms in all_time_step_ms:
                print("Generating forwarding state...")
                satgen.help_dynamic_state(output_generated_data_dir,
                                          num_threads, name, time_step_ms,
                                          duration_s, MAX_GSL_LENGTH_M,
                                          MAX_ISL_LENGTH_M,
                                          dynamic_state_algorithm, False)

            # Clean slate start
            local_shell.remove_force_recursive("temp_analysis_data")
            local_shell.make_full_dir("temp_analysis_data")
            output_analysis_data_dir = "temp_analysis_data"

            # Check the path and RTT for each pair
            new_gs_id_to_old_node_id = {0: 1173, 1: 1241, 2: 1229}
            old_node_id_to_new_node_id = {
                1173: len(list_of_satellites) + 0,
                1241: len(list_of_satellites) + 1,
                1229: len(list_of_satellites) + 2,
            }
            min_rtts = []
            max_rtts = []
            for (src, dst) in [(0, 1), (0, 2), (1, 0), (1, 2), (2, 0), (2, 1)]:

                # Find node identifiers
                src_node_id = len(list_of_satellites) + src
                dst_node_id = len(list_of_satellites) + dst
                old_src_node_id = new_gs_id_to_old_node_id[src]
                old_dst_node_id = new_gs_id_to_old_node_id[dst]

                # Print the routes
                satgen.post_analysis.print_routes_and_rtt(
                    output_analysis_data_dir + "/" + name,
                    output_generated_data_dir + "/" + name,
                    default_time_step_ms, duration_s, src_node_id, dst_node_id,
                    "")

                # Now, we just want to see that the output path matches
                with open(output_analysis_data_dir + "/" + name +
                          "/data/networkx_path_%d_to_%d.txt" % (src_node_id, dst_node_id), "r") as f_in1:
                    with open("tests/data_to_match/kuiper_630/networkx_path_%d_to_%d.txt"
                              % (old_src_node_id, old_dst_node_id), "r") as f_in2:
                        lines1 = []
                        for line in f_in1:
                            lines1.append(line.strip())
                        lines2 = []
                        for line in f_in2:
                            lines2.append(line.strip())
                        self.assertEqual(len(lines1), len(lines2))
                        for i in range(len(lines1)):
                            spl1 = lines1[i].split(",")
                            spl2 = lines2[i].split(",")

                            # Time must be equal
                            self.assertEqual(spl1[0], spl2[0])

                            # Path must be equal
                            if spl1[1] == "Unreachable" or spl2[
                                    1] == "Unreachable":
                                self.assertEqual(spl1[1], spl2[1])
                            else:
                                node_list1 = list(map(int, spl1[1].split("-")))
                                node_list2 = list(map(int, spl2[1].split("-")))
                                new_node_list2 = []
                                for j in range(len(node_list2)):
                                    if j == 0 or j == len(node_list2) - 1:
                                        new_node_list2.append(old_node_id_to_new_node_id[node_list2[j]])
                                    else:
                                        new_node_list2.append(original_sat_id_to_new_sat_id[node_list2[j]])
                                self.assertEqual(node_list1, new_node_list2)

                # ... and the RTT
                lowest_rtt_ns = 100000000000
                highest_rtt_ns = 0
                with open(output_analysis_data_dir + "/" + name +
                          "/data/networkx_rtt_%d_to_%d.txt" % (src_node_id, dst_node_id), "r") as f_in1:
                    with open("tests/data_to_match/kuiper_630/networkx_rtt_%d_to_%d.txt"
                              % (old_src_node_id, old_dst_node_id), "r") as f_in2:
                        lines1 = []
                        for line in f_in1:
                            lines1.append(line.strip())
                        lines2 = []
                        for line in f_in2:
                            lines2.append(line.strip())

                        # Rather than self.assertEqual(lines1, lines2) (too costly, and exact float equality is not needed),
                        # compare element-wise: timestamps must match exactly, RTTs only up to a small tolerance
                        self.assertEqual(len(lines1), len(lines2))
                        for i in range(len(lines1)):
                            a_spl = lines1[i].split(",")
                            b_spl = lines2[i].split(",")
                            self.assertEqual(len(a_spl), len(b_spl))
                            self.assertEqual(len(a_spl), 2)
                            a_time = int(a_spl[0])
                            b_time = int(b_spl[0])
                            a_rtt = float(a_spl[1])
                            b_rtt = float(b_spl[1])
                            if a_rtt != 0:
                                lowest_rtt_ns = min(a_rtt, lowest_rtt_ns)
                                highest_rtt_ns = max(a_rtt, highest_rtt_ns)
                            self.assertEqual(a_time, b_time)
                            self.assertAlmostEqual(a_rtt, b_rtt, places=5)

                # Save RTTs
                if src < dst:
                    min_rtts.append(lowest_rtt_ns)
                    max_rtts.append(highest_rtt_ns)

            # Now let's run all analyses available

            # TODO: Disabled because it requires downloading files from CDNs, which can take too long
            # # Print graphically
            #
            # satgen.post_analysis.print_graphical_routes_and_rtt(
            #     output_analysis_data_dir + "/" + name,
            #     output_generated_data_dir + "/" + name,
            #     default_time_step_ms,
            #     duration_s,
            #     12,
            #     13
            # )

            # Analyze paths
            satgen.post_analysis.analyze_path(
                output_analysis_data_dir + "/" + name,
                output_generated_data_dir + "/" + name, default_time_step_ms,
                duration_s, "")

            # Number of path changes per pair
            columns = exputil.read_csv_direct_in_columns(
                output_analysis_data_dir + "/" + name + "/" + name +
                "/100ms_for_200s/path/data/ecdf_pairs_num_path_changes.txt",
                "float,pos_float")
            self.assertEqual(4, len(columns[0]))
            for i in range(len(columns[0])):

                # Cumulative y-axis check
                if i == 0:
                    self.assertEqual(columns[1][i], 0)
                elif i == 1:
                    self.assertAlmostEqual(columns[1][i],
                                           1.0 / 3.0,
                                           delta=0.0001)
                elif i == 2:
                    self.assertAlmostEqual(columns[1][i],
                                           2.0 / 3.0,
                                           delta=0.0001)
                else:
                    self.assertEqual(columns[1][i], 1.0)

                # There are three pairs, with 5, 6 and 7 path changes
                if i == 0:
                    self.assertEqual(columns[0][i], float("-inf"))
                elif i == 1:
                    self.assertEqual(columns[0][i], 5.0)
                elif i == 2:
                    self.assertEqual(columns[0][i], 6.0)
                else:
                    self.assertEqual(columns[0][i], 7.0)

            # Max minus min hop count per pair
            columns = exputil.read_csv_direct_in_columns(
                output_analysis_data_dir + "/" + name + "/" + name +
                "/100ms_for_200s/path/data/ecdf_pairs_max_minus_min_hop_count.txt",
                "float,pos_float")
            self.assertEqual(4, len(columns[0]))
            for i in range(len(columns[0])):

                # Cumulative y-axis check
                if i == 0:
                    self.assertEqual(columns[1][i], 0)
                elif i == 1:
                    self.assertAlmostEqual(columns[1][i],
                                           1.0 / 3.0,
                                           delta=0.0001)
                elif i == 2:
                    self.assertAlmostEqual(columns[1][i],
                                           2.0 / 3.0,
                                           delta=0.0001)
                else:
                    self.assertEqual(columns[1][i], 1.0)

                # One with 3 vs. 6, and two with 8 vs. 9
                if i == 0:
                    self.assertEqual(columns[0][i], float("-inf"))
                elif i == 1:
                    self.assertEqual(columns[0][i], 1)
                elif i == 2:
                    self.assertEqual(columns[0][i], 1)
                else:
                    self.assertEqual(columns[0][i], 3)

            # Max divided by min hop count per pair
            columns = exputil.read_csv_direct_in_columns(
                output_analysis_data_dir + "/" + name + "/" + name +
                "/100ms_for_200s/path/data/ecdf_pairs_max_hop_count_to_min_hop_count.txt",
                "float,pos_float")
            self.assertEqual(4, len(columns[0]))
            for i in range(len(columns[0])):

                # Cumulative y-axis check
                if i == 0:
                    self.assertEqual(columns[1][i], 0)
                elif i == 1:
                    self.assertAlmostEqual(columns[1][i],
                                           1.0 / 3.0,
                                           delta=0.0001)
                elif i == 2:
                    self.assertAlmostEqual(columns[1][i],
                                           2.0 / 3.0,
                                           delta=0.0001)
                else:
                    self.assertEqual(columns[1][i], 1.0)

                # One with 3 vs. 6, and two with 8 vs. 9
                if i == 0:
                    self.assertEqual(columns[0][i], float("-inf"))
                elif i == 1:
                    self.assertAlmostEqual(columns[0][i],
                                           9.0 / 8.0,
                                           delta=0.0001)
                elif i == 2:
                    self.assertAlmostEqual(columns[0][i],
                                           9.0 / 8.0,
                                           delta=0.0001)
                else:
                    self.assertEqual(columns[0][i], 2.0)

            # These are the path changes

            # 18000000000,1173-218-217-1241

            # 27600000000,1173-648-649-650-616-1241

            # 3300000000,1229-177-178-179-180-181-182-183-184-1173
            # 3300000000,1229-177-178-212-246-280-281-282-283-1241

            # 74300000000,1173-218-217-216-250-1241

            # 10100000000,1229-177-178-212-246-247-248-249-1241

            # 125900000000,1173-647-648-649-650-616-1241

            # 128700000000,1229-177-211-245-246-247-248-249-1241
            # 128700000000,1173-647-648-649-615-1241

            # 139500000000,1229-144-178-179-180-181-182-183-184-1173
            # 139500000000,1229-144-178-212-246-247-248-249-1241

            # 150100000000,1229-144-178-179-180-181-182-183-1173

            # 155400000000,Unreachable
            # 155400000000,Unreachable

            # 165200000000,1229-143-177-211-245-279-280-281-282-1241
            # 165200000000,1229-143-177-178-179-180-181-182-183-1173

            # 178800000000,1229-176-177-211-245-279-280-281-282-1241
            # 178800000000,1229-176-177-178-179-180-181-182-183-1173

            # For all pairs, the distribution of how many times they changed path in a time step
            columns = exputil.read_csv_direct_in_columns(
                output_analysis_data_dir + "/" + name + "/" + name +
                "/100ms_for_200s/path/data/ecdf_time_step_num_path_changes.txt",
                "float,pos_float")
            start_cumulative = 0.0
            for i in range(len(columns[0])):

                # Cumulative y-axis check
                if i == 0:
                    self.assertEqual(columns[1][i], start_cumulative)
                else:
                    self.assertGreater(columns[1][i], start_cumulative)
                if i == len(columns[0]) - 1:
                    self.assertEqual(columns[1][i], 1.0)

                # There are 12 time steps, of which 6 have 2 changes, and 6 have 1 change
                if i == 0:
                    self.assertEqual(columns[0][i], float("-inf"))
                elif i > 2000 - 7:
                    self.assertEqual(columns[0][i], 2.0)
                elif i > 2000 - 13:
                    self.assertEqual(columns[0][i], 1.0)
                else:
                    self.assertEqual(columns[0][i], 0)

            # Analyze RTTs
            satgen.post_analysis.analyze_rtt(
                output_analysis_data_dir + "/" + name,
                output_generated_data_dir + "/" + name, default_time_step_ms,
                duration_s, "")

            # Min. RTT
            columns = exputil.read_csv_direct_in_columns(
                output_analysis_data_dir + "/" + name + "/" + name +
                "/100ms_for_200s/rtt/data/ecdf_pairs_min_rtt_ns.txt",
                "float,pos_float")
            self.assertEqual(4, len(columns[0]))
            sorted_min_rtts = sorted(min_rtts)
            for i in range(len(columns[0])):

                # Cumulative y-axis check
                if i == 0:
                    self.assertEqual(columns[1][i], 0)
                elif i == 1:
                    self.assertAlmostEqual(columns[1][i],
                                           1.0 / 3.0,
                                           delta=0.0001)
                elif i == 2:
                    self.assertAlmostEqual(columns[1][i],
                                           2.0 / 3.0,
                                           delta=0.0001)
                else:
                    self.assertEqual(columns[1][i], 1.0)

                # RTT
                if i == 0:
                    self.assertEqual(columns[0][i], float("-inf"))
                else:
                    self.assertAlmostEqual(columns[0][i],
                                           sorted_min_rtts[i - 1],
                                           delta=100)

            # Max. RTT
            columns = exputil.read_csv_direct_in_columns(
                output_analysis_data_dir + "/" + name + "/" + name +
                "/100ms_for_200s/rtt/data/ecdf_pairs_max_rtt_ns.txt",
                "float,pos_float")
            self.assertEqual(4, len(columns[0]))
            sorted_max_rtts = sorted(max_rtts)
            for i in range(len(columns[0])):

                # Cumulative y-axis check
                if i == 0:
                    self.assertEqual(columns[1][i], 0)
                elif i == 1:
                    self.assertAlmostEqual(columns[1][i],
                                           1.0 / 3.0,
                                           delta=0.0001)
                elif i == 2:
                    self.assertAlmostEqual(columns[1][i],
                                           2.0 / 3.0,
                                           delta=0.0001)
                else:
                    self.assertEqual(columns[1][i], 1.0)

                # RTT
                if i == 0:
                    self.assertEqual(columns[0][i], float("-inf"))
                else:
                    self.assertAlmostEqual(columns[0][i],
                                           sorted_max_rtts[i - 1],
                                           delta=100)

            # Max. - Min. RTT
            columns = exputil.read_csv_direct_in_columns(
                output_analysis_data_dir + "/" + name + "/" + name +
                "/100ms_for_200s/rtt/data/ecdf_pairs_max_minus_min_rtt_ns.txt",
                "float,pos_float")
            self.assertEqual(4, len(columns[0]))
            sorted_max_minus_min_rtts = sorted(
                list(map(lambda x: max_rtts[x] - min_rtts[x], list(range(3)))))
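            # (The three entries correspond to the src < dst pairs saved above: (0, 1), (0, 2) and (1, 2))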
            for i in range(len(columns[0])):

                # Cumulative y-axis check
                if i == 0:
                    self.assertEqual(columns[1][i], 0)
                elif i == 1:
                    self.assertAlmostEqual(columns[1][i],
                                           1.0 / 3.0,
                                           delta=0.0001)
                elif i == 2:
                    self.assertAlmostEqual(columns[1][i],
                                           2.0 / 3.0,
                                           delta=0.0001)
                else:
                    self.assertEqual(columns[1][i], 1.0)

                # RTT
                if i == 0:
                    self.assertEqual(columns[0][i], float("-inf"))
                else:
                    self.assertAlmostEqual(columns[0][i],
                                           sorted_max_minus_min_rtts[i - 1],
                                           delta=100)

            # Max. / Min. RTT
            columns = exputil.read_csv_direct_in_columns(
                output_analysis_data_dir + "/" + name + "/" + name +
                "/100ms_for_200s/rtt/data/ecdf_pairs_max_rtt_to_min_rtt_slowdown.txt",
                "float,pos_float")
            self.assertEqual(4, len(columns[0]))
            sorted_max_divided_min_rtts = sorted(
                list(map(lambda x: max_rtts[x] / min_rtts[x], list(range(3)))))
            for i in range(len(columns[0])):

                # Cumulative y-axis check
                if i == 0:
                    self.assertEqual(columns[1][i], 0)
                elif i == 1:
                    self.assertAlmostEqual(columns[1][i],
                                           1.0 / 3.0,
                                           delta=0.0001)
                elif i == 2:
                    self.assertAlmostEqual(columns[1][i],
                                           2.0 / 3.0,
                                           delta=0.0001)
                else:
                    self.assertEqual(columns[1][i], 1.0)

                # RTT
                if i == 0:
                    self.assertEqual(columns[0][i], float("-inf"))
                else:
                    self.assertAlmostEqual(columns[0][i],
                                           sorted_max_divided_min_rtts[i - 1],
                                           delta=0.01)

            # Geodesic slowdown
            columns = exputil.read_csv_direct_in_columns(
                output_analysis_data_dir + "/" + name + "/" + name +
                "/100ms_for_200s/rtt/data/ecdf_pairs_max_rtt_to_geodesic_slowdown.txt",
                "float,pos_float")
            # Distances according to Google Maps:
            # Manila to Dalian: 2,703 km
            # St. Petersburg to Manila: 8,635 km
            # St. Petersburg to Dalian: 6,406 km
            self.assertEqual(4, len(columns[0]))
            geodesic_expected_distance = [2703, 8635, 6406]
            sorted_max_divided_geodesic_rtts = sorted(
                list(
                    map(
                        lambda x: max_rtts[x] /
                        (2 * geodesic_expected_distance[x] * 1000.0 / 0.299792
                         ), list(range(3)))))
            for i in range(len(columns[0])):

                # Cumulative y-axis check
                if i == 0:
                    self.assertEqual(columns[1][i], 0)
                elif i == 1:
                    self.assertAlmostEqual(columns[1][i],
                                           1.0 / 3.0,
                                           delta=0.0001)
                elif i == 2:
                    self.assertAlmostEqual(columns[1][i],
                                           2.0 / 3.0,
                                           delta=0.0001)
                else:
                    self.assertEqual(columns[1][i], 1.0)

                # Geodesic RTT = 2*D / c (D in meters, c ~ 0.299792 m/ns);
                # see the worked check after this example
                if i == 0:
                    self.assertEqual(columns[0][i], float("-inf"))
                else:
                    self.assertAlmostEqual(
                        columns[0][i],
                        sorted_max_divided_geodesic_rtts[i - 1],
                        delta=0.01)

            # Analyze time step paths
            satgen.post_analysis.analyze_time_step_path(
                output_analysis_data_dir + "/" + name,
                output_generated_data_dir + "/" + name, all_time_step_ms,
                duration_s)

            # Missed path changes
            for time_step_ms in all_time_step_ms:
                columns = exputil.read_csv_direct_in_columns(
                    output_analysis_data_dir + "/" + name + "/" + name +
                    "/200s/path/data/" + "ecdf_pairs_" + str(time_step_ms) +
                    "ms_missed_path_changes.txt", "float,pos_float")
                for i in range(len(columns[0])):

                    # Cumulative y-axis check
                    if i == 0:
                        self.assertEqual(columns[1][i], 0)
                    elif i == 1:
                        self.assertAlmostEqual(columns[1][i],
                                               1.0 / 3.0,
                                               delta=0.0001)
                    elif i == 2:
                        self.assertAlmostEqual(columns[1][i],
                                               2.0 / 3.0,
                                               delta=0.0001)
                    else:
                        self.assertEqual(columns[1][i], 1.0)

                    # With the 10s time step, two of the three pairs miss
                    # one path change each; with the 20s time step, the pairs
                    # miss 1, 2 and 3 path changes respectively
                    if i == 0:
                        self.assertEqual(columns[0][i], float("-inf"))
                    else:
                        if time_step_ms == 10000:
                            if i == 1:
                                self.assertEqual(columns[0][i], 0)
                            if i == 2:
                                self.assertEqual(columns[0][i], 1)
                            if i == 3:
                                self.assertEqual(columns[0][i], 1)
                        elif time_step_ms == 20000:
                            if i == 1:
                                self.assertEqual(columns[0][i], 1)
                            if i == 2:
                                self.assertEqual(columns[0][i], 2)
                            if i == 3:
                                self.assertEqual(columns[0][i], 3)
                        else:
                            self.assertEqual(columns[0][i], 0)

            # Time between path changes
            columns = exputil.read_csv_direct_in_columns(
                output_analysis_data_dir + "/" + name + "/" + name +
                "/200s/path/data/" +
                "ecdf_overall_time_between_path_change.txt", "float,pos_float")
            # 18 path changes in total, of which 15 are not at the epoch;
            # together with the leading (-inf, 0) ECDF entry that gives 16 rows
            self.assertEqual(len(columns[0]), 16)
            for i in range(len(columns[0])):

                # Cumulative y-axis check
                if i == 0:
                    self.assertEqual(columns[1][i], 0)
                else:
                    self.assertAlmostEqual(columns[1][i],
                                           i / float(len(columns[0]) - 1),
                                           delta=0.00001)

                # Gap values
                if i == 0:
                    self.assertEqual(columns[0][i], float("-inf"))
                else:
                    if i == 1:
                        self.assertEqual(columns[0][i], 2750000000)
                    elif i == 2:
                        self.assertEqual(columns[0][i], 5350000000)
                    elif i == 3:
                        self.assertEqual(columns[0][i], 6800000000)
                    elif i == 4:
                        self.assertEqual(columns[0][i], 9600000000)
                    elif i == 5:
                        self.assertEqual(columns[0][i], 9750000000)
                    elif i == 6:
                        self.assertEqual(columns[0][i], 9750000000)
                    elif i == 7:
                        self.assertEqual(columns[0][i], 10550000000)
                    elif i == 8:
                        self.assertEqual(columns[0][i], 10800000000)
                    elif i == 9:
                        self.assertEqual(columns[0][i], 13600000000)
                    elif i == 10:
                        self.assertEqual(columns[0][i], 13600000000)
                    elif i == 11:
                        self.assertEqual(columns[0][i], 15900000000)
                    elif i == 12:
                        self.assertEqual(columns[0][i], 46700000000)
                    elif i == 13:
                        self.assertEqual(columns[0][i], 51650000000)
                    elif i == 14:
                        self.assertEqual(columns[0][i], 118650000000)
                    elif i == 15:
                        self.assertEqual(columns[0][i], 136250000000)

            # Clean up
            local_shell.remove_force_recursive("temp_gen_data")
            local_shell.remove_force_recursive("temp_analysis_data")
Example #6
0
File: analyze.py Project: snkas/floodns
def analyze_connection_info(logs_floodns_dir, analysis_folder_dir):

    # Read in all the columns
    flows_info_csv_columns = exputil.read_csv_direct_in_columns(
        logs_floodns_dir + '/connection_info.csv',
        "pos_int,pos_int,pos_int,pos_float,pos_float,string,pos_int,pos_int,pos_int,pos_float,string,string"
    )
    connection_id_list = flows_info_csv_columns[0]
    source_id_list = flows_info_csv_columns[1]
    target_id_list = flows_info_csv_columns[2]
    # total_size_list = flows_info_csv_columns[3]
    # total_sent_list = flows_info_csv_columns[4]
    # flows_string_list = flows_info_csv_columns[5]
    # num_flows_list = list(map(lambda x: len(x.split(";")), flows_string_list))
    # start_time_list = flows_info_csv_columns[6]
    # end_time_list = flows_info_csv_columns[7]
    duration_list = flows_info_csv_columns[8]
    avg_throughput_list = flows_info_csv_columns[9]
    completed_string_list = flows_info_csv_columns[10]
    completed_list = []
    count_completed = 0
    count_incomplete = 0
    for c in completed_string_list:
        if c == "T":
            completed_list.append(True)
            count_completed += 1
        elif c == "F":
            completed_list.append(False)
            count_incomplete += 1
        else:
            raise ValueError("Invalid completed value: " + c)
    # metadata_list = flows_info_csv_columns[11]

    # Calculate some statistics
    if len(connection_id_list) == 0:
        statistics = {
            'all_num_connections': len(connection_id_list),
        }
    else:

        statistics = {
            'all_num_connections': len(connection_id_list),
            'all_num_connections_completed': count_completed,
            'all_num_connections_incomplete': count_incomplete,
            'all_num_connections_fraction_completed': float(count_completed) / float(len(connection_id_list)),
            'all_connection_num_unique_sources': len(set(source_id_list)),
            'all_connection_num_unique_targets': len(set(target_id_list)),

            'all_connection_avg_throughput_min': np.min(avg_throughput_list),
            'all_connection_avg_throughput_0.1th': np.percentile(avg_throughput_list, 0.1),
            'all_connection_avg_throughput_1th': np.percentile(avg_throughput_list, 1),
            'all_connection_avg_throughput_mean': np.mean(avg_throughput_list),
            'all_connection_avg_throughput_median': np.median(avg_throughput_list),
            'all_connection_avg_throughput_99th': np.percentile(avg_throughput_list, 99),
            'all_connection_avg_throughput_99.9th': np.percentile(avg_throughput_list, 99.9),
            'all_connection_avg_throughput_max': np.max(avg_throughput_list),
            'all_connection_avg_throughput_sum': sum(avg_throughput_list),
        }

        completion_time = []
        completion_throughput = []
        for i in range(len(connection_id_list)):
            if completed_list[i]:
                completion_time.append(duration_list[i])
                completion_throughput.append(avg_throughput_list[i])

        if count_completed > 0:
            statistics.update({
                'completed_connection_completion_time_min': np.min(completion_time),
                'completed_connection_completion_time_0.1th': np.percentile(completion_time, 0.1),
                'completed_connection_completion_time_1th': np.percentile(completion_time, 1),
                'completed_connection_completion_time_mean': np.mean(completion_time),
                'completed_connection_completion_time_median': np.median(completion_time),
                'completed_connection_completion_time_99th': np.percentile(completion_time, 99),
                'completed_connection_completion_time_99.9th': np.percentile(completion_time, 99.9),
                'completed_connection_completion_time_max': np.max(completion_time),

                'completed_connection_throughput_min': np.min(completion_throughput),
                'completed_connection_throughput_0.1th': np.percentile(completion_throughput, 0.1),
                'completed_connection_throughput_1th': np.percentile(completion_throughput, 1),
                'completed_connection_throughput_mean': np.mean(completion_throughput),
                'completed_connection_throughput_median': np.median(completion_throughput),
                'completed_connection_throughput_99th': np.percentile(completion_throughput, 99),
                'completed_connection_throughput_99.9th': np.percentile(completion_throughput, 99.9),
                'completed_connection_throughput_max': np.max(completion_throughput),
            })

    # Print raw results
    output_filename = analysis_folder_dir + '/connection_info.statistics'
    print('Writing connection statistics: %s' % output_filename)
    with open(output_filename, 'w+') as outfile:
        for key, value in sorted(statistics.items()):
            outfile.write(str(key) + "=" + str(value) + "\n")
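
A minimal usage sketch for the function above; the run directory paths are placeholders, not from the original repository, and the usual analyze.py imports (exputil, numpy as np) are assumed.

import exputil

# Hypothetical paths: any floodns run with a logs_floodns/connection_info.csv works.
logs_floodns_dir = "runs/example_run/logs_floodns"
analysis_folder_dir = "runs/example_run/analysis"

exputil.LocalShell().make_full_dir(analysis_folder_dir)
analyze_connection_info(logs_floodns_dir, analysis_folder_dir)
# Result: runs/example_run/analysis/connection_info.statistics,
# one "key=value" line per statistic, sorted by key.
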
Example #7
0
def generate_tcp_flow_rate_csv(logs_ns3_dir, data_out_dir, tcp_flow_id,
                               interval_ns):

    # Read in CSV of the progress
    progress_csv_columns = exputil.read_csv_direct_in_columns(
        logs_ns3_dir + "/tcp_flow_" + str(tcp_flow_id) + "_progress.csv",
        "pos_int,pos_int,pos_int")
    num_entries = len(progress_csv_columns[0])
    tcp_flow_id_list = progress_csv_columns[0]
    time_ns_list = progress_csv_columns[1]
    progress_byte_list = progress_csv_columns[2]

    # TCP Flow ID list must be all exactly tcp_flow_id
    for i in tcp_flow_id_list:
        if i != tcp_flow_id:
            raise ValueError(
                "The flow identifier does not match (it must be the same in the entire progress file)"
            )

    # Add up all the progress made in that interval
    current_interval = (0, interval_ns, 0)
    intervals = []
    last_progress_byte = 0
    for i in range(num_entries):

        # Continue to fast-forward intervals until the next entry is in it
        while time_ns_list[i] >= current_interval[1]:
            intervals.append(current_interval)
            current_interval = (current_interval[1],
                                current_interval[1] + interval_ns, 0)

        # Now it must be within current_interval
        current_interval = (current_interval[0], current_interval[1],
                            current_interval[2] + progress_byte_list[i] -
                            last_progress_byte)
        last_progress_byte = progress_byte_list[i]

    # Add the last interval if it is not empty
    if current_interval[2] != 0:
        intervals.append(current_interval)

    # Now go over the intervals
    #
    # Each interval [a, b] with progress c, gets converted into two points:
    # a, c
    # b - (small number), c
    #
    # This effectively creates a step function as a continuous line, which can then be plotted by gnuplot.
    #
    data_filename = data_out_dir + "/tcp_flow_" + str(
        tcp_flow_id) + "_rate_in_intervals.csv"
    with open(data_filename, "w+") as f_out:
        for i in range(len(intervals)):
            rate_megabit_per_s = intervals[i][2] / 125000.0 * (1e9 /
                                                               interval_ns)
            f_out.write("%d,%.10f,%.10f\n" %
                        (tcp_flow_id, intervals[i][0], rate_megabit_per_s))
            f_out.write(
                "%d,%.10f,%.10f\n" %
                (tcp_flow_id, intervals[i][1] - 0.000001, rate_megabit_per_s))

    # Show what is produced
    print("Interval: " + str(interval_ns / 1000000.0) + " ms")
    print("Line format: [tcp_flow_id],[time_moment_ns],[rate in Mbps]")
    print("Produced: " + data_filename)
Example #8
0
            # Finished filename to check if done
            finished_filename = logs_ns3_dir + "/finished.txt"

            if not (exputil.LocalShell().file_exists(finished_filename) and
                    exputil.LocalShell().read_file(finished_filename).strip()
                    == "Yes"):
                print("Skipping: " + run_dir)

            else:
                print("Processing: " + run_dir)

                if protocol_chosen == "tcp":

                    # Sum up all goodput
                    tcp_flows_csv_columns = exputil.read_csv_direct_in_columns(
                        logs_ns3_dir + "/tcp_flows.csv",
                        "idx_int,pos_int,pos_int,pos_int,pos_int,pos_int,pos_int,pos_int,string,string"
                    )
                    amount_sent_byte_list = tcp_flows_csv_columns[7]
                    total_sent_byte = float(np.sum(amount_sent_byte_list))

                elif protocol_chosen == "udp":

                    # Sum up all goodput
                    udp_bursts_incoming_csv_columns = exputil.read_csv_direct_in_columns(
                        logs_ns3_dir + "/udp_bursts_incoming.csv",
                        "idx_int,pos_int,pos_int,pos_float,pos_int,pos_int,pos_float,pos_float,pos_float,pos_float,pos_float,string"
                    )
                    amount_payload_sent_byte_list = udp_bursts_incoming_csv_columns[
                        10]
                    total_sent_byte = float(
                        np.sum(amount_payload_sent_byte_list))
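
The fragment above only sums up how many bytes were sent per run; turning that sum into an average goodput additionally needs the measurement duration. A hedged sketch of that conversion, with example values that are not from the original runs:

# Hedged sketch: convert a summed byte count into an average goodput.
duration_ns = 10 * 1000 * 1000 * 1000   # assumed 10 s measurement window
total_sent_byte = 1.25e9                # example value

goodput_megabit_per_s = total_sent_byte * 8.0 / (duration_ns / 1e9) / 1e6
print("Average goodput: %.2f Mbit/s" % goodput_megabit_per_s)
# 1.25e9 B * 8 bit/B / 10 s = 1e9 bit/s = 1000 Mbit/s
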
Example #9
0
def plot_udp_burst(logs_ns3_dir, data_out_dir, pdf_out_dir, udp_burst_id,
                   interval_ns):
    local_shell = exputil.LocalShell()

    # Check that all plotting files are available
    if (not local_shell.file_exists("plot_udp_burst_time_vs_amount_sent.plt")
            or not local_shell.file_exists(
                "plot_udp_burst_time_vs_amount_arrived.plt") or
            not local_shell.file_exists("plot_udp_burst_time_vs_sent_rate.plt")
            or not local_shell.file_exists(
                "plot_udp_burst_time_vs_arrived_rate.plt")
            or not local_shell.file_exists(
                "plot_udp_burst_time_vs_one_way_latency.plt")):
        print("The gnuplot files are not present.")
        print(
            "Are you executing this python file inside the plot_udp_burst directory?"
        )
        exit(1)

    # Create the output directories if they don't exist yet
    local_shell.make_full_dir(data_out_dir)
    local_shell.make_full_dir(pdf_out_dir)

    # Read in CSV of the outgoing packets
    outgoing_csv_columns = exputil.read_csv_direct_in_columns(
        logs_ns3_dir + "/udp_burst_" + str(udp_burst_id) + "_outgoing.csv",
        "pos_int,pos_int,pos_int")
    outgoing_num_entries = len(outgoing_csv_columns[0])
    outgoing_udp_burst_id_list = outgoing_csv_columns[0]
    if outgoing_udp_burst_id_list != [udp_burst_id] * outgoing_num_entries:
        raise ValueError("Mismatched UDP burst ID in outgoing data")
    outgoing_seq_no_list = outgoing_csv_columns[1]
    if outgoing_seq_no_list != list(range(outgoing_num_entries)):
        raise ValueError("Not all outgoing sequence numbers are incremented")
    outgoing_time_ns_list = outgoing_csv_columns[2]

    # Read in CSV of the incoming packets
    incoming_csv_columns = exputil.read_csv_direct_in_columns(
        logs_ns3_dir + "/udp_burst_" + str(udp_burst_id) + "_incoming.csv",
        "pos_int,pos_int,pos_int")
    incoming_num_entries = len(incoming_csv_columns[0])
    incoming_udp_burst_id_list = incoming_csv_columns[0]
    if incoming_udp_burst_id_list != [udp_burst_id] * incoming_num_entries:
        raise ValueError("Mismatched UDP burst ID in incoming data")
    incoming_seq_no_list = incoming_csv_columns[1]
    incoming_time_ns_list = incoming_csv_columns[2]

    # Generate the data files
    filename_sent_amount_byte = generate_udp_burst_sent_amount_csv(
        data_out_dir, udp_burst_id, outgoing_time_ns_list)
    filename_arrived_amount_byte = generate_udp_burst_arrived_amount_csv(
        data_out_dir, udp_burst_id, incoming_time_ns_list)
    filename_sent_rate_megabit_per_s = generate_udp_burst_sent_rate_csv(
        data_out_dir, udp_burst_id, outgoing_time_ns_list, interval_ns)
    filename_arrived_rate_megabit_per_s = generate_udp_burst_arrived_rate_csv(
        data_out_dir, udp_burst_id, incoming_time_ns_list, interval_ns)
    filename_latency_ns = generate_udp_burst_latency_csv(
        data_out_dir, udp_burst_id, outgoing_time_ns_list,
        incoming_seq_no_list, incoming_time_ns_list)

    # Plot time vs. amount sent
    pdf_filename = pdf_out_dir + "/plot_udp_burst_time_vs_amount_sent_" + str(
        udp_burst_id) + ".pdf"
    plt_filename = "plot_udp_burst_time_vs_amount_sent.plt"
    local_shell.copy_file(plt_filename, "temp.plt")
    local_shell.sed_replace_in_file_plain("temp.plt", "[OUTPUT-FILE]",
                                          pdf_filename)
    local_shell.sed_replace_in_file_plain("temp.plt", "[DATA-FILE]",
                                          filename_sent_amount_byte)
    local_shell.perfect_exec("gnuplot temp.plt")
    print("Produced plot: " + pdf_filename)
    local_shell.remove("temp.plt")

    # Plot time vs. amount arrived
    pdf_filename = pdf_out_dir + "/plot_udp_burst_time_vs_amount_arrived_" + str(
        udp_burst_id) + ".pdf"
    plt_filename = "plot_udp_burst_time_vs_amount_arrived.plt"
    local_shell.copy_file(plt_filename, "temp.plt")
    local_shell.sed_replace_in_file_plain("temp.plt", "[OUTPUT-FILE]",
                                          pdf_filename)
    local_shell.sed_replace_in_file_plain("temp.plt", "[DATA-FILE]",
                                          filename_arrived_amount_byte)
    local_shell.perfect_exec("gnuplot temp.plt")
    print("Produced plot: " + pdf_filename)
    local_shell.remove("temp.plt")

    # Plot time vs. sent rate
    pdf_filename = pdf_out_dir + "/plot_udp_burst_time_vs_sent_rate_" + str(
        udp_burst_id) + ".pdf"
    plt_filename = "plot_udp_burst_time_vs_sent_rate.plt"
    local_shell.copy_file(plt_filename, "temp.plt")
    local_shell.sed_replace_in_file_plain("temp.plt", "[OUTPUT-FILE]",
                                          pdf_filename)
    local_shell.sed_replace_in_file_plain("temp.plt", "[DATA-FILE]",
                                          filename_sent_rate_megabit_per_s)
    local_shell.perfect_exec("gnuplot temp.plt")
    print("Produced plot: " + pdf_filename)
    local_shell.remove("temp.plt")

    # Plot time vs. arrived rate
    pdf_filename = pdf_out_dir + "/plot_udp_burst_time_vs_arrived_rate_" + str(
        udp_burst_id) + ".pdf"
    plt_filename = "plot_udp_burst_time_vs_arrived_rate.plt"
    local_shell.copy_file(plt_filename, "temp.plt")
    local_shell.sed_replace_in_file_plain("temp.plt", "[OUTPUT-FILE]",
                                          pdf_filename)
    local_shell.sed_replace_in_file_plain("temp.plt", "[DATA-FILE]",
                                          filename_arrived_rate_megabit_per_s)
    local_shell.perfect_exec("gnuplot temp.plt")
    print("Produced plot: " + pdf_filename)
    local_shell.remove("temp.plt")

    # Plot time vs. latency
    pdf_filename = pdf_out_dir + "/plot_udp_burst_time_vs_one_way_latency_" + str(
        udp_burst_id) + ".pdf"
    plt_filename = "plot_udp_burst_time_vs_one_way_latency.plt"
    local_shell.copy_file(plt_filename, "temp.plt")
    local_shell.sed_replace_in_file_plain("temp.plt", "[OUTPUT-FILE]",
                                          pdf_filename)
    local_shell.sed_replace_in_file_plain("temp.plt", "[DATA-FILE]",
                                          filename_latency_ns)
    local_shell.perfect_exec("gnuplot temp.plt")
    print("Produced plot: " + pdf_filename)
    local_shell.remove("temp.plt")
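
Every plot in this function repeats the same copy / sed_replace / gnuplot / remove sequence. A helper along these lines (not part of the original module) would capture that pattern once:

import exputil

def render_gnuplot(plt_filename, pdf_filename, data_filename):
    # Hypothetical helper mirroring the template-fill-and-render steps used above.
    local_shell = exputil.LocalShell()
    local_shell.copy_file(plt_filename, "temp.plt")
    local_shell.sed_replace_in_file_plain("temp.plt", "[OUTPUT-FILE]", pdf_filename)
    local_shell.sed_replace_in_file_plain("temp.plt", "[DATA-FILE]", data_filename)
    local_shell.perfect_exec("gnuplot temp.plt")
    print("Produced plot: " + pdf_filename)
    local_shell.remove("temp.plt")

# The "time vs. amount sent" plot above would then be a single call:
# render_gnuplot("plot_udp_burst_time_vs_amount_sent.plt", pdf_filename, filename_sent_amount_byte)
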
Example #10
0
def plot_tcp_flow(logs_ns3_dir, data_out_dir, pdf_out_dir, tcp_flow_id,
                  interval_ns):
    local_shell = exputil.LocalShell()

    # Check that all plotting files are available
    if not local_shell.file_exists("plot_tcp_flow_time_vs_cwnd.plt") or \
       not local_shell.file_exists("plot_tcp_flow_time_vs_progress.plt") or \
       not local_shell.file_exists("plot_tcp_flow_time_vs_rtt.plt") or \
       not local_shell.file_exists("plot_tcp_flow_time_vs_rate.plt"):
        print("The gnuplot files are not present.")
        print(
            "Are you executing this python file inside the plot_tcp_flow directory?"
        )
        exit(1)

    # Create the output directories if they don't exist yet
    local_shell.make_full_dir(data_out_dir)
    local_shell.make_full_dir(pdf_out_dir)

    # Create rate file
    generate_tcp_flow_rate_csv(logs_ns3_dir, data_out_dir, tcp_flow_id,
                               interval_ns)

    # Plot time vs. rate
    data_filename = data_out_dir + "/tcp_flow_" + str(
        tcp_flow_id) + "_rate_in_intervals.csv"
    pdf_filename = pdf_out_dir + "/plot_tcp_flow_time_vs_rate_" + str(
        tcp_flow_id) + ".pdf"
    plt_filename = "plot_tcp_flow_time_vs_rate.plt"
    local_shell.copy_file(plt_filename, "temp.plt")
    local_shell.sed_replace_in_file_plain("temp.plt", "[OUTPUT-FILE]",
                                          pdf_filename)
    local_shell.sed_replace_in_file_plain("temp.plt", "[DATA-FILE]",
                                          data_filename)
    local_shell.perfect_exec("gnuplot temp.plt")
    print("Produced plot: " + pdf_filename)
    local_shell.remove("temp.plt")

    # Plot time vs. progress
    data_filename = logs_ns3_dir + "/tcp_flow_" + str(
        tcp_flow_id) + "_progress.csv"
    local_shell.copy_file(
        data_filename,
        data_out_dir + "/tcp_flow_" + str(tcp_flow_id) + "_progress.csv")
    pdf_filename = pdf_out_dir + "/plot_tcp_flow_time_vs_progress_" + str(
        tcp_flow_id) + ".pdf"
    plt_filename = "plot_tcp_flow_time_vs_progress.plt"
    local_shell.copy_file(plt_filename, "temp.plt")
    local_shell.sed_replace_in_file_plain("temp.plt", "[OUTPUT-FILE]",
                                          pdf_filename)
    local_shell.sed_replace_in_file_plain("temp.plt", "[DATA-FILE]",
                                          data_filename)
    local_shell.perfect_exec("gnuplot temp.plt")
    print("Produced plot: " + pdf_filename)
    local_shell.remove("temp.plt")

    # Plot time vs. rtt
    data_filename = logs_ns3_dir + "/tcp_flow_" + str(tcp_flow_id) + "_rtt.csv"
    local_shell.copy_file(
        data_filename,
        data_out_dir + "/tcp_flow_" + str(tcp_flow_id) + "_rtt.csv")
    pdf_filename = pdf_out_dir + "/plot_tcp_flow_time_vs_rtt_" + str(
        tcp_flow_id) + ".pdf"
    plt_filename = "plot_tcp_flow_time_vs_rtt.plt"
    local_shell.copy_file(plt_filename, "temp.plt")
    local_shell.sed_replace_in_file_plain("temp.plt", "[OUTPUT-FILE]",
                                          pdf_filename)
    local_shell.sed_replace_in_file_plain("temp.plt", "[DATA-FILE]",
                                          data_filename)
    local_shell.perfect_exec("gnuplot temp.plt")
    print("Produced plot: " + pdf_filename)
    local_shell.remove("temp.plt")

    # Plot time vs. rto
    data_filename = logs_ns3_dir + "/tcp_flow_" + str(tcp_flow_id) + "_rto.csv"
    local_shell.copy_file(
        data_filename,
        data_out_dir + "/tcp_flow_" + str(tcp_flow_id) + "_rto.csv")
    pdf_filename = pdf_out_dir + "/plot_tcp_flow_time_vs_rto_" + str(
        tcp_flow_id) + ".pdf"
    plt_filename = "plot_tcp_flow_time_vs_rto.plt"
    local_shell.copy_file(plt_filename, "temp.plt")
    local_shell.sed_replace_in_file_plain("temp.plt", "[OUTPUT-FILE]",
                                          pdf_filename)
    local_shell.sed_replace_in_file_plain("temp.plt", "[DATA-FILE]",
                                          data_filename)
    local_shell.perfect_exec("gnuplot temp.plt")
    print("Produced plot: " + pdf_filename)
    local_shell.remove("temp.plt")

    # Plot time vs. cwnd
    data_filename = logs_ns3_dir + "/tcp_flow_" + str(
        tcp_flow_id) + "_cwnd.csv"
    local_shell.copy_file(
        data_filename,
        data_out_dir + "/tcp_flow_" + str(tcp_flow_id) + "_cwnd.csv")
    pdf_filename = pdf_out_dir + "/plot_tcp_flow_time_vs_cwnd_" + str(
        tcp_flow_id) + ".pdf"
    plt_filename = "plot_tcp_flow_time_vs_cwnd.plt"
    local_shell.copy_file(plt_filename, "temp.plt")
    local_shell.sed_replace_in_file_plain("temp.plt", "[OUTPUT-FILE]",
                                          pdf_filename)
    local_shell.sed_replace_in_file_plain("temp.plt", "[DATA-FILE]",
                                          data_filename)
    local_shell.perfect_exec("gnuplot temp.plt")
    print("Produced plot: " + pdf_filename)
    local_shell.remove("temp.plt")

    # Plot time vs. cwnd_inflated
    data_filename = logs_ns3_dir + "/tcp_flow_" + str(
        tcp_flow_id) + "_cwnd_inflated.csv"
    local_shell.copy_file(
        data_filename,
        data_out_dir + "/tcp_flow_" + str(tcp_flow_id) + "_cwnd_inflated.csv")
    pdf_filename = pdf_out_dir + "/plot_tcp_flow_time_vs_cwnd_inflated_" + str(
        tcp_flow_id) + ".pdf"
    plt_filename = "plot_tcp_flow_time_vs_cwnd_inflated.plt"
    local_shell.copy_file(plt_filename, "temp.plt")
    local_shell.sed_replace_in_file_plain("temp.plt", "[OUTPUT-FILE]",
                                          pdf_filename)
    local_shell.sed_replace_in_file_plain("temp.plt", "[DATA-FILE]",
                                          data_filename)
    local_shell.perfect_exec("gnuplot temp.plt")
    print("Produced plot: " + pdf_filename)
    local_shell.remove("temp.plt")

    # Plot time vs. ssthresh
    data_filename = logs_ns3_dir + "/tcp_flow_" + str(
        tcp_flow_id) + "_ssthresh.csv"

    # Retrieve the highest ssthresh which is not a max. integer
    ssthresh_values = exputil.read_csv_direct_in_columns(
        data_filename, "pos_int,pos_int,pos_int")[2]
    max_ssthresh = 0
    for ssthresh in ssthresh_values:
        if ssthresh > max_ssthresh and ssthresh != 4294967295:
            max_ssthresh = ssthresh
    if max_ssthresh == 0:  # If it never got out of initial slow-start, we just set it to 1 for the plot
        max_ssthresh = 1.0

    # Execute ssthresh plotting
    local_shell.copy_file(
        data_filename,
        data_out_dir + "/tcp_flow_" + str(tcp_flow_id) + "_ssthresh.csv")
    pdf_filename = pdf_out_dir + "/plot_tcp_flow_time_vs_ssthresh_" + str(
        tcp_flow_id) + ".pdf"
    plt_filename = "plot_tcp_flow_time_vs_ssthresh.plt"
    local_shell.copy_file(plt_filename, "temp.plt")
    local_shell.sed_replace_in_file_plain(
        "temp.plt", "[MAX-Y]", str(math.ceil(max_ssthresh / 1380.0)))
    local_shell.sed_replace_in_file_plain("temp.plt", "[OUTPUT-FILE]",
                                          pdf_filename)
    local_shell.sed_replace_in_file_plain("temp.plt", "[DATA-FILE]",
                                          data_filename)
    local_shell.perfect_exec("gnuplot temp.plt")
    print("Produced plot: " + pdf_filename)
    local_shell.remove("temp.plt")

    # Plot time vs. inflight
    data_filename = logs_ns3_dir + "/tcp_flow_" + str(
        tcp_flow_id) + "_inflight.csv"
    local_shell.copy_file(
        data_filename,
        data_out_dir + "/tcp_flow_" + str(tcp_flow_id) + "_inflight.csv")
    pdf_filename = pdf_out_dir + "/plot_tcp_flow_time_vs_inflight_" + str(
        tcp_flow_id) + ".pdf"
    plt_filename = "plot_tcp_flow_time_vs_inflight.plt"
    local_shell.copy_file(plt_filename, "temp.plt")
    local_shell.sed_replace_in_file_plain("temp.plt", "[OUTPUT-FILE]",
                                          pdf_filename)
    local_shell.sed_replace_in_file_plain("temp.plt", "[DATA-FILE]",
                                          data_filename)
    local_shell.perfect_exec("gnuplot temp.plt")
    print("Produced plot: " + pdf_filename)
    local_shell.remove("temp.plt")

    # Plot time vs. together (cwnd, cwnd_inflated, ssthresh, inflight)
    cwnd_values = exputil.read_csv_direct_in_columns(
        logs_ns3_dir + "/tcp_flow_" + str(tcp_flow_id) + "_cwnd.csv",
        "pos_int,pos_int,pos_int")[2]
    cwnd_inflated_values = exputil.read_csv_direct_in_columns(
        logs_ns3_dir + "/tcp_flow_" + str(tcp_flow_id) + "_cwnd_inflated.csv",
        "pos_int,pos_int,pos_int")[2]
    inflight_values = exputil.read_csv_direct_in_columns(
        logs_ns3_dir + "/tcp_flow_" + str(tcp_flow_id) + "_inflight.csv",
        "pos_int,pos_int,pos_int")[2]
    pdf_filename = pdf_out_dir + "/plot_tcp_flow_time_vs_together_" + str(
        tcp_flow_id) + ".pdf"
    plt_filename = "plot_tcp_flow_time_vs_together.plt"
    local_shell.copy_file(plt_filename, "temp.plt")
    local_shell.sed_replace_in_file_plain(
        "temp.plt", "[MAX-Y]",
        str(
            max(math.ceil(max_ssthresh / 1380.0),
                math.ceil(np.max(cwnd_values) / 1380.0),
                math.ceil(np.max(cwnd_inflated_values) / 1380.0),
                math.ceil(np.max(inflight_values) / 1380.0))))
    local_shell.sed_replace_in_file_plain("temp.plt", "[OUTPUT-FILE]",
                                          pdf_filename)
    local_shell.sed_replace_in_file_plain(
        "temp.plt", "[DATA-FILE-CWND]",
        logs_ns3_dir + "/tcp_flow_" + str(tcp_flow_id) + "_cwnd.csv")
    local_shell.sed_replace_in_file_plain(
        "temp.plt", "[DATA-FILE-CWND-INFLATED]",
        logs_ns3_dir + "/tcp_flow_" + str(tcp_flow_id) + "_cwnd_inflated.csv")
    local_shell.sed_replace_in_file_plain(
        "temp.plt", "[DATA-FILE-SSTHRESH]",
        logs_ns3_dir + "/tcp_flow_" + str(tcp_flow_id) + "_ssthresh.csv")
    local_shell.sed_replace_in_file_plain(
        "temp.plt", "[DATA-FILE-INFLIGHT]",
        logs_ns3_dir + "/tcp_flow_" + str(tcp_flow_id) + "_inflight.csv")
    local_shell.perfect_exec("gnuplot temp.plt")
    print("Produced plot: " + pdf_filename)
    local_shell.remove("temp.plt")
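
The ssthresh handling above skips 4294967295, the max-integer value the trace contains while the flow is still in its initial slow start, and scales the y-axis in 1380-byte segments. A compact sketch of the same maximum computation (the CSV path is a placeholder):

import math
import exputil

# Hedged sketch: same max-ssthresh selection as above, written as a comprehension.
ssthresh_values = exputil.read_csv_direct_in_columns(
    "logs_ns3/tcp_flow_0_ssthresh.csv", "pos_int,pos_int,pos_int")[2]
finite_values = [s for s in ssthresh_values if s != 4294967295]
max_ssthresh = max(finite_values, default=0)
if max_ssthresh == 0:  # the flow never left its initial slow start
    max_ssthresh = 1.0
max_y_segments = math.ceil(max_ssthresh / 1380.0)  # y-axis upper bound in 1380-byte segments
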
Example #11
0
def plot_tcp_flows_ecdfs(logs_ns3_dir, data_out_dir, pdf_out_dir):
    local_shell = exputil.LocalShell()

    # Check that all plotting files are available
    if not local_shell.file_exists("plot_tcp_flows_ecdf_fct.plt") \
       or not local_shell.file_exists("plot_tcp_flows_ecdf_avg_throughput.plt"):
        print("The gnuplot files are not present.")
        print(
            "Are you executing this python file inside the plot_tcp_flows_ecdfs directory?"
        )
        exit(1)

    # Create the output directories if they don't exist yet
    local_shell.make_full_dir(data_out_dir)
    local_shell.make_full_dir(pdf_out_dir)

    # Create rate file
    tcp_flows_csv_columns = exputil.read_csv_direct_in_columns(
        logs_ns3_dir + "/tcp_flows.csv",
        "idx_int,pos_int,pos_int,pos_int,pos_int,pos_int,pos_int,pos_int,string,string"
    )
    num_flows = len(tcp_flows_csv_columns[0])
    # flow_id_list = tcp_flows_csv_columns[0]
    # from_node_id_list = tcp_flows_csv_columns[1]
    # to_node_id_list = tcp_flows_csv_columns[2]
    size_byte_list = tcp_flows_csv_columns[3]
    # start_time_ns_list = tcp_flows_csv_columns[4]
    # end_time_ns_list = tcp_flows_csv_columns[5]
    duration_ns_list = tcp_flows_csv_columns[6]
    # amount_sent_ns_list = tcp_flows_csv_columns[7]
    finished_list = tcp_flows_csv_columns[8]
    # metadata_list = tcp_flows_csv_columns[9]

    # Retrieve FCTs
    num_finished = 0
    num_unfinished = 0
    fct_ms_list = []
    avg_throughput_megabit_per_s_list = []
    for i in range(num_flows):
        if finished_list[i] == "YES":
            fct_ms_list.append(duration_ns_list[i] / 1e6)
            avg_throughput_megabit_per_s_list.append(
                float(size_byte_list[i]) / float(duration_ns_list[i]) * 8000.0)
            num_finished += 1
        else:
            num_unfinished += 1

    # Exit if no TCP flows finished
    if num_finished == 0:
        raise ValueError(
            "No TCP flows were finished so an ECDF could not be produced")

    # Now create ECDF for average throughput
    avg_throughput_megabit_per_s_ecdf = ECDF(avg_throughput_megabit_per_s_list)
    data_filename = data_out_dir + "/tcp_flows_ecdf_avg_throughput_megabit_per_s.csv"
    with open(data_filename, "w+") as f_out:
        for i in range(len(avg_throughput_megabit_per_s_ecdf.x)):
            f_out.write(
                str(avg_throughput_megabit_per_s_ecdf.x[i]) + "," +
                str(avg_throughput_megabit_per_s_ecdf.y[i]) + "\n")

    # Plot ECDF of average throughput of each TCP flow
    pdf_filename = pdf_out_dir + "/plot_tcp_flows_ecdf_avg_throughput.pdf"
    plt_filename = "plot_tcp_flows_ecdf_avg_throughput.plt"
    local_shell.copy_file(plt_filename, "temp.plt")
    local_shell.sed_replace_in_file_plain("temp.plt", "[OUTPUT-FILE]",
                                          pdf_filename)
    local_shell.sed_replace_in_file_plain("temp.plt", "[DATA-FILE]",
                                          data_filename)
    local_shell.perfect_exec("gnuplot temp.plt")
    local_shell.remove("temp.plt")

    # Show final result
    print("Average throughput statistics:")
    print("  > Included (finished)....... %.2f%% (%d out of %d)" %
          (float(num_finished) / float(num_flows) * 100.0, num_finished,
           num_flows))
    print("  > Average throughput........ %.2f Mbit/s" %
          (np.mean(avg_throughput_megabit_per_s_list)))
    print("  > Minimum throughput........ %.2f Mbit/s (slowest)" %
          (np.min(avg_throughput_megabit_per_s_list)))
    print("  > 1th %%-tile throughput..... %.2f Mbit/s" %
          (np.percentile(avg_throughput_megabit_per_s_list, 1.0)))
    print("  > 10th %%-tile throughput.... %.2f Mbit/s" %
          (np.percentile(avg_throughput_megabit_per_s_list, 10.0)))
    print("  > Median throughput......... %.2f Mbit/s" %
          (np.percentile(avg_throughput_megabit_per_s_list, 50.0)))
    print("  > Maximum throughput........ %.2f Mbit/s (fastest)" %
          (np.max(avg_throughput_megabit_per_s_list)))
    print("")
    print("Produced ECDF data: " + data_filename)
    print("Produced ECDF plot: " + pdf_filename)

    # Now create ECDF for FCTs
    fct_ms_ecdf = ECDF(fct_ms_list)
    data_filename = data_out_dir + "/tcp_flows_ecdf_fct_ms.csv"
    with open(data_filename, "w+") as f_out:
        for i in range(len(fct_ms_ecdf.x)):
            f_out.write(
                str(fct_ms_ecdf.x[i]) + "," + str(fct_ms_ecdf.y[i]) + "\n")

    # Plot ECDF of FCTs
    pdf_filename = pdf_out_dir + "/plot_tcp_flows_ecdf_fct.pdf"
    plt_filename = "plot_tcp_flows_ecdf_fct.plt"
    local_shell.copy_file(plt_filename, "temp.plt")
    local_shell.sed_replace_in_file_plain("temp.plt", "[OUTPUT-FILE]",
                                          pdf_filename)
    local_shell.sed_replace_in_file_plain("temp.plt", "[DATA-FILE]",
                                          data_filename)
    local_shell.perfect_exec("gnuplot temp.plt")
    local_shell.remove("temp.plt")

    # Show final result
    print("FCT statistics:")
    print("  > Included (finished)... %.2f%% (%d out of %d)" %
          (float(num_finished) / float(num_flows) * 100.0, num_finished,
           num_flows))
    print("  > Average FCT........... %.2f ms" % (np.mean(fct_ms_list)))
    print("  > Minimum FCT........... %.2f ms (fastest)" %
          (np.min(fct_ms_list)))
    print("  > Median FCT............ %.2f ms" %
          (np.percentile(fct_ms_list, 50.0)))
    print("  > 90th %%-tile FCT....... %.2f ms" %
          (np.percentile(fct_ms_list, 90.0)))
    print("  > 99th %%-tile FCT....... %.2f ms" %
          (np.percentile(fct_ms_list, 99.0)))
    print("  > Maximum FCT........... %.2f ms (slowest)" %
          (np.max(fct_ms_list)))
    print("")
    print("Produced ECDF data: " + data_filename)
    print("Produced ECDF plot: " + pdf_filename)