Example #1
def iperf_UDP_test_scenario():
    # Simulates IPERF transfers at different file sizes

    testbed = BasicTestbed(host_ip=HOST_IP)
    # warm-up sizes, then a sweep from 250 KB to 11.5 MB in 250 KB steps
    # we add a few modest "warm-up" sessions to start the connections for d_pepsal and qpep, which have
    # high first-packet costs but only experience them once, when the customer starts the respective applications
    iperf_file_sizes = [25 * 1000, 50 * 1000, 100 * 1000, 150 * 1000] + [(i / 4) * 1000000 for i in range(1, 47)]
    iperf_file_sizes.sort()
    benchmarks = [IperfUDPBenchmark(file_sizes=iperf_file_sizes[4:], bw_limit="50M", iterations=2)]
    plain_scenario = PlainScenario(name="Plain", testbed=testbed, benchmarks=copy.deepcopy(benchmarks))
    vpn_scenario = OpenVPNScenario(name="OpenVPN", testbed=testbed, benchmarks=copy.deepcopy(benchmarks))
    pepsal_scenario = PEPsalScenario(name="PEPSal", testbed=testbed, benchmarks=copy.deepcopy(benchmarks), terminal=True, gateway=False)
    distributed_pepsal_scenario = PEPsalScenario(name="Distributed PEPsal", gateway=True, terminal=True, testbed=testbed, benchmarks=copy.deepcopy(benchmarks))
    qpep_scenario = QPEPScenario(name="QPEP", testbed=testbed, benchmarks=copy.deepcopy(benchmarks))
    scenarios = [qpep_scenario, distributed_pepsal_scenario, vpn_scenario, plain_scenario, pepsal_scenario]
    for scenario in scenarios:
        logger.debug("Running iperf test scenario " + str(scenario.name))
        iperf_scenario_results = {}
        scenario.run_benchmarks()
        for benchmark in scenario.benchmarks:
            logger.debug("Running Iperf Test Scenario (", str(scenario.name), ") with file sizes: " + str(benchmark.file_sizes))
            iperf_scenario_results = benchmark.results
            print(iperf_scenario_results)
        scenario.print_results()
        benchmark.save_results_to_db(str(scenario.name),"opensand")
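
A standalone sketch (plain Python, no testbed required) that reproduces the transfer-size construction above and checks its endpoints; note that the [4:] slice handed to IperfUDPBenchmark starts just past the four warm-up sizes:

warmup_sizes = [25 * 1000, 50 * 1000, 100 * 1000, 150 * 1000]  # four small warm-up transfers
sweep_sizes = [(i / 4) * 1000000 for i in range(1, 47)]        # 0.25 MB .. 11.5 MB in 0.25 MB steps
iperf_file_sizes = sorted(warmup_sizes + sweep_sizes)

print(len(iperf_file_sizes))  # 50
print(iperf_file_sizes[:5])   # [25000, 50000, 100000, 150000, 250000.0]
print(iperf_file_sizes[-1])   # 11500000.0
print(iperf_file_sizes[4])    # 250000.0 -- first size actually benchmarked by the [4:] slice
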
Example #2
def plr_plt_scenario():
    testbed = BasicTestbed(host_ip=HOST_IP)
    
    sites_to_check = ["https://www.nasa.gov"]
    plr_levels = list(numpy.unique(
        list(numpy.geomspace(1e-7, 1e2, num=20)) +
        list(numpy.geomspace(1e-7, 1e2, num=10))))
    
    plain_scenario = PlainScenario(name="Plain", testbed=testbed, benchmarks=[])
    vpn_scenario = OpenVPNScenario(name="OpenVPN", testbed=testbed, benchmarks=[])
    pepsal_scenario = PEPsalScenario(name="PEPSal", testbed=testbed, benchmarks=[], terminal=True, gateway=False)
    distributed_pepsal_scenario = PEPsalScenario(name="Distributed PEPsal", terminal=True, gateway=True, testbed=testbed, benchmarks=[])
    qpep_scenario = QPEPScenario(name="QPEP", testbed=testbed, benchmarks=[])
    scenarios = [plain_scenario, pepsal_scenario, distributed_pepsal_scenario, qpep_scenario, vpn_scenario]

    for scenario in scenarios:
        benchmarks = [SitespeedBenchmark(hosts=sites_to_check, scenario=scenario, iterations=1, sub_iterations=int(os.getenv("PLR_PLT_ITERATIONS")))]
        if scenario.name == os.getenv("SCENARIO_NAME"):
            logger.debug("Running PLR PLT scenario" + str(scenario.name))
            plr_scenario_results = {}
            for plr_level in plr_levels[int(os.getenv("PLR_MIN_INDEX")):int(os.getenv("PLR_MAX_INDEX"))]:
                plr_string = numpy.format_float_positional(plr_level, precision=7, trim='-')
                plr_scenario_results[str(plr_string)] = []
                for j in range(0, int(os.getenv("PLR_META_ITERATIONS"))):
                    scenario.benchmarks = copy.deepcopy(benchmarks)
                    scenario.deploy_scenario()
                    scenario.testbed.set_plr_percentage(plr_string, st_out=False, gw_out=True)
                    logger.debug("Running PLR PLT for " + str(scenario.name) + " at " + str(plr_string) + " batch " + str(j) +" of " + str(os.getenv("PLR_META_ITERATIONS")))
                    scenario.run_benchmarks(deployed=True)
                    for benchmark in scenario.benchmarks:
                        plr_scenario_results[str(plr_string)].append(benchmark.results)
                logger.debug("Interim PLR/PLT Results (PLR: " + str(plr_string) + " meta_iteration: " + str(j) + "/" + str(int(os.getenv("PLR_META_ITERATIONS"))) + " Scenario: " + str(scenario.name) +"): " + str(plr_scenario_results) + "\n")
            print("Final PLR PLT results for " + str(scenario.name))
            print("***********************************************")
            print(plr_scenario_results)
            print('\n*********************************************')
    logger.success("PLR/PLT test complete")
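
The plr_levels expression merges a 20-point and a 10-point geometric series over the same 1e-7 to 100 range. Ten log-spaced points across those nine decades land exactly on the powers of ten, so the union after numpy.unique is a 28-point sweep that keeps every power of ten as a checkpoint. A standalone check:

import numpy

fine = numpy.geomspace(1e-7, 1e2, num=20)    # 20 log-spaced points
coarse = numpy.geomspace(1e-7, 1e2, num=10)  # exactly 1e-7, 1e-6, ..., 1e2
plr_levels = numpy.unique(numpy.concatenate([fine, coarse]))

print(len(plr_levels))                # 28 -- the two series share only their endpoints
print(plr_levels[0], plr_levels[-1])  # 1e-07 100.0
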
Example #3
def plt_test_scenario(testbed=None):
    if testbed is None:
        testbed = BasicTestbed(host_ip=HOST_IP)
    benchmarks = [SitespeedBenchmark(hosts=['https://www.sina.com.cn'])]
    plain_scenario = PlainScenario(name="Plain",
                                   testbed=testbed,
                                   benchmarks=copy.deepcopy(benchmarks))
    vpn_scenario = OpenVPNScenario(name="OpenVPN",
                                   testbed=testbed,
                                   benchmarks=copy.deepcopy(benchmarks))
    pepsal_scenario = PEPsalScenario(name="PEPSal",
                                     testbed=testbed,
                                     benchmarks=copy.deepcopy(benchmarks),
                                     terminal=True,
                                     gateway=False)
    distributed_pepsal_scenario = PEPsalScenario(
        name="Distributed PEPsal  ",
        terminal=True,
        gateway=True,
        testbed=testbed,
        benchmarks=copy.deepcopy(benchmarks))
    qpep_scenario = QPEPScenario(name="QPEP",
                                 testbed=testbed,
                                 benchmarks=copy.deepcopy(benchmarks))
    #scenarios = [plain_scenario, pepsal_scenario, distributed_pepsal_scenario, qpep_scenario, vpn_scenario]
    scenarios = [distributed_pepsal_scenario]
    for scenario in scenarios:
        logger.debug("Running PLT test scenario " + str(scenario.name))
        scenario.deploy_scenario()
        scenario.run_benchmarks(deployed=True)
        for benchmark in scenario.benchmarks:
            print("Results for PLT " + str(scenario.name))
            print(benchmark.results)
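
Each scenario receives copy.deepcopy(benchmarks) rather than the shared list because benchmark objects accumulate results when run; with a shared reference, every scenario would write into, and report, the same objects. A minimal sketch of the hazard, using a stand-in Benchmark class rather than the repository's:

import copy

class Benchmark:  # stand-in for illustration only
    def __init__(self):
        self.results = []

benchmarks = [Benchmark()]
scenario_a = benchmarks                 # shared reference
scenario_b = copy.deepcopy(benchmarks)  # independent copy

scenario_a[0].results.append("run A")
print(scenario_a[0].results)  # ['run A']
print(scenario_b[0].results)  # [] -- the deep copy is unaffected
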
Example #4
def plt_test_scenario(testbed=None):
    if testbed is None:
        testbed = RealWorldTestbed()
    alexa_top_20 = [
        "https://www.google.com", "https://www.youtube.com",
        "https://www.tmall.com", "https://www.facebook.com",
        "https://www.baidu.com", "https://www.qq.com", "https://www.sohu.com",
        "https://www.taobao.com", "https://www.360.cn", "https://www.jd.com",
        "https://www.yahoo.com", "https://www.amazon.com",
        "https://www.wikipedia.org", "https://www.weibo.com",
        "https://www.sina.com.cn", "https://www.reddit.com",
        "https://www.live.com", "https://www.netflix.com",
        "https://www.okezone.com", "https://www.vk.com"
    ]
    with open(str(os.getenv("TESTBED_FILE"))) as file:
        testbed_name = file.readline().strip()
    plain_scenario = PlainScenario(name="plain",
                                   testbed=testbed,
                                   benchmarks=[])
    vpn_scenario = OpenVPNScenario(name="ovpn", testbed=testbed, benchmarks=[])
    pepsal_scenario = PEPsalScenario(name="pepsal",
                                     testbed=testbed,
                                     benchmarks=[],
                                     terminal=True,
                                     gateway=False)
    distributed_pepsal_scenario = PEPsalScenario(name="dist_pepsal",
                                                 terminal=True,
                                                 gateway=True,
                                                 testbed=testbed,
                                                 benchmarks=[])
    qpep_scenario = QPEPScenario(name="qpep", testbed=testbed, benchmarks=[])
    scenarios = [
        plain_scenario, pepsal_scenario, distributed_pepsal_scenario,
        qpep_scenario, vpn_scenario
    ]
    for scenario in scenarios:
        scenario.benchmarks = [
            SitespeedBenchmark(
                hosts=alexa_top_20[int(os.getenv("ALEXA_MIN")):int(os.getenv("ALEXA_MAX"))],
                scenario=scenario,
                iterations=int(os.getenv("PLT_ITERATIONS")),
                sub_iterations=int(os.getenv("PLT_SUB_ITERATIONS")))
        ]
        logger.debug("Running PLT test scenario " + str(scenario.name))
        scenario.deploy_scenario()
        scenario.run_benchmarks(deployed=True)
        for benchmark in scenario.benchmarks:
            print("Results for PLT " + str(scenario.name))
            print(benchmark.results)
            benchmark.save_results_to_db(str(scenario.name), testbed_name)
    for scenario in scenarios:
        if scenario.name == os.getenv("SCENARIO_NAME"):
            scenario.print_results()
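
ALEXA_MIN and ALEXA_MAX select a contiguous window of the twenty-site list, so separate runs (e.g. parallel containers) can split the sites between them. The same pattern in isolation; the fallback defaults here are illustrative, whereas the example above assumes both variables are set:

import os

hosts = ["https://www.google.com", "https://www.youtube.com",
         "https://www.tmall.com", "https://www.facebook.com"]

lo = int(os.getenv("ALEXA_MIN", "0"))              # illustrative default
hi = int(os.getenv("ALEXA_MAX", str(len(hosts))))  # illustrative default
print(hosts[lo:hi])  # e.g. ALEXA_MIN=1 ALEXA_MAX=3 prints youtube and tmall
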
Example #5
def iperf_test_scenario():
    # Simulates IPERF transfers at different file sizes

    testbed = RealWorldTestbed()
    # warm-up sizes, then a sweep from 250 KB to 11.5 MB in 250 KB steps
    # we add a few modest "warm-up" sessions to start the connections for d_pepsal and qpep, which have
    # high first-packet costs but only experience them once, when the customer starts the respective applications
    iperf_file_sizes = ([25 * 1000, 50 * 1000, 100 * 1000, 150 * 1000] +
                        [(i / 4) * 1000000 for i in range(1, 47)])
    iperf_file_sizes.sort()
    with open(str(os.getenv("TESTBED_FILE"))) as file:
        testbed_name = file.readline().strip()
    benchmarks = [
        IperfBenchmark(
            file_sizes=iperf_file_sizes[int(os.getenv("IPERF_MIN_SIZE_INDEX")):int(os.getenv("IPERF_MAX_SIZE_INDEX"))],
            iterations=int(os.getenv("IPERF_ITERATIONS")))
    ]
    plain_scenario = PlainScenario(name="plain",
                                   testbed=testbed,
                                   benchmarks=copy.deepcopy(benchmarks))
    vpn_scenario = OpenVPNScenario(name="ovpn",
                                   testbed=testbed,
                                   benchmarks=copy.deepcopy(benchmarks))
    pepsal_scenario = PEPsalScenario(name="pepsal",
                                     testbed=testbed,
                                     benchmarks=copy.deepcopy(benchmarks),
                                     terminal=True,
                                     gateway=False)
    distributed_pepsal_scenario = PEPsalScenario(
        name="dist_pepsal",
        gateway=True,
        terminal=True,
        testbed=testbed,
        benchmarks=copy.deepcopy(benchmarks))
    qpep_scenario = QPEPScenario(name="qpep",
                                 testbed=testbed,
                                 benchmarks=copy.deepcopy(benchmarks))
    scenarios = [
        qpep_scenario, distributed_pepsal_scenario, vpn_scenario,
        plain_scenario, pepsal_scenario
    ]
    for scenario in scenarios:
        logger.debug("Running iperf test scenario " + str(scenario.name))
        iperf_scenario_results = {}
        scenario.run_benchmarks()
        for benchmark in scenario.benchmarks:
            logger.debug("Running Iperf Test Scenario (", str(scenario.name),
                         ") with file sizes: " + str(benchmark.file_sizes))
            iperf_scenario_results = benchmark.results
            print(iperf_scenario_results)
            benchmark.save_results_to_db(str(scenario.name), testbed_name)
        scenario.print_results()
Example #6
def iperf_test_scenario():
    testbed = BasicTestbed(host_ip=HOST_IP)
    # warm-up sizes, then a sweep from 250 KB to 9.75 MB in 250 KB steps
    # we add a few modest "warm-up" sessions to start the connections for d_pepsal and qpep, which have
    # high first-packet costs but only experience them once, when the customer starts the respective applications
    iperf_file_sizes = ([25 * 1000, 50 * 1000, 100 * 1000, 150 * 1000] +
                        [(i / 4) * 1000000 for i in range(1, 40)])
    #iperf_file_sizes = [(i/2)*1000000 for i in range(1, 20)]
    iperf_file_sizes.sort()
    logger.debug("Running Iperf Test Scenario with file sizes: " +
                 str(iperf_file_sizes))
    benchmarks = [IperfBenchmark(file_sizes=iperf_file_sizes)]
    plain_scenario = PlainScenario(name="Plain",
                                   testbed=testbed,
                                   benchmarks=copy.deepcopy(benchmarks))
    vpn_scenario = OpenVPNScenario(name="OpenVPN",
                                   testbed=testbed,
                                   benchmarks=copy.deepcopy(benchmarks))
    pepsal_scenario = PEPsalScenario(name="PEPSal",
                                     testbed=testbed,
                                     benchmarks=copy.deepcopy(benchmarks),
                                     terminal=True,
                                     gateway=False)
    distributed_pepsal_scenario = PEPsalScenario(
        name="Distributed",
        gateway=True,
        terminal=True,
        testbed=testbed,
        benchmarks=copy.deepcopy(benchmarks))
    qpep_scenario = QPEPScenario(name="QPEP",
                                 testbed=testbed,
                                 benchmarks=copy.deepcopy(benchmarks))
    #scenarios = [qpep_scenario, distributed_pepsal_scenario, vpn_scenario, plain_scenario, pepsal_scenario]
    scenarios = [vpn_scenario]
    for scenario in scenarios:
        logger.debug("Running iperf test scenario " + str(scenario.name))
        iperf_scenario_results = {}
        scenario.run_benchmarks()
        for benchmark in scenario.benchmarks:
            iperf_scenario_results = benchmark.results
            print(iperf_scenario_results)
        up_speeds = []
        down_speeds = []
        for key in iperf_scenario_results:
            up_speeds.append(iperf_scenario_results[key]["sent_bps"])
            down_speeds.append(iperf_scenario_results[key]["received_bps"])
        print(scenario.name)
        print("    Up: ", up_speeds)
        print("  Down:", down_speeds)
Example #7
def plr_test_scenario():
    testbed = BasicTestbed(host_ip=HOST_IP)
    iperf_file_sizes = [1000000, 2000000, 5000000, 1000000]
    # a balanced distribution across a log scale of 20 points, plus key powers of ten: 28 checkpoints from 1e-7 to 100
    plr_levels = list(numpy.unique(
        list(numpy.geomspace(1e-7, 1e2, num=20)) +
        list(numpy.geomspace(1e-7, 1e2, num=10))))

    benchmarks = [IperfBenchmark(file_sizes=iperf_file_sizes, reset_on_run=True, iterations=1)]
    # test with pepsal vs qpep vs plain
    pepsal_scenario = PEPsalScenario(name="PEPSal", testbed=testbed, benchmarks=copy.deepcopy(benchmarks))
    qpep_scenario = QPEPScenario(name="QPEP", testbed=testbed, benchmarks=copy.deepcopy(benchmarks))
    plain_scenario = PlainScenario(name="Plain", testbed=testbed, benchmarks=copy.deepcopy(benchmarks))
    vpn_scenario = OpenVPNScenario(name="OpenVPN", testbed=testbed, benchmarks=copy.deepcopy(benchmarks))
    distributed_pepsal_scenario = PEPsalScenario(name="Distributed PEPsal", gateway=True, terminal=True, testbed=testbed, benchmarks=copy.deepcopy(benchmarks))
    scenarios = [qpep_scenario, plain_scenario, vpn_scenario, pepsal_scenario, distributed_pepsal_scenario]
    for scenario in scenarios:
        if scenario.name == os.getenv("SCENARIO_NAME"):
            logger.debug("Running packet loss rate scenario " + str(scenario.name))
            iperf_scenario_results = {}
            for plr_level in plr_levels[int(os.getenv("PLR_MIN_INDEX")):int(os.getenv("PLR_MAX_INDEX"))]:
                plr_string = numpy.format_float_positional(plr_level, precision=7, trim='-')
                iperf_scenario_results[str(plr_string)] = []
                for j in range(0, int(os.getenv("PLR_META_ITERATIONS"))):
                    logger.debug("Running PLR for " + str(scenario.name) +  " at " + str(plr_string) + " batch " + str(j) + " of " + str(os.getenv("PLR_META_ITERATIONS")))
                    scenario.deploy_scenario()
                    scenario.testbed.set_plr_percentage(plr_string, st_out=False, gw_out=True)
                    for i in range(0, int(os.getenv("IPERF_ITERATIONS"))):
                        scenario.benchmarks = copy.deepcopy(benchmarks)
                        scenario.run_benchmarks(deployed=True)
                        for benchmark in scenario.benchmarks:
                            iperf_scenario_results[str(plr_string)].append(benchmark.results)
                            # if the link breaks, we need to restart the ip routes
                            for key in benchmark.results.keys():
                                if benchmark.results[key]["sent_bps"] == 0:
                                    scenario.deploy_scenario()
                                    scenario.testbed.set_plr_percentage(plr_string, st_out=False, gw_out=True)
                                    logger.warning("Failed Iperf Run @ " + str(plr_string))
                                    break
                        logger.debug("Interim PLR Results (PLR: " + str(plr_string) + " sub_iter: " + str(i) + "/" + str(int(os.getenv("IPERF_ITERATIONS"))) + " Scenario: " + str(scenario.name) +"): " + str(iperf_scenario_results))
            print("Final PLR Results for ", scenario.name)
            print("*********************************")
            print(iperf_scenario_results)
            print("\n******************************")
    logger.success("PLR Test Complete")
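
numpy.format_float_positional is used instead of str() so the loss rate always renders as a plain decimal: str() switches to scientific notation for small floats, which string-built shell commands or configs downstream would likely mishandle. For example:

import numpy

plr_level = 1e-07
print(str(plr_level))                                                   # 1e-07
print(numpy.format_float_positional(plr_level, precision=7, trim='-'))  # 0.0000001
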
Example #8
def attenuation_test_plt_scenario():
    testbed = BasicTestbed(host_ip=HOST_IP)
    attenuation_levels = [i * 0.5 for i in range(0, 11)]
    benchmarks = [
        SitespeedBenchmark(hosts=["https://www.bbc.co.uk"], iterations=5)
    ]

    # test with pepsal vs qpep vs plain
    # NB: due to the networking requirements of PEPsal, a special testbed launch order is required in
    # attenuation_test_pepsal_scenario to allow access to web content
    qpep_scenario = QPEPScenario(name="QPEP Attenuation  ",
                                 testbed=testbed,
                                 benchmarks=copy.deepcopy(benchmarks))
    plain_scenario = PlainScenario(name="Plain Attenuation",
                                   testbed=testbed,
                                   benchmarks=copy.deepcopy(benchmarks))
    vpn_scenario = OpenVPNScenario(name="OpenVPN",
                                   testbed=testbed,
                                   benchmarks=copy.deepcopy(benchmarks))
    scenarios = [plain_scenario, qpep_scenario, vpn_scenario]
    for scenario in scenarios:
        scenario_results = []
        scenario_errors = []
        for attenuation_level in attenuation_levels:
            scenario.deploy_scenario()
            scenario.benchmarks = copy.deepcopy(benchmarks)
            logger.debug("Running attenuation scenario test for attenuation=" +
                         str(attenuation_level))
            scenario.testbed.set_downlink_attenuation(attenuation_level)
            scenario.testbed.run_attenuation_scenario()
            scenario.testbed.connect_terminal_workstation()
            scenario.run_benchmarks(deployed=True)
            for benchmark in scenario.benchmarks:
                if len(benchmark.results) > 0:
                    scenario_results.append(mean(benchmark.results))
                scenario_errors.append(benchmark.errors)
        print(scenario.name, "Results: ")
        print("    PLT: ", scenario_results)
        print("    ERR: ", scenario_errors)
    logger.success("Attenuation PLT Test Complete")
Example #9
def attenuation_test_iperf_scenario():
    testbed = BasicTestbed(host_ip=HOST_IP)
    attenuation_levels = [i * 0.25 for i in range(0, 21)]
    # test a 10mb transfer
    iperf_file_sizes = [10000000]
    benchmarks = [
        IperfBenchmark(file_sizes=iperf_file_sizes, reset_on_run=True)
    ]
    # test with pepsal vs qpep vs plain
    pepsal_scenario = PEPsalScenario(name="PEPsal Attenuation  ",
                                     testbed=testbed,
                                     benchmarks=copy.deepcopy(benchmarks))
    qpep_scenario = QPEPScenario(name="QPEP Attenuation",
                                 testbed=testbed,
                                 benchmarks=copy.deepcopy(benchmarks))
    plain_scenario = PlainScenario(name="Plain Attenuation",
                                   testbed=testbed,
                                   benchmarks=copy.deepcopy(benchmarks))
    scenarios = [plain_scenario]
    for scenario in scenarios:
        up_results = []
        down_results = []
        all_measurements = {}
        for attenuation_level in attenuation_levels:
            scenario.deploy_scenario()
            at_up_measurements = []
            at_down_measurements = []
            scenario.testbed.set_downlink_attenuation(attenuation_level)
            scenario.testbed.run_attenuation_scenario()
            for i in range(0, 5):
                scenario.benchmarks = copy.deepcopy(benchmarks)
                logger.debug("Running attenuation scenario test #" + str(i) +
                             " for attenuation=" + str(attenuation_level))
                scenario.run_benchmarks(deployed=True)
                for benchmark in scenario.benchmarks:
                    for key in benchmark.results:
                        at_up_measurements.append(
                            benchmark.results[key]["sent_bps"])
                        at_down_measurements.append(
                            benchmark.results[key]["received_bps"])
                        if benchmark.results[key]["sent_bps"] == 0:
                            # if the attenuated link breaks you have to restart it
                            scenario.deploy_scenario()
                            scenario.testbed.set_downlink_attenuation(
                                attenuation_level)
                            scenario.testbed.run_attenuation_scenario()
                        logger.debug(
                            "Attenuation Result: " + str(attenuation_level) +
                            " --- " + str(benchmark.results[key]["sent_bps"]) +
                            "/" + str(benchmark.results[key]["received_bps"]))
            all_measurements[str(scenario.name) + "_" +
                             str(attenuation_level) +
                             "_up"] = at_up_measurements
            all_measurements[str(scenario.name) + "_" +
                             str(attenuation_level) +
                             "_down"] = at_down_measurements
            # after running 5 sample tests, add their mean to our reported average
            up_results.append(mean(at_up_measurements))
            down_results.append(mean(at_down_measurements))
            print("Current Up: ", up_results)
            print("Current Down: ", down_results)
            print("All Measurements: ", all_measurements)
        print(scenario.name, "Results: ")
        print("    Down:", down_results)
        print("    Up:", up_results)
        print("    All:", all_measurements)
    logger.success("Attenuation Test Complete")
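
The redeploy-on-zero handling above is inlined in the measurement loop; factored out, it reads as a retry wrapper. This is a sketch, not repository code: it assumes a zero sent_bps always means the emulated link died, and the names and max_retries cap are illustrative:

import copy

def run_with_link_recovery(scenario, benchmarks, reapply_link_settings, max_retries=3):
    """Run the benchmarks, redeploying the scenario when the link comes back dead."""
    results = {}
    for _ in range(max_retries):
        scenario.benchmarks = copy.deepcopy(benchmarks)
        scenario.run_benchmarks(deployed=True)
        results = scenario.benchmarks[0].results
        if all(r["sent_bps"] > 0 for r in results.values()):
            break
        scenario.deploy_scenario()   # broken link: restart the ip routes
        reapply_link_settings()      # e.g. re-run set_downlink_attenuation + run_attenuation_scenario
    return results
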
Example #10
        default=False,
        action='store_true')
    args = parser.parse_args()
    # First define our OpenSAND testbed environment
    testbed = None
    if args.orbit == 'GEO':
        testbed = BasicTestbed(host_ip=args.xhost, display_number=args.display)
    else:
        testbed = LeoTestbed(host_ip=args.xhost, display_number=args.display)

    # Next set the scenario
    scenario_dict = {
        "plain": PlainScenario(name="Plain", testbed=testbed, benchmarks=[]),
        "qpep": QPEPScenario(name="QPEP", testbed=testbed, benchmarks=[]),
        "pepsal_distributed": PEPsalScenario(name="Distributed PEPsal",
                                             testbed=testbed,
                                             gateway=True,
                                             benchmarks=[]),
        "pepsal_integrated": PEPsalScenario(name="Integrated PEPsal",
                                            testbed=testbed,
                                            benchmarks=[]),
        "open_vpn": OpenVPNScenario(name="OpenVPN", testbed=testbed, benchmarks=[]),
    }
    scenario = scenario_dict[args.scenario]

    # Launch the testbed and deploy the PEP/VPN if relevant
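
The fragment is cut off above the parser definition (the orphaned default=False / action='store_true' lines belong to a boolean flag whose name is lost). From the attributes read later (args.orbit, args.xhost, args.display, args.scenario), the setup presumably resembles the sketch below; flag names mirror those attributes, the scenario choices come from scenario_dict, and everything else is a guess:

import argparse

parser = argparse.ArgumentParser(description="testbed benchmark runner (reconstructed sketch)")
parser.add_argument('--orbit', default='GEO')          # 'GEO' selects BasicTestbed, anything else LeoTestbed
parser.add_argument('--xhost', default='127.0.0.1')    # guessed default: X11 host for the testbed display
parser.add_argument('--display', type=int, default=0)  # guessed default
parser.add_argument('--scenario', choices=['plain', 'qpep', 'pepsal_distributed',
                                           'pepsal_integrated', 'open_vpn'])
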