Example 1
import batch_run_benchmark_rate  # local helper module, assumed importable

def run_test(path, params, iterations, label):
    """
    Runs benchmark rate for the number of iterations in the command line arguments
    """
    print("-----------------------------------------------------------")
    print(label + "\n")
    results = batch_run_benchmark_rate.run(path, iterations, params, False)
    stats = batch_run_benchmark_rate.calculate_stats(results)
    print(batch_run_benchmark_rate.get_summary_string(stats, iterations, params))
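For context, a minimal usage sketch of run_test follows; the build path, device address, and parameter values are hypothetical, not from the source:

from pathlib import Path

# all values below are placeholders for illustration
benchmark_rate_path = Path("/home/user/uhd/host/build") / "examples/benchmark_rate"
rx_only_params = {
    "args": "addr=192.168.10.2",  # hypothetical device address
    "duration": 10,
    "rx_rate": 10e6,
    "rx_channels": "0",
}
run_test(benchmark_rate_path, rx_only_params, 5, "RX-only smoke test")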
Example 2
from pathlib import Path

import batch_run_benchmark_rate  # local helper module, assumed importable

def test_streaming(pytestconfig, dut_type, use_dpdk, dual_10G, rate, rx_rate,
                   rx_channels, tx_rate, tx_channels, iterations, duration):
    """
    Run benchmark_rate against the device under test and assert that the
    averaged error counts stay within the thresholds defined below.
    """

    benchmark_rate_path = Path(
        pytestconfig.getoption('uhd_build_dir')) / 'examples/benchmark_rate'

    # construct device args string
    device_args = f"master_clock_rate={rate},"

    if dut_type == "B210":
        device_args += f"name={pytestconfig.getoption('name')},"
    else:
        device_args += f"addr={pytestconfig.getoption('addr')},"

    if dual_10G:
        device_args += f"second_addr={pytestconfig.getoption('second_addr')},"

    if use_dpdk:
        device_args += f"use_dpdk=1,mgmt_addr={pytestconfig.getoption('mgmt_addr')}"

    # construct benchmark_rate params dictionary
    benchmark_rate_params = {
        "args": device_args,
        "duration": duration,
    }

    if rx_channels:
        benchmark_rate_params["rx_rate"] = rx_rate
        benchmark_rate_params["rx_channels"] = rx_channels

    if tx_channels:
        benchmark_rate_params["tx_rate"] = tx_rate
        benchmark_rate_params["tx_channels"] = tx_channels

    # run benchmark rate
    print()
    results = batch_run_benchmark_rate.run(benchmark_rate_path, iterations,
                                           benchmark_rate_params)
    stats = batch_run_benchmark_rate.calculate_stats(results)
    print(
        batch_run_benchmark_rate.get_summary_string(stats, iterations,
                                                    benchmark_rate_params))

    # compare results against thresholds
    dropped_samps_threshold = 0
    overruns_threshold = 2
    rx_timeouts_threshold = 0
    rx_seq_err_threshold = 0

    underruns_threshold = 2
    tx_timeouts_threshold = 0
    tx_seq_err_threshold = 0

    late_cmds_threshold = 0
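    # note: thresholds are compared against per-iteration averages
    # (stats.avg_vals), so a zero threshold fails if any single run errors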

    # TODO: define custom failed assertion explanations to avoid extra output
    # https://docs.pytest.org/en/6.2.x/assert.html#defining-your-own-explanation-for-failed-assertions

    if rx_channels:
        assert stats.avg_vals.dropped_samps <= dropped_samps_threshold, \
            f"""Number of dropped samples exceeded threshold.
                Expected dropped samples: <= {dropped_samps_threshold}
                Actual dropped samples:      {stats.avg_vals.dropped_samps}"""
        assert stats.avg_vals.overruns <= overruns_threshold, \
            f"""Number of overruns exceeded threshold.
                Expected overruns: <= {overruns_threshold}
                Actual overruns:      {stats.avg_vals.overruns}"""
        assert stats.avg_vals.rx_timeouts <= rx_timeouts_threshold, \
            f"""Number of rx timeouts exceeded threshold.
                Expected rx timeouts: <= {rx_timeouts_threshold}
                Actual rx timeouts:      {stats.avg_vals.rx_timeouts}"""
        assert stats.avg_vals.rx_seq_errs <= rx_seq_err_threshold, \
            f"""Number of rx sequence errors exceeded threshold.
                Expected rx sequence errors: <= {rx_seq_err_threshold}
                Actual rx sequence errors:      {stats.avg_vals.rx_seq_errs}"""

    if tx_channels:
        assert stats.avg_vals.underruns <= underruns_threshold, \
            f"""Number of underruns exceeded threshold.
                Expected underruns: <= {underruns_threshold}
                Actual underruns:      {stats.avg_vals.underruns}"""
        assert stats.avg_vals.tx_timeouts <= tx_timeouts_threshold, \
            f"""Number of tx timeouts exceeded threshold.
                Expected tx timeouts: <= {tx_timeouts_threshold}
                Actual tx timeouts:      {stats.avg_vals.tx_timeouts}"""
        assert stats.avg_vals.tx_seq_errs <= tx_seq_err_threshold, \
            f"""Number of tx sequence errors exceeded threshold.
                Expected tx sequence errors: <= {tx_seq_err_threshold}
                Actual tx sequence errors:      {stats.avg_vals.tx_seq_errs}"""

    assert stats.avg_vals.late_cmds <= late_cmds_threshold, \
        f"""Number of late commands exceeded threshold.
            Expected late commands: <= {late_cmds_threshold}
            Actual late commands:      {stats.avg_vals.late_cmds}"""