Example #1
0
def pipes(basevm, current_avail_cpu, env_id):
    """Producer/Consumer pipes generator.

    Yields one (consumer, producer, tag) triple per combination of
    configured mode, protocol and payload length from ``test_cfg.CONFIG``.

    :param basevm: microvm under test; its vCPU count gates "bd" mode.
    :param current_avail_cpu: first host CPU id available for iperf pinning.
    :param env_id: environment identifier used to namespace pipe tags.
    """
    # Resolve the host CPU model once; the baseline filter below depends
    # on it. (A redundant per-protocol recomputation was removed.)
    host_cpu_model_name = get_cpu_model_name()
    cpus_baselines = test_cfg.CONFIG["hosts"]["instances"]["m5d.metal"]["cpus"]
    # Default to statistics without pass criteria; replaced per-pipe below
    # when a baseline matching the host CPU model is available.
    stats = no_criteria_stats()
    baselines = list(
        filter(lambda baseline: baseline["model"] == host_cpu_model_name,
               cpus_baselines))

    for mode in test_cfg.CONFIG["modes"]:
        # We run bi-directional tests only on uVM with more than 2 vCPus
        # because we need to pin one iperf3/direction per vCPU, and since we
        # have two directions, we need at least two vCPUs.
        if mode == "bd" and basevm.vcpus_count < 2:
            continue

        for protocol in test_cfg.CONFIG["protocols"]:
            for payload_length in protocol["payload_length"]:
                # Base iperf3 client command over vsock.
                iperf_guest_cmd_builder = CmdBuilder(test_cfg.IPERF3) \
                    .with_arg("--vsock") \
                    .with_arg("-c", 2)       \
                    .with_arg("--json") \
                    .with_arg("--omit", protocol["omit"]) \
                    .with_arg("--time", test_cfg.CONFIG["time"])

                if payload_length:
                    iperf_guest_cmd_builder = iperf_guest_cmd_builder \
                        .with_arg("--len", f"{payload_length}")
                    iperf3_id_payload_len = payload_length
                else:
                    iperf3_id_payload_len = "DEFAULT"

                iperf3_id = f"vsock-p{iperf3_id_payload_len}" \
                    f"-{basevm.vcpus_count}vcpu-{mode}"

                cons = consumer.LambdaConsumer(consume_stats=False,
                                               func=consume_iperf_output)

                # Attach pass criteria when a baseline for this host exists.
                if len(baselines) > 0:
                    stats = criteria_stats(baselines[0], iperf3_id, env_id)

                eager_map(cons.set_measurement_def, measurements())
                eager_map(cons.set_stat_def, stats)

                prod_kwargs = {
                    "guest_cmd_builder": iperf_guest_cmd_builder,
                    "basevm": basevm,
                    "current_avail_cpu": current_avail_cpu,
                    "runtime": test_cfg.CONFIG["time"],
                    "omit": protocol["omit"],
                    "load_factor": test_cfg.CONFIG["load_factor"],
                    "modes": test_cfg.CONFIG["modes"][mode],
                }
                prod = producer.LambdaProducer(produce_iperf_output,
                                               prod_kwargs)
                yield cons, prod, f"{env_id}/{iperf3_id}"
def test_snap_restore_performance(bin_cloner_path, results_file_dumper):
    """
    Test the performance of snapshot restore.

    @type: performance
    """
    logger = logging.getLogger(TEST_ID)

    # Collect the guest artifacts: 2-vCPU microvm config, every kernel,
    # and the ubuntu rootfs.
    artifacts = ArtifactCollection(_test_images_s3_bucket())
    artifact_sets = [
        ArtifactSet(artifacts.microvms(keyword="2vcpu_1024mb")),
        ArtifactSet(artifacts.kernels()),
        ArtifactSet(artifacts.disks(keyword="ubuntu")),
    ]

    logger.info("Testing on processor %s", get_cpu_model_name())

    # Build the test context handed to the snapshot workload.
    ctx = TestContext()
    ctx.custom = {
        'builder': MicrovmBuilder(bin_cloner_path),
        'logger': logger,
        'name': TEST_ID,
        'results_file_dumper': results_file_dumper
    }

    # Run the workload over every artifact combination.
    TestMatrix(context=ctx,
               artifact_sets=artifact_sets).run_test(snapshot_workload)
Example #3
0
def test_block_performance_sync(bin_cloner_path, results_file_dumper):
    """
    Test block performance for multiple vm configurations.

    @type: performance
    """
    logger = logging.getLogger(TEST_ID)

    # Guest artifacts: 1- and 2-vCPU microvm configs, every kernel, and
    # the ubuntu rootfs.
    artifacts = ArtifactCollection(_test_images_s3_bucket())
    vm_artifacts = ArtifactSet(artifacts.microvms(keyword="1vcpu_1024mb"))
    vm_artifacts.insert(artifacts.microvms(keyword="2vcpu_1024mb"))
    kernel_artifacts = ArtifactSet(artifacts.kernels())
    disk_artifacts = ArtifactSet(artifacts.disks(keyword="ubuntu"))

    logger.info("Testing on processor %s", get_cpu_model_name())

    # Context consumed by the fio workload; the block device uses the
    # synchronous io_engine.
    ctx = TestContext()
    ctx.custom = {
        'builder': MicrovmBuilder(bin_cloner_path),
        'logger': logger,
        'name': TEST_ID,
        'results_file_dumper': results_file_dumper,
        'io_engine': 'Sync'
    }

    TestMatrix(
        context=ctx,
        artifact_sets=[vm_artifacts, kernel_artifacts, disk_artifacts]
    ).run_test(fio_workload)
Example #4
0
def _g2h_send_ping(context):
    """Send ping from guest to host.

    Boots a microvm from the current test-matrix artifacts, pins its
    threads to dedicated host cores, runs ``ping`` from the guest against
    the host IP and pipes the output through a latency statistics consumer.
    """
    logger = context.custom['logger']
    vm_builder = context.custom['builder']
    interval_between_req = context.custom['interval']
    name = context.custom['name']
    file_dumper = context.custom['results_file_dumper']

    logger.info("Testing {} with microvm: \"{}\", kernel {}, disk {} ".format(
        name, context.microvm.name(), context.kernel.name(),
        context.disk.name()))

    # Create a rw copy artifact.
    rw_disk = context.disk.copy()
    # Get ssh key from read-only artifact.
    ssh_key = context.disk.ssh_key()
    # Create a fresh microvm from artifacts.
    basevm = vm_builder.build(kernel=context.kernel,
                              disks=[rw_disk],
                              ssh_key=ssh_key,
                              config=context.microvm)

    basevm.start()

    # Check if the needed CPU cores are available. We have the API thread, VMM
    # thread and then one thread for each configured vCPU.
    assert CpuMap.len() >= 2 + basevm.vcpus_count

    # Pin uVM threads to physical cores.
    current_cpu_id = 0
    assert basevm.pin_vmm(current_cpu_id), \
        "Failed to pin firecracker thread."
    current_cpu_id += 1
    assert basevm.pin_api(current_cpu_id), \
        "Failed to pin fc_api thread."
    for i in range(basevm.vcpus_count):
        current_cpu_id += 1
        # BUGFIX: the target core was `current_cpu_id + i`, which advances
        # by two per iteration and overruns the core budget asserted via
        # CpuMap above; each vCPU belongs on the next sequential core
        # (consistent with the other workloads in this file).
        assert basevm.pin_vcpu(i, current_cpu_id), \
            f"Failed to pin fc_vcpu {i} thread."

    custom = {
        "microvm": context.microvm.name(),
        "kernel": context.kernel.name(),
        "disk": context.disk.name(),
        "cpu_model_name": get_cpu_model_name()
    }

    st_core = core.Core(name="network_latency", iterations=1, custom=custom)
    cons = consumer.LambdaConsumer(
        func=consume_ping_output,
        func_kwargs={"requests": context.custom['requests']})
    cmd = PING.format(context.custom['requests'], interval_between_req,
                      DEFAULT_HOST_IP)
    prod = producer.SSHCommand(cmd, net_tools.SSHConnection(basevm.ssh_config))
    st_core.add_pipe(producer=prod, consumer=cons, tag="ping")

    # Gather results and verify pass criteria.
    result = st_core.run_exercise(file_dumper is None)
    if file_dumper:
        file_dumper.writeln(json.dumps(result))
Example #5
0
def test_network_tcp_throughput(bin_cloner_path, results_file_dumper):
    """
    Test network throughput for multiple vm configurations.

    @type: performance
    """
    logger = logging.getLogger(TEST_ID)

    # Guest artifacts: 1- and 2-vCPU microvm configs, every kernel, and
    # the ubuntu rootfs.
    artifacts = ArtifactCollection(_test_images_s3_bucket())
    microvm_artifacts = ArtifactSet(artifacts.microvms(keyword="1vcpu_1024mb"))
    microvm_artifacts.insert(artifacts.microvms(keyword="2vcpu_1024mb"))
    kernel_artifacts = ArtifactSet(artifacts.kernels())
    disk_artifacts = ArtifactSet(artifacts.disks(keyword="ubuntu"))

    logger.info("Testing on processor %s", get_cpu_model_name())

    # Context consumed by the iperf workload.
    ctx = TestContext()
    ctx.custom = {
        "builder": MicrovmBuilder(bin_cloner_path),
        "logger": logger,
        "name": TEST_ID,
        "results_file_dumper": results_file_dumper,
    }

    TestMatrix(
        context=ctx,
        artifact_sets=[microvm_artifacts, kernel_artifacts, disk_artifacts],
    ).run_test(iperf_workload)
def statistics(mode, env_id, fio_id):
    """Define statistics based on the mode."""
    host_cpu_model = get_cpu_model_name()
    # Pick the first baseline entry matching the host CPU model, if any.
    host_cpu_baselines = next(
        (entry
         for entry in CONFIG["hosts"]["instances"]["m5d.metal"]["cpus"]
         if entry["model"] == host_cpu_model),
        None)

    # Because of current fio modes (randread, randrw, readwrite, read) we can
    # always assume that we measure read operations.
    stats = no_criteria_cpu_utilization_stats()
    stats.extend(no_criteria_ops_stats("read"))
    if host_cpu_baselines:
        # A matching baseline exists: use statistics with pass criteria.
        stats = criteria_cpu_utilization_stats(env_id, fio_id)
        stats.extend(criteria_ops_stats(env_id, fio_id, "read"))

    # Write statistics are only relevant for modes that perform writes.
    if mode.endswith(("write", "rw")):
        stats.extend(
            criteria_ops_stats(env_id, fio_id, "write")
            if host_cpu_baselines
            else no_criteria_ops_stats("write"))

    return stats
def iperf_workload(context):
    """Iperf between guest and host in both directions for TCP workload."""
    vm_builder = context.custom['builder']
    logger = context.custom["logger"]
    file_dumper = context.custom['results_file_dumper']

    # Create a rw copy artifact.
    rw_disk = context.disk.copy()
    # Get ssh key from read-only artifact.
    ssh_key = context.disk.ssh_key()
    # Create a fresh microvm from artifacts.
    basevm = vm_builder.build(kernel=context.kernel,
                              disks=[rw_disk],
                              ssh_key=ssh_key,
                              config=context.microvm)

    basevm.start()
    st_core = core.Core(
        name="network_tcp_throughput",
        iterations=1,
        custom={
            "microvm": context.microvm.name(),
            "kernel": context.kernel.name(),
            "disk": context.disk.name(),
            "cpu_model_name": get_cpu_model_name()
        })

    # Check if the needed CPU cores are available. We have the API thread, VMM
    # thread and then one thread for each configured vCPU.
    assert CpuMap.len() >= 2 + basevm.vcpus_count

    # Pin uVM threads to physical cores: VMM, API, then one per vCPU.
    cpu_id = 0
    assert basevm.pin_vmm(cpu_id), "Failed to pin firecracker thread."
    cpu_id += 1
    assert basevm.pin_api(cpu_id), "Failed to pin fc_api thread."
    for i in range(basevm.vcpus_count):
        cpu_id += 1
        assert basevm.pin_vcpu(i, cpu_id), \
            f"Failed to pin fc_vcpu {i} thread."

    logger.info("Testing with microvm: \"{}\", kernel {}, disk {}"
                .format(context.microvm.name(),
                        context.kernel.name(),
                        context.disk.name()))

    env_id = f"{context.kernel.name()}/{context.disk.name()}"
    for cons, prod, tag in pipes(basevm, DEFAULT_HOST_IP,
                                 cpu_id + 1, env_id):
        st_core.add_pipe(prod, cons, tag)

    # Run the guest commands, gather results; pass criteria are only
    # enforced when no file dumper was supplied.
    results = st_core.run_exercise(check_criteria=file_dumper is None)
    if file_dumper:
        file_dumper.writeln(json.dumps(results))
Example #8
0
def iperf_workload(context):
    """Run a statistic exercise."""
    vm_builder = context.custom['builder']
    logger = context.custom["logger"]
    file_dumper = context.custom['results_file_dumper']

    # Create a rw copy artifact.
    rw_disk = context.disk.copy()
    # Get ssh key from read-only artifact.
    ssh_key = context.disk.ssh_key()
    # Create a fresh microvm from artifacts.
    basevm = vm_builder.build(kernel=context.kernel,
                              disks=[rw_disk],
                              ssh_key=ssh_key,
                              config=context.microvm)

    # Create a vsock device
    basevm.vsock.put(vsock_id="vsock0",
                     guest_cid=3,
                     uds_path="/" + VSOCK_UDS_PATH)

    basevm.start()

    st_core = core.Core(name="vsock_throughput",
                        iterations=1,
                        custom={'cpu_model_name': get_cpu_model_name()})

    # Check if the needed CPU cores are available. We have the API thread, VMM
    # thread and then one thread for each configured vCPU.
    assert CpuMap.len() >= 2 + basevm.vcpus_count

    # Pin uVM threads to physical cores: VMM, API, then one per vCPU.
    cpu_id = 0
    assert basevm.pin_vmm(cpu_id), "Failed to pin firecracker thread."
    cpu_id += 1
    assert basevm.pin_api(cpu_id), "Failed to pin fc_api thread."
    for i in range(basevm.vcpus_count):
        cpu_id += 1
        assert basevm.pin_vcpu(i, cpu_id), \
            f"Failed to pin fc_vcpu {i} thread."

    logger.info("Testing with microvm: \"{}\", kernel {}, disk {}".format(
        context.microvm.name(), context.kernel.name(), context.disk.name()))

    env_id = f"{context.kernel.name()}/{context.disk.name()}"
    for cons, prod, tag in pipes(basevm, cpu_id + 1, env_id):
        st_core.add_pipe(prod, cons, tag)

    # Run the guest commands and gather results; pass criteria are only
    # enforced when no file dumper was supplied.
    results = st_core.run_exercise(file_dumper is None)
    if file_dumper:
        file_dumper.writeln(json.dumps(results))

    basevm.kill()
def create_pipes_generator(basevm,
                           mode,
                           current_avail_cpu,
                           protocol,
                           host_ip,
                           env_id):
    """Create producer/consumer pipes."""
    host_cpu_model_name = get_cpu_model_name()
    for payload_length in protocol["payload_length"]:
        for ws in protocol["window_size"]:
            # Base iperf3 client invocation; optional window/payload
            # arguments are appended below.
            cmd = CmdBuilder(IPERF3) \
                .with_arg("--verbose") \
                .with_arg("--client", host_ip) \
                .with_arg("--time", CONFIG["time"]) \
                .with_arg("--json") \
                .with_arg("--omit", protocol["omit"])

            iperf3_id_ws = "DEFAULT"
            if ws:
                cmd = cmd.with_arg("--window", f"{ws}")
                iperf3_id_ws = ws

            iperf3_id_payload_len = "DEFAULT"
            if payload_length:
                cmd = cmd.with_arg("--len", f"{payload_length}")
                iperf3_id_payload_len = payload_length

            iperf3_id = f"tcp-p{iperf3_id_payload_len}" \
                        f"-ws{iperf3_id_ws}-{basevm.vcpus_count}vcpu-{mode}"

            cons = consumer.LambdaConsumer(
                consume_stats=True,
                func=consume_iperf_tcp_output,
                func_kwargs={"vcpus_count": basevm.vcpus_count})

            eager_map(cons.set_measurement_def, measurements_tcp())
            eager_map(cons.set_stat_def,
                      stats_tcp(host_cpu_model_name, iperf3_id, env_id))

            prod = producer.LambdaProducer(
                produce_iperf_output,
                {
                    "guest_cmd_builder": cmd,
                    "basevm": basevm,
                    "current_avail_cpu": current_avail_cpu,
                    "runtime": CONFIG["time"],
                    "omit": protocol["omit"],
                    "load_factor": CONFIG["load_factor"],
                    "modes": CONFIG["modes"][mode]
                })
            yield cons, prod, f"{env_id}/{iperf3_id}"
Example #10
0
    def __init__(self, env_id, iperf_id):
        """Vsock throughput baseline provider initialization.

        Looks up the baseline entry matching the host CPU model and
        initializes the parent provider with it (empty query when no
        baseline matches — pipes then run without pass criteria).
        """
        cpu_model_name = get_cpu_model_name()
        baselines = list(filter(
            lambda cpu_baseline: cpu_baseline["model"] == cpu_model_name,
            CONFIG["hosts"]["instances"]["m5d.metal"]["cpus"]))
        # Initialize exactly once; the original initialized with an empty
        # dict and then a second time when a baseline matched.
        super().__init__(
            DictQuery(baselines[0] if baselines else dict()))

        # Tag template: the two placeholders are filled in later by users
        # of the provider.
        self._tag = "baselines/{}/" + env_id + "/{}/" + iperf_id
    def __init__(self, env_id):
        """Snapshot baseline provider initialization.

        Looks up the baseline entry matching the host CPU model for the
        current instance type and initializes the parent provider with it
        (empty query when no baseline matches).
        """
        cpu_model_name = get_cpu_model_name()
        baselines = list(filter(
            lambda cpu_baseline: cpu_baseline["model"] == cpu_model_name,
            CONFIG_DICT["hosts"]["instances"][get_instance_type()]["cpus"]))

        # Initialize exactly once; the original initialized with an empty
        # dict and then a second time when a baseline matched.
        super().__init__(DictQuery(baselines[0] if baselines else {}))

        # Tag template: the placeholders are filled in later by users of
        # the provider.
        self._tag = "baselines/{}/" + env_id + "/{}"
def criteria_ops_stats(env_id: str, fio_id: str, operation: str):
    """Return statistics with pass criteria given by the baselines."""
    bw_key = f"baseline_{BW.format(operation)}/{env_id}/{fio_id}"
    iops_key = f"baseline_{IOPS.format(operation)}/{env_id}/{fio_id}"
    provider = BlockBaselineProvider(get_cpu_model_name())

    def _within_baseline(key):
        # Pass criterion: value must sit within the provider's delta of
        # the baseline target stored under `key`.
        return criteria.EqualWith(provider.target(key), provider.delta(key))

    return [
        st.consumer.StatisticDef.avg(IOPS.format(operation),
                                     criteria=_within_baseline(iops_key)),
        st.consumer.StatisticDef.stddev(IOPS.format(operation)),
        st.consumer.StatisticDef.avg(BW.format(operation),
                                     criteria=_within_baseline(bw_key)),
        st.consumer.StatisticDef.stddev(BW.format(operation)),
    ]
Example #13
0
def snapshot_workload(context):
    """Test all VM configurations for snapshot restore.

    Registers every snapshot scaling scenario on one statistics core,
    runs them, and dumps the aggregated result.
    """
    file_dumper = context.custom["results_file_dumper"]

    st_core = core.Core(name=TEST_ID,
                        iterations=1,
                        custom={"cpu_model_name": get_cpu_model_name()})

    snapshot_scaling_vcpus(context, st_core, vcpu_count=10)
    snapshot_scaling_mem(context, st_core, mem_exponent=9)
    snapshot_scaling_net(context, st_core)
    snapshot_scaling_block(context, st_core)
    snapshot_all_devices(context, st_core)

    # Gather results and verify pass criteria.
    try:
        result = st_core.run_exercise()
    except core.CoreException as err:
        handle_failure(file_dumper, err)
        # BUGFIX: `result` is unbound when run_exercise raises; return here
        # so a non-raising handle_failure does not lead to an
        # UnboundLocalError in dump_test_result below.
        return

    dump_test_result(file_dumper, result)
def criteria_cpu_utilization_stats(env_id, fio_id):
    """Return the set of CPU utilization statistics with criteria."""
    provider = BlockBaselineProvider(get_cpu_model_name())

    def _first_observation(ms_name, key):
        # First observed value of `ms_name`, checked against the baseline
        # stored under `key` within the provider's allowed delta.
        return st.consumer.StatisticDef.get_first_observation(
            st_name="value",
            ms_name=ms_name,
            criteria=criteria.EqualWith(provider.target(key),
                                        provider.delta(key)))

    return [
        _first_observation(
            CPU_UTILIZATION_VMM,
            f"baseline_cpu_utilization_vmm/{env_id}/{fio_id}"),
        _first_observation(
            CPU_UTILIZATION_VCPUS_TOTAL,
            "baseline_cpu_utilization_vcpus_total/"
            f"{env_id}/{fio_id}"),
    ]
Example #15
0
def test_vsock_throughput(bin_cloner_path):
    """Test vsock throughput driver for multiple artifacts."""
    logger = logging.getLogger("vsock_throughput")
    artifacts = ArtifactCollection(_test_images_s3_bucket())
    microvm_artifacts = ArtifactSet(artifacts.microvms(keyword="1vcpu_1024mb"))
    microvm_artifacts.insert(artifacts.microvms(keyword="2vcpu_1024mb"))
    kernel_artifacts = ArtifactSet(
        artifacts.kernels(keyword="vmlinux-4.14.bin"))
    disk_artifacts = ArtifactSet(artifacts.disks(keyword="ubuntu"))

    # Create a test context and add builder, logger, network.
    test_context = TestContext()
    test_context.custom = {
        'builder': MicrovmBuilder(bin_cloner_path),
        'logger': logger,
        'name': 'vsock_throughput'
    }

    # Log the host CPU model through the test logger (was a bare print(),
    # inconsistent with the other performance tests in this suite).
    logger.info("Testing on processor %s", get_cpu_model_name())

    test_matrix = TestMatrix(
        context=test_context,
        artifact_sets=[microvm_artifacts, kernel_artifacts, disk_artifacts])
    test_matrix.run_test(iperf_workload)
def test_block_performance_async(bin_cloner_path, results_file_dumper):
    """
    Test block performance for multiple vm configurations.

    @type: performance
    """
    logger = logging.getLogger(TEST_ID)

    # The Async engine requires io_uring support on the host kernel.
    if not is_io_uring_supported():
        logger.info("io_uring is not supported. Skipping..")
        pytest.skip("Cannot run async if io_uring is not supported")

    # Guest artifacts: 1- and 2-vCPU microvm configs, every kernel, and
    # the ubuntu rootfs.
    artifacts = ArtifactCollection(_test_images_s3_bucket())
    vm_artifacts = ArtifactSet(artifacts.microvms(keyword="1vcpu_1024mb"))
    vm_artifacts.insert(artifacts.microvms(keyword="2vcpu_1024mb"))
    kernel_artifacts = ArtifactSet(artifacts.kernels())
    disk_artifacts = ArtifactSet(artifacts.disks(keyword="ubuntu"))

    logger.info("Testing on processor %s", get_cpu_model_name())

    # Context consumed by the fio workload; the block device uses the
    # asynchronous (io_uring) io_engine.
    ctx = TestContext()
    ctx.custom = {
        "builder": MicrovmBuilder(bin_cloner_path),
        "logger": logger,
        "name": TEST_ID,
        "results_file_dumper": results_file_dumper,
        "io_engine": "Async",
    }

    TestMatrix(
        context=ctx,
        artifact_sets=[vm_artifacts, kernel_artifacts, disk_artifacts],
    ).run_test(fio_workload)
def fio_workload(context):
    """Execute block device emulation benchmarking scenarios.

    Boots a microvm with a secondary scratch block device, pins its
    threads to host cores, then runs fio for every (mode, block size)
    combination from CONFIG through producer/consumer pipes.
    """
    vm_builder = context.custom['builder']
    logger = context.custom["logger"]
    file_dumper = context.custom["results_file_dumper"]

    # Create a rw copy artifact.
    rw_disk = context.disk.copy()
    # Get ssh key from read-only artifact.
    ssh_key = context.disk.ssh_key()
    # Create a fresh microvm from artifacts.
    vm_instance = vm_builder.build(kernel=context.kernel,
                                   disks=[rw_disk],
                                   ssh_key=ssh_key,
                                   config=context.microvm)
    basevm = vm_instance.vm

    # Add a secondary block device for benchmark tests.
    fs = drive_tools.FilesystemFile(os.path.join(basevm.fsfiles, 'scratch'),
                                    CONFIG["block_device_size"])
    basevm.add_drive('scratch', fs.path)
    basevm.start()

    # Pin uVM threads to physical cores: VMM, API, then one per vCPU.
    # (The previous comment incorrectly described this as getting thread
    # names.)
    current_cpu_id = 0
    basevm.pin_vmm(current_cpu_id)
    current_cpu_id += 1
    basevm.pin_api(current_cpu_id)
    for vcpu_id in range(basevm.vcpus_count):
        current_cpu_id += 1
        basevm.pin_vcpu(vcpu_id, current_cpu_id)

    st_core = core.Core(name=TEST_ID,
                        iterations=1,
                        custom={
                            "microvm": context.microvm.name(),
                            "kernel": context.kernel.name(),
                            "disk": context.disk.name(),
                            "cpu_model_name": get_cpu_model_name()
                        })

    logger.info("Testing with microvm: \"{}\", kernel {}, disk {}".format(
        context.microvm.name(), context.kernel.name(), context.disk.name()))

    ssh_connection = net_tools.SSHConnection(basevm.ssh_config)
    env_id = f"{context.kernel.name()}/{context.disk.name()}/" \
             f"{context.microvm.name()}"

    # One pipe per (fio mode, block size) combination.
    for mode in CONFIG["fio_modes"]:
        for bs in CONFIG["fio_blk_sizes"]:
            fio_id = f"{mode}-bs{bs}"
            st_prod = st.producer.LambdaProducer(func=run_fio,
                                                 func_kwargs={
                                                     "env_id": env_id,
                                                     "basevm": basevm,
                                                     "ssh_conn":
                                                     ssh_connection,
                                                     "mode": mode,
                                                     "bs": bs
                                                 })
            st_cons = st.consumer.LambdaConsumer(
                metadata_provider=DictMetadataProvider(
                    CONFIG["measurements"],
                    BlockBaselinesProvider(env_id, fio_id)),
                func=consume_fio_output,
                func_kwargs={
                    "numjobs": basevm.vcpus_count,
                    "mode": mode,
                    "bs": bs,
                    "env_id": env_id,
                    "logs_path": basevm.jailer.chroot_base_with_id()
                })
            st_core.add_pipe(st_prod, st_cons, tag=f"{env_id}/{fio_id}")

    # Gather results and verify pass criteria.
    try:
        result = st_core.run_exercise()
    except core.CoreException as err:
        handle_failure(file_dumper, err)
        # BUGFIX: `result` is unbound when run_exercise raises; return here
        # so a non-raising handle_failure does not lead to an
        # UnboundLocalError in dump_test_result below.
        return

    dump_test_result(file_dumper, result)
Example #18
0
def iperf_workload(context):
    """Iperf between guest and host in both directions for TCP workload.

    Boots a microvm, pins its threads to host cores, wires up one
    producer/consumer pipe per iperf configuration and runs the exercise.
    """
    vm_builder = context.custom["builder"]
    logger = context.custom["logger"]
    file_dumper = context.custom["results_file_dumper"]

    # Create a rw copy artifact.
    rw_disk = context.disk.copy()
    # Get ssh key from read-only artifact.
    ssh_key = context.disk.ssh_key()
    # Create a fresh microvm from artifacts.
    vm_instance = vm_builder.build(
        kernel=context.kernel, disks=[rw_disk], ssh_key=ssh_key, config=context.microvm
    )
    basevm = vm_instance.vm
    basevm.start()
    custom = {
        "microvm": context.microvm.name(),
        "kernel": context.kernel.name(),
        "disk": context.disk.name(),
        "cpu_model_name": get_cpu_model_name(),
    }
    st_core = core.Core(name=TEST_ID, iterations=1, custom=custom)

    # Check if the needed CPU cores are available. We have the API thread, VMM
    # thread and then one thread for each configured vCPU.
    assert CpuMap.len() >= 2 + basevm.vcpus_count

    # Pin uVM threads to physical cores.
    current_avail_cpu = 0
    assert basevm.pin_vmm(current_avail_cpu), "Failed to pin firecracker thread."
    current_avail_cpu += 1
    assert basevm.pin_api(current_avail_cpu), "Failed to pin fc_api thread."
    for i in range(basevm.vcpus_count):
        current_avail_cpu += 1
        assert basevm.pin_vcpu(
            i, current_avail_cpu
        ), f"Failed to pin fc_vcpu {i} thread."

    logger.info(
        'Testing with microvm: "{}", kernel {}, disk {}'.format(
            context.microvm.name(), context.kernel.name(), context.disk.name()
        )
    )

    for cons, prod, tag in pipes(
        basevm,
        DEFAULT_HOST_IP,
        current_avail_cpu + 1,
        f"{context.kernel.name()}/{context.disk.name()}/" f"{context.microvm.name()}",
    ):
        st_core.add_pipe(prod, cons, tag)

    # Start running the commands on guest, gather results and verify pass
    # criteria.
    try:
        result = st_core.run_exercise()
    except core.CoreException as err:
        handle_failure(file_dumper, err)
        # BUGFIX: `result` is unbound when run_exercise raises; return here
        # so a non-raising handle_failure does not lead to an
        # UnboundLocalError in the dump below.
        return

    file_dumper.dump(result)
def create_pipes_generator(basevm,
                           mode,
                           current_avail_cpu,
                           protocol,
                           host_ip,
                           env_id):
    """Create producer/consumer pipes."""
    host_cpu_model_name = get_cpu_model_name()
    cpus_baselines = test_cfg.CONFIG["hosts"]["instances"]["m5d.metal"]["cpus"]
    # Default to criteria-free statistics; replaced per-pipe below when a
    # baseline matching the host CPU model exists.
    stats = no_criteria_stats()
    baselines = [entry for entry in cpus_baselines
                 if entry["model"] == host_cpu_model_name]

    for payload_length in protocol["payload_length"]:
        for ws in protocol["window_size"]:
            # Base iperf3 client invocation; optional window/payload
            # arguments are appended below.
            cmd = CmdBuilder(test_cfg.IPERF3) \
                .with_arg("--verbose") \
                .with_arg("--client", host_ip) \
                .with_arg("--time", test_cfg.CONFIG["time"]) \
                .with_arg("--json") \
                .with_arg("--omit", protocol["omit"])

            iperf3_id_ws = "DEFAULT"
            if ws:
                cmd = cmd.with_arg("--window", f"{ws}")
                iperf3_id_ws = ws

            iperf3_id_payload_len = "DEFAULT"
            if payload_length:
                cmd = cmd.with_arg("--len", f"{payload_length}")
                iperf3_id_payload_len = payload_length

            iperf3_id = f"tcp-p{iperf3_id_payload_len}" \
                        f"-ws{iperf3_id_ws}-{basevm.vcpus_count}vcpu-{mode}"

            cons = consumer.LambdaConsumer(
                consume_stats=False,
                func=consume_iperf_tcp_output,
                func_kwargs={"vcpus_count": basevm.vcpus_count})

            if len(baselines) > 0:
                stats = criteria_stats(baselines[0], iperf3_id, env_id)

            eager_map(cons.set_measurement_def, measurements())
            eager_map(cons.set_stat_def, stats)

            prod = producer.LambdaProducer(
                produce_iperf_output,
                {
                    "guest_cmd_builder": cmd,
                    "basevm": basevm,
                    "current_avail_cpu": current_avail_cpu,
                    "runtime": test_cfg.CONFIG["time"],
                    "omit": protocol["omit"],
                    "load_factor": test_cfg.CONFIG["load_factor"],
                    "modes": test_cfg.CONFIG["modes"][mode]
                })
            yield cons, prod, f"{env_id}/{iperf3_id}"