def default_lambda_consumer(env_id):
    """Create a default lambda consumer for the snapshot restore test."""
    return st.consumer.LambdaConsumer(
        metadata_provider=DictMetadataProvider(
            CONFIG_DICT["measurements"], SnapRestoreBaselinesProvider(env_id)),
        func=consume_output,
        func_kwargs={},
    )
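
# Usage sketch, not part of the original suite: pairing the consumer above
# with a producer and registering the pipe, mirroring the `add_pipe` pattern
# used in the fio example further below. `produce_snapshot_output` is a
# hypothetical producer callable standing in for the real measurement code.
def add_snap_restore_pipe(st_core, env_id):
    """Register a producer/consumer pipe for the snapshot restore test."""
    cons = default_lambda_consumer(env_id)
    prod = st.producer.LambdaProducer(
        func=produce_snapshot_output,  # hypothetical measurement producer
        func_kwargs={},
    )
    st_core.add_pipe(prod, cons, tag=f"{env_id}/snap_restore")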
def create_pipes_generator(basevm, mode, current_avail_cpu, protocol, host_ip, env_id):
    """Create producer/consumer pipes."""
    for payload_length in protocol["payload_length"]:
        for ws in protocol["window_size"]:
            iperf_guest_cmd_builder = (
                CmdBuilder(IPERF3)
                .with_arg("--verbose")
                .with_arg("--client", host_ip)
                .with_arg("--time", CONFIG_DICT["time"])
                .with_arg("--json")
                .with_arg("--omit", protocol["omit"])
            )

            if ws != "DEFAULT":
                iperf_guest_cmd_builder = iperf_guest_cmd_builder.with_arg(
                    "--window", f"{ws}"
                )

            if payload_length != "DEFAULT":
                iperf_guest_cmd_builder = iperf_guest_cmd_builder.with_arg(
                    "--len", f"{payload_length}"
                )

            iperf3_id = f"tcp-p{payload_length}-ws{ws}-{mode}"

            cons = consumer.LambdaConsumer(
                metadata_provider=DictMetadataProvider(
                    measurements=CONFIG_DICT["measurements"],
                    baseline_provider=NetTCPThroughputBaselineProvider(
                        env_id, iperf3_id
                    ),
                ),
                func=consume_iperf_tcp_output,
                func_kwargs={"vcpus_count": basevm.vcpus_count},
            )

            prod_kwargs = {
                "guest_cmd_builder": iperf_guest_cmd_builder,
                "basevm": basevm,
                "current_avail_cpu": current_avail_cpu,
                "runtime": CONFIG_DICT["time"],
                "omit": protocol["omit"],
                "load_factor": CONFIG_DICT["load_factor"],
                "modes": CONFIG_DICT["modes"][mode],
            }
            prod = producer.LambdaProducer(produce_iperf_output, prod_kwargs)
            yield cons, prod, f"{env_id}/{iperf3_id}"
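
# Usage sketch (hedged): draining the generator above into a statistics core.
# `add_pipe(prod, cons, tag=...)` mirrors its use in the fio example below;
# `st_core` is assumed to be an already-constructed `core.Core` instance.
def add_tcp_pipes(st_core, basevm, mode, current_avail_cpu, protocol,
                  host_ip, env_id):
    """Register every TCP throughput pipe yielded by the generator."""
    for cons, prod, tag in create_pipes_generator(
            basevm, mode, current_avail_cpu, protocol, host_ip, env_id):
        st_core.add_pipe(prod, cons, tag=tag)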
def pipes(basevm, current_avail_cpu, env_id):
    """Producer/Consumer pipes generator."""
    for mode in CONFIG_DICT["modes"]:
        # Run bi-directional tests only on uVMs with at least two vCPUs: we
        # pin one iperf3 instance per direction to its own vCPU, and with two
        # directions we need at least two vCPUs.
        if mode == "bd" and basevm.vcpus_count < 2:
            continue

        for protocol in CONFIG_DICT["protocols"]:
            for payload_length in protocol["payload_length"]:
                iperf_guest_cmd_builder = (
                    CmdBuilder(IPERF3)
                    .with_arg("--vsock")
                    .with_arg("-c", 2)
                    .with_arg("--json")
                    .with_arg("--omit", protocol["omit"])
                    .with_arg("--time", CONFIG_DICT["time"])
                )

                if payload_length != "DEFAULT":
                    iperf_guest_cmd_builder = iperf_guest_cmd_builder.with_arg(
                        "--len", f"{payload_length}")

                iperf3_id = f"vsock-p{payload_length}-{mode}"

                cons = consumer.LambdaConsumer(
                    metadata_provider=DictMetadataProvider(
                        CONFIG_DICT["measurements"],
                        VsockThroughputBaselineProvider(env_id, iperf3_id),
                    ),
                    func=consume_iperf_output,
                )

                prod_kwargs = {
                    "guest_cmd_builder": iperf_guest_cmd_builder,
                    "basevm": basevm,
                    "current_avail_cpu": current_avail_cpu,
                    "runtime": CONFIG_DICT["time"],
                    "omit": protocol["omit"],
                    "load_factor": CONFIG_DICT["load_factor"],
                    "modes": CONFIG_DICT["modes"][mode],
                }
                prod = producer.LambdaProducer(produce_iperf_output, prod_kwargs)
                yield cons, prod, f"{env_id}/{iperf3_id}"
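
# Minimal sketch of the builder pattern used above, under the assumption that
# the real `CmdBuilder` behaves like an immutable accumulator; this is an
# illustration, not the framework's actual implementation. Each `with_arg`
# returns a new builder, which is why the chains above can be extended inside
# conditionals ("--window", "--len") without mutating shared state.
class CmdBuilderSketch:
    """Immutable command-line builder."""

    def __init__(self, bin_path, args=()):
        self._bin = bin_path
        self._args = tuple(args)

    def with_arg(self, flag, value=None):
        # Return a new builder extended with one flag (and optional value).
        extra = (flag,) if value is None else (flag, str(value))
        return CmdBuilderSketch(self._bin, self._args + extra)

    def build(self):
        # Join the binary and accumulated arguments into the final command.
        return " ".join((self._bin,) + self._args)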
def fio_workload(context):
    """Execute block device emulation benchmarking scenarios."""
    vm_builder = context.custom["builder"]
    logger = context.custom["logger"]
    file_dumper = context.custom["results_file_dumper"]

    # Create a rw copy artifact.
    rw_disk = context.disk.copy()
    # Get ssh key from read-only artifact.
    ssh_key = context.disk.ssh_key()
    # Create a fresh microvm from artifacts.
    vm_instance = vm_builder.build(kernel=context.kernel,
                                   disks=[rw_disk],
                                   ssh_key=ssh_key,
                                   config=context.microvm)
    basevm = vm_instance.vm

    # Add a secondary block device for benchmark tests.
    fs = drive_tools.FilesystemFile(os.path.join(basevm.fsfiles, "scratch"),
                                    CONFIG["block_device_size"])
    basevm.add_drive("scratch", fs.path)
    basevm.start()

    # Pin the Firecracker threads (VMM, API, vCPUs) to dedicated host CPUs.
    current_cpu_id = 0
    basevm.pin_vmm(current_cpu_id)
    current_cpu_id += 1
    basevm.pin_api(current_cpu_id)
    for vcpu_id in range(basevm.vcpus_count):
        current_cpu_id += 1
        basevm.pin_vcpu(vcpu_id, current_cpu_id)
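    # Resulting layout: host CPU 0 runs the VMM thread, CPU 1 the API thread,
    # and host CPUs 2..(vcpus_count + 1) run the guest vCPU threads.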

    st_core = core.Core(name=TEST_ID,
                        iterations=1,
                        custom={
                            "microvm": context.microvm.name(),
                            "kernel": context.kernel.name(),
                            "disk": context.disk.name(),
                            "cpu_model_name": get_cpu_model_name()
                        })

    logger.info("Testing with microvm: \"{}\", kernel {}, disk {}".format(
        context.microvm.name(), context.kernel.name(), context.disk.name()))

    ssh_connection = net_tools.SSHConnection(basevm.ssh_config)
    env_id = f"{context.kernel.name()}/{context.disk.name()}/" \
             f"{context.microvm.name()}"

    for mode in CONFIG["fio_modes"]:
        for bs in CONFIG["fio_blk_sizes"]:
            fio_id = f"{mode}-bs{bs}"
            st_prod = st.producer.LambdaProducer(
                func=run_fio,
                func_kwargs={
                    "env_id": env_id,
                    "basevm": basevm,
                    "ssh_conn": ssh_connection,
                    "mode": mode,
                    "bs": bs,
                },
            )
            st_cons = st.consumer.LambdaConsumer(
                metadata_provider=DictMetadataProvider(
                    CONFIG["measurements"],
                    BlockBaselinesProvider(env_id, fio_id)),
                func=consume_fio_output,
                func_kwargs={
                    "numjobs": basevm.vcpus_count,
                    "mode": mode,
                    "bs": bs,
                    "env_id": env_id,
                    "logs_path": basevm.jailer.chroot_base_with_id()
                })
            st_core.add_pipe(st_prod, st_cons, tag=f"{env_id}/{fio_id}")

    # Gather results and verify pass criteria. `handle_failure` is expected
    # to dump any partial data and re-raise, so `result` is only consumed on
    # the success path below.
    try:
        result = st_core.run_exercise()
    except core.CoreException as err:
        handle_failure(file_dumper, err)

    dump_test_result(file_dumper, result)
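
# Hedged sketch of the failure/dump helpers assumed by the try/except above.
# These only illustrate the expected contract: `handle_failure` persists any
# partial data attached to the exception and re-raises, so `result` is read
# only on the success path. `err.result` and `file_dumper.dump()` are
# hypothetical names, not confirmed APIs of the test framework.
def handle_failure(file_dumper, err):
    """Dump partial results carried by the exception, then propagate it."""
    if file_dumper is not None:
        file_dumper.dump(err.result)  # hypothetical attribute/method
    raise err


def dump_test_result(file_dumper, result):
    """Persist the aggregated statistics for offline inspection."""
    if file_dumper is not None:
        file_dumper.dump(result)  # hypothetical method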