def produce_iperf_output(basevm, guest_cmd_builder, current_avail_cpu, runtime,
                         omit, load_factor, modes):
    """Produce iperf raw output from server-client connection."""
    # Check if we have enough CPUs to pin the servers on the host.
    # The available CPUs are the total minus vcpus, vmm and API threads.
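    # For example (illustrative numbers): on a 16-CPU host running a 2-vCPU
    # guest with load_factor == 1, the check is 1 * 2 < 16 - 2 - 2, leaving
    # host CPUs free for the pinned iperf3 servers.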
    assert load_factor * basevm.vcpus_count < CpuMap.len() - \
        basevm.vcpus_count - 2

    host_uds_path = os.path.join(basevm.path, VSOCK_UDS_PATH)

    # Start the servers.
    for server_idx in range(load_factor * basevm.vcpus_count):
        assigned_cpu = CpuMap(current_avail_cpu)
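        # `-s` runs iperf3 as a server, `-D` daemonizes it and `-1` makes it
        # exit after serving a single client. `--vsock` and the `-B` binding
        # to the host-side Unix domain socket come from the vsock-enabled
        # iperf3 build these tests rely on.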
        iperf_server = \
            CmdBuilder(f"taskset --cpu-list {assigned_cpu}") \
            .with_arg(IPERF3) \
            .with_arg("-sD") \
            .with_arg("--vsock") \
            .with_arg("-B", host_uds_path) \
            .with_arg("-p", f"{BASE_PORT + server_idx}") \
            .with_arg("-1") \
            .build()

        run_cmd(iperf_server)
        current_avail_cpu += 1

    # Wait for iperf3 servers to start.
    time.sleep(SERVER_STARTUP_TIME)

    # Start `load_factor * vcpus_count` iperf3 clients. We cannot use iperf3
    # parallel streams due to non-deterministic results and lack of scaling.
    def spawn_iperf_client(conn, client_idx, mode):
        # Add the port where the iperf3 client is going to send/receive.
        cmd = guest_cmd_builder \
            .with_arg("-p", BASE_PORT + client_idx) \
            .with_arg(mode) \
            .build()

        # Bind the UDS in the jailer's root.
        basevm.create_jailed_resource(
            os.path.join(
                basevm.path,
                make_host_port_path(VSOCK_UDS_PATH, BASE_PORT + client_idx)))

        pinned_cmd = f"taskset --cpu-list {client_idx % basevm.vcpus_count}" \
            f" {cmd}"
        rc, stdout, stderr = conn.execute_command(pinned_cmd)

        assert rc == 0, stderr.read()

        return stdout.read()

    with concurrent.futures.ThreadPoolExecutor() as executor:
        futures = []
        cpu_load_future = executor.submit(get_cpu_percent,
                                          basevm.jailer_clone_pid,
                                          runtime - SERVER_STARTUP_TIME, omit)

        modes_len = len(modes)
        ssh_connection = net_tools.SSHConnection(basevm.ssh_config)
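        # Round-robin example (mode names illustrative): with
        # modes == ["g2h", "h2g"] and four clients, clients 0 and 2 run
        # "g2h" while clients 1 and 3 run "h2g".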
        for client_idx in range(load_factor * basevm.vcpus_count):
            futures.append(
                executor.submit(
                    spawn_iperf_client,
                    ssh_connection,
                    client_idx,
                    # Distribute the modes evenly.
                    modes[client_idx % modes_len]))

        cpu_load = cpu_load_future.result()
        for future in futures[:-1]:
            res = json.loads(future.result())
            res[IPERF3_END_RESULTS_TAG][
                IPERF3_CPU_UTILIZATION_PERCENT_OUT_TAG] = None
            yield res

        # Attach the real CPU utilization vmm/vcpus to
        # the last iperf3 server-client pair measurements.
        res = json.loads(futures[-1].result())

        # We expect a single emulation thread tagged `firecracker`.
        tag = "firecracker"
        assert tag in cpu_load and len(cpu_load[tag]) == 1
        thread_id = list(cpu_load[tag])[0]
        data = cpu_load[tag][thread_id]
        vmm_util = sum(data) / len(data)
        cpu_util_perc = res[IPERF3_END_RESULTS_TAG][
            IPERF3_CPU_UTILIZATION_PERCENT_OUT_TAG] = {}
        cpu_util_perc[CPU_UTILIZATION_VMM] = vmm_util

        vcpus_util = 0
        for vcpu in range(basevm.vcpus_count):
            # We expect a single fc_vcpu thread tagged with
            # `fc_vcpu {vcpu}`.
            tag = f"fc_vcpu {vcpu}"
            assert tag in cpu_load and len(cpu_load[tag]) == 1
            thread_id = list(cpu_load[tag])[0]
            data = cpu_load[tag][thread_id]
            vcpus_util += (sum(data) / len(data))

        cpu_util_perc[CPU_UTILIZATION_VCPUS_TOTAL] = vcpus_util

        yield res
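
# Usage sketch (hypothetical, not part of the test suite): drain the
# generator above and pull the measured throughput out of each iperf3
# JSON report.
#
#   for res in produce_iperf_output(vm, guest_cmd_builder,
#                                   current_avail_cpu=9, runtime=30,
#                                   omit=5, load_factor=1, modes=[""]):
#       print(res[IPERF3_END_RESULTS_TAG]
#             ["sum_received"]["bits_per_second"])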


def run_fio(env_id, basevm, ssh_conn, mode, bs):
    """Run a fio test in the specified mode with block size bs."""
    logs_path = f"{basevm.jailer.chroot_base_with_id()}/{env_id}/{mode}{bs}"

    # Compute the fio command.
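    # `--direct=1` bypasses the guest page cache, `--ramp_time` keeps the
    # warm-up interval out of the results, and `--write_iops_log` /
    # `--write_bw_log` emit per-second logs (averaged over `--log_avg_msec`
    # milliseconds) that are fetched from the guest below.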
    cmd = CmdBuilder(FIO) \
        .with_arg(f"--name={mode}-{bs}") \
        .with_arg(f"--rw={mode}") \
        .with_arg(f"--bs={bs}") \
        .with_arg("--filename=/dev/vdb") \
        .with_arg("--time_base=1") \
        .with_arg(f"--size={CONFIG['block_device_size']}M") \
        .with_arg("--direct=1") \
        .with_arg("--ioengine=libaio") \
        .with_arg("--iodepth=32") \
        .with_arg(f"--ramp_time={CONFIG['omit']}") \
        .with_arg(f"--numjobs={CONFIG['load_factor'] * basevm.vcpus_count}") \
        .with_arg("--randrepeat=0") \
        .with_arg(f"--runtime={CONFIG['time']}") \
        .with_arg(f"--write_iops_log={mode}{bs}") \
        .with_arg(f"--write_bw_log={mode}{bs}") \
        .with_arg("--log_avg_msec=1000") \
        .with_arg("--output-format=json+") \
        .build()

    # Switch the guest block device to the 'none' scheduler so the guest
    # does not reorder or merge requests before they reach the device.
    rc, _, stderr = ssh_conn.execute_command(
        "echo 'none' > /sys/block/vdb/queue/scheduler")
    assert rc == 0, stderr.read()
    assert stderr.read() == ""

    run_cmd("echo 3 > /proc/sys/vm/drop_caches")

    rc, _, stderr = ssh_conn.execute_command(
        "echo 3 > /proc/sys/vm/drop_caches")
    assert rc == 0, stderr.read()
    assert stderr.read() == ""

    # Start the CPU load monitor.
    with concurrent.futures.ThreadPoolExecutor() as executor:
        cpu_load_future = executor.submit(get_cpu_percent,
                                          basevm.jailer_clone_pid,
                                          CONFIG["time"],
                                          omit=CONFIG["omit"])

        # Run the fio workload in the guest.
        rc, _, stderr = ssh_conn.execute_command(cmd)
        assert rc == 0, stderr.read()
        assert stderr.read() == ""

        if os.path.isdir(logs_path):
            shutil.rmtree(logs_path)

        os.makedirs(logs_path)

        ssh_conn.scp_get_file("*.log", logs_path)
        rc, _, stderr = ssh_conn.execute_command("rm *.log")
        assert rc == 0, stderr.read()

        result = dict()
        cpu_load = cpu_load_future.result()
        tag = "firecracker"
        assert tag in cpu_load and len(cpu_load[tag]) == 1

        data = list(cpu_load[tag].values())[0]
        data_len = len(data)
        assert data_len == CONFIG["time"]

        result[CPU_UTILIZATION_VMM] = sum(data) / data_len
        if DEBUG:
            result[CPU_UTILIZATION_VMM_SAMPLES_TAG] = data

        vcpus_util = 0
        for vcpu in range(basevm.vcpus_count):
            # We expect a single fc_vcpu thread tagged with
            # `fc_vcpu {vcpu}`.
            tag = f"fc_vcpu {vcpu}"
            assert tag in cpu_load and len(cpu_load[tag]) == 1
            data = list(cpu_load[tag].values())[0]
            data_len = len(data)

            assert data_len == CONFIG["time"]
            if DEBUG:
                samples_tag = f"cpu_utilization_fc_vcpu_{vcpu}_samples"
                result[samples_tag] = data
            vcpus_util += sum(data) / data_len

        result[CPU_UTILIZATION_VCPUS_TOTAL] = vcpus_util
        return result
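

# Usage sketch (hypothetical): run a random-read job with a 4k block size
# and inspect the CPU utilization attached to the result.
#
#   result = run_fio("env0", vm, ssh_connection, "randread", "4k")
#   print(result[CPU_UTILIZATION_VMM], result[CPU_UTILIZATION_VCPUS_TOTAL])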
def produce_iperf_output(basevm,
                         guest_cmd_builder,
                         current_avail_cpu,
                         runtime,
                         omit,
                         load_factor,
                         modes):
    """Produce iperf raw output from server-client connection."""
    # Check if we have enough CPUs to pin the servers on the host.
    # The available CPUs are the total minus vcpus, vmm and API threads.
    assert load_factor * basevm.vcpus_count < CpuMap.len() - \
           basevm.vcpus_count - 2

    # Start the servers.
    for server_idx in range(load_factor * basevm.vcpus_count):
        assigned_cpu = CpuMap(current_avail_cpu)
        iperf_server = \
            CmdBuilder(f"taskset --cpu-list {assigned_cpu}") \
            .with_arg(basevm.jailer.netns_cmd_prefix()) \
            .with_arg(IPERF3) \
            .with_arg("-sD") \
            .with_arg("-p", f"{BASE_PORT + server_idx}") \
            .with_arg("-1") \
            .build()
        run_cmd(iperf_server)
        current_avail_cpu += 1

    # Wait for the iperf3 servers to start.
    time.sleep(2)

    # Start `load_factor * vcpus_count` iperf3 clients. We cannot use iperf3
    # parallel streams due to non-deterministic results and lack of scaling.
    def spawn_iperf_client(conn, client_idx, mode):
        # Add the port where the iperf3 client is going to send/receive.
        cmd = guest_cmd_builder \
            .with_arg("-p", f"{BASE_PORT + client_idx}") \
            .with_arg(mode) \
            .build()
        pinned_cmd = f"taskset --cpu-list {client_idx % basevm.vcpus_count}" \
                     f" {cmd}"
        _, stdout, _ = conn.execute_command(pinned_cmd)
        return stdout.read()

    # Subtract 2 seconds to drop the readings taken while the workloads end.
    cpu_load_runtime = runtime - 2
    with concurrent.futures.ThreadPoolExecutor() as executor:
        futures = list()
        cpu_load_future = executor.submit(get_cpu_percent,
                                          basevm.jailer_clone_pid,
                                          cpu_load_runtime,
                                          omit)

        modes_len = len(modes)
        ssh_connection = net_tools.SSHConnection(basevm.ssh_config)
        for client_idx in range(load_factor * basevm.vcpus_count):
            futures.append(executor.submit(spawn_iperf_client,
                                           ssh_connection,
                                           client_idx,
                                           # Distribute the modes evenly.
                                           modes[client_idx % modes_len]))

        cpu_load = cpu_load_future.result()
        for future in futures[:-1]:
            res = json.loads(future.result())
            res[IPERF3_END_RESULTS_TAG][
                IPERF3_CPU_UTILIZATION_PERCENT_OUT_TAG] = None
            yield res

        # Attach the real CPU utilization vmm/vcpus to
        # the last iperf3 server-client pair measurements.
        res = json.loads(futures[-1].result())

        # We expect a single emulation thread tagged `firecracker`.
        tag = "firecracker"
        assert tag in cpu_load and len(cpu_load[tag]) == 1
        for thread_id in cpu_load[tag]:
            data = cpu_load[tag][thread_id]
            data_len = len(data)
            assert data_len == cpu_load_runtime
            vmm_util = sum(data)/data_len
            cpu_util_perc = res[IPERF3_END_RESULTS_TAG][
                IPERF3_CPU_UTILIZATION_PERCENT_OUT_TAG] = dict()
            cpu_util_perc[CPU_UTILIZATION_VMM_TAG] = vmm_util
            if DEBUG:
                res[IPERF3_END_RESULTS_TAG][
                    DEBUG_CPU_UTILIZATION_VMM_SAMPLES_TAG] = data

        vcpus_util = 0
        for vcpu in range(basevm.vcpus_count):
            # We expect a single fc_vcpu thread tagged with
            # `fc_vcpu {vcpu}`.
            tag = f"fc_vcpu {vcpu}"
            assert tag in cpu_load and len(cpu_load[tag]) == 1
            for thread_id in cpu_load[tag]:
                data = cpu_load[tag][thread_id]
                data_len = len(data)
                assert data_len == cpu_load_runtime
                if DEBUG:
                    res[IPERF3_END_RESULTS_TAG][f"cpu_utilization_fc_vcpu"
                                                f"_{vcpu}_samples"] = \
                        data
                vcpus_util += sum(data)/data_len

        cpu_util_perc[CPU_UTILIZATION_VCPUS_TOTAL_TAG] = vcpus_util

        yield res
def create_pipes_generator(basevm,
                           mode,
                           current_avail_cpu,
                           protocol,
                           host_ip,
                           env_id):
    """Create producer/consumer pipes."""
    host_cpu_model_name = get_cpu_model_name()
    cpus_baselines = test_cfg.CONFIG["hosts"]["instances"]["m5d.metal"]["cpus"]
    stats = no_criteria_stats()
    baselines = list(filter(
        lambda baseline: baseline["model"] == host_cpu_model_name,
        cpus_baselines))

    for payload_length in protocol["payload_length"]:
        for ws in protocol["window_size"]:
            iperf_guest_cmd_builder = CmdBuilder(test_cfg.IPERF3) \
                .with_arg("--verbose") \
                .with_arg("--client", host_ip) \
                .with_arg("--time", test_cfg.CONFIG["time"]) \
                .with_arg("--json") \
                .with_arg("--omit", protocol["omit"])

            if ws:
                iperf_guest_cmd_builder = iperf_guest_cmd_builder \
                    .with_arg("--window", f"{ws}")
                iperf3_id_ws = ws
            else:
                iperf3_id_ws = "DEFAULT"

            if payload_length:
                iperf_guest_cmd_builder = iperf_guest_cmd_builder \
                    .with_arg("--len", f"{payload_length}")
                iperf3_id_payload_len = payload_length
            else:
                iperf3_id_payload_len = "DEFAULT"

            iperf3_id = f"tcp-p{iperf3_id_payload_len}" \
                        f"-ws{iperf3_id_ws}-{basevm.vcpus_count}vcpu-{mode}"

            cons = consumer.LambdaConsumer(
                consume_stats=False,
                func=consume_iperf_tcp_output,
                func_kwargs={
                    "vcpus_count": basevm.vcpus_count
                }
            )

            if len(baselines) > 0:
                stats = criteria_stats(baselines[0], iperf3_id, env_id)

            eager_map(cons.set_measurement_def, measurements())
            eager_map(cons.set_stat_def, stats)

            prod_kwargs = {
                "guest_cmd_builder": iperf_guest_cmd_builder,
                "basevm": basevm,
                "current_avail_cpu": current_avail_cpu,
                "runtime": test_cfg.CONFIG["time"],
                "omit": protocol["omit"],
                "load_factor": test_cfg.CONFIG["load_factor"],
                "modes": test_cfg.CONFIG["modes"][mode]
            }
            prod = producer.LambdaProducer(produce_iperf_output,
                                           prod_kwargs)
            yield cons, prod, f"{env_id}/{iperf3_id}"
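
# Usage sketch (hypothetical; `st_core.add_pipe` stands in for whichever
# statistics-core API the surrounding test harness exposes):
#
#   for cons, prod, tag in create_pipes_generator(vm, "g2h", 9, proto,
#                                                 host_ip, env_id):
#       st_core.add_pipe(prod, cons, tag)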