Example #1
def pipes(basevm, current_avail_cpu, env_id):
    """Producer/Consumer pipes generator."""
    host_cpu_model_name = get_cpu_model_name()
    cpus_baselines = test_cfg.CONFIG["hosts"]["instances"]["m5d.metal"]["cpus"]
    stats = no_criteria_stats()
    baselines = list(
        filter(lambda baseline: baseline["model"] == host_cpu_model_name,
               cpus_baselines))

    for mode in test_cfg.CONFIG["modes"]:
        # We run bi-directional tests only on uVMs with at least two vCPUs:
        # we pin one iperf3 process per direction to its own vCPU, and there
        # are two directions.
        if mode == "bd" and basevm.vcpus_count < 2:
            continue

        for protocol in test_cfg.CONFIG["protocols"]:
            for payload_length in protocol["payload_length"]:
                iperf_guest_cmd_builder = CmdBuilder(test_cfg.IPERF3) \
                    .with_arg("--vsock") \
                    .with_arg("-c", 2)       \
                    .with_arg("--json") \
                    .with_arg("--omit", protocol["omit"]) \
                    .with_arg("--time", test_cfg.CONFIG["time"])

                if payload_length:
                    iperf_guest_cmd_builder = iperf_guest_cmd_builder \
                        .with_arg("--len", f"{payload_length}")
                    iperf3_id_payload_len = payload_length
                else:
                    iperf3_id_payload_len = "DEFAULT"

                iperf3_id = f"vsock-p{iperf3_id_payload_len}" \
                    f"-{basevm.vcpus_count}vcpu-{mode}"

                cons = consumer.LambdaConsumer(consume_stats=False,
                                               func=consume_iperf_output)

                if len(baselines) > 0:
                    stats = criteria_stats(baselines[0], iperf3_id, env_id)

                eager_map(cons.set_measurement_def, measurements())
                eager_map(cons.set_stat_def, stats)

                prod_kwargs = {
                    "guest_cmd_builder": iperf_guest_cmd_builder,
                    "basevm": basevm,
                    "current_avail_cpu": current_avail_cpu,
                    "runtime": test_cfg.CONFIG["time"],
                    "omit": protocol["omit"],
                    "load_factor": test_cfg.CONFIG["load_factor"],
                    "modes": test_cfg.CONFIG["modes"][mode],
                }
                prod = producer.LambdaProducer(produce_iperf_output,
                                               prod_kwargs)
                yield cons, prod, f"{env_id}/{iperf3_id}"
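

# The repeated `.with_arg(...)` chains above rely on CmdBuilder's fluent
# interface. Below is a minimal sketch of that pattern (an illustrative
# re-implementation, not the project's actual CmdBuilder class; the real
# one may differ, e.g. it may be immutable and return a new builder):
class SketchCmdBuilder:
    """Illustrative fluent command-line builder."""

    def __init__(self, binary):
        self._cmd = [str(binary)]

    def with_arg(self, flag, value=None):
        # Append the flag (and its value, if given) and return the builder
        # so calls can be chained or reassigned, as done above.
        self._cmd.append(str(flag))
        if value is not None:
            self._cmd.append(str(value))
        return self

    def build(self):
        return " ".join(self._cmd)


# e.g.: SketchCmdBuilder("iperf3").with_arg("--json").with_arg("--time", 30)
#       .build() == "iperf3 --json --time 30"
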
def create_pipes_generator(basevm,
                           mode,
                           current_avail_cpu,
                           protocol,
                           host_ip,
                           env_id):
    """Create producer/consumer pipes."""
    host_cpu_model_name = get_cpu_model_name()
    for payload_length in protocol["payload_length"]:
        for ws in protocol["window_size"]:
            iperf_guest_cmd_builder = CmdBuilder(IPERF3) \
                .with_arg("--verbose") \
                .with_arg("--client", host_ip) \
                .with_arg("--time", CONFIG["time"]) \
                .with_arg("--json") \
                .with_arg("--omit", protocol["omit"])

            if ws:
                iperf_guest_cmd_builder = iperf_guest_cmd_builder \
                    .with_arg("--window", f"{ws}")
                iperf3_id_ws = ws
            else:
                iperf3_id_ws = "DEFAULT"

            if payload_length:
                iperf_guest_cmd_builder = iperf_guest_cmd_builder \
                    .with_arg("--len", f"{payload_length}")
                iperf3_id_payload_len = payload_length
            else:
                iperf3_id_payload_len = "DEFAULT"

            iperf3_id = f"tcp-p{iperf3_id_payload_len}" \
                        f"-ws{iperf3_id_ws}-{basevm.vcpus_count}vcpu-{mode}"

            cons = consumer.LambdaConsumer(
                consume_stats=True,
                func=consume_iperf_tcp_output,
                func_kwargs={
                    "vcpus_count": basevm.vcpus_count
                }
            )

            eager_map(cons.set_measurement_def, measurements_tcp())
            eager_map(cons.set_stat_def, stats_tcp(host_cpu_model_name,
                                                   iperf3_id, env_id))

            prod_kwargs = {
                "guest_cmd_builder": iperf_guest_cmd_builder,
                "basevm": basevm,
                "current_avail_cpu": current_avail_cpu,
                "runtime": CONFIG["time"],
                "omit": protocol["omit"],
                "load_factor": CONFIG["load_factor"],
                "modes": CONFIG["modes"][mode]
            }
            prod = producer.LambdaProducer(produce_iperf_output,
                                           prod_kwargs)
            yield cons, prod, f"{env_id}/{iperf3_id}"
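

# Both generators above yield (consumer, producer, tag) triples. A minimal
# sketch of how such a generator is typically drained into a statistics
# core, assuming the core.Core / add_pipe / run_exercise API used by the
# snapshot tests later in this file (the core name is illustrative):
def sketch_run_network_pipes(basevm, mode, current_avail_cpu, protocol,
                             host_ip, env_id):
    st_core = core.Core(name="network_tcp_throughput", iterations=1)
    for cons, prod, tag in create_pipes_generator(basevm, mode,
                                                  current_avail_cpu,
                                                  protocol, host_ip, env_id):
        st_core.add_pipe(producer=prod, consumer=cons, tag=tag)
    st_core.run_exercise()
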
def consume_ping_output(cons, raw_data, requests):
    """Consume ping output.

    Output example:
    PING 8.8.8.8 (8.8.8.8) 56(84) bytes of data.
    64 bytes from 8.8.8.8: icmp_seq=1 ttl=118 time=17.7 ms
    64 bytes from 8.8.8.8: icmp_seq=2 ttl=118 time=17.7 ms
    64 bytes from 8.8.8.8: icmp_seq=3 ttl=118 time=17.4 ms
    64 bytes from 8.8.8.8: icmp_seq=4 ttl=118 time=17.8 ms

    --- 8.8.8.8 ping statistics ---
    4 packets transmitted, 4 received, 0% packet loss, time 3005ms
    rtt min/avg/max/mdev = 17.478/17.705/17.808/0.210 ms
    """
    eager_map(cons.set_measurement_def, measurements())

    st_keys = ["Min", "Avg", "Max", "Stddev"]

    output = raw_data.strip().split("\n")
    assert len(output) > 2

    # E.g.: rtt min/avg/max/mdev = 17.478/17.705/17.808/0.210 ms
    # (BSD-style ping prints "round-trip min/avg/max/stddev"; the pattern
    # accepts both suffixes).
    stat_line = output[-1]
    pattern_stats = r"min/avg/max/[a-z]+dev = (.+)/(.+)/(.+)/(.+) ms"
    stat_values = re.findall(pattern_stats, stat_line)[0]
    assert len(stat_values) == 4

    for st_name, stat_value in zip(st_keys, stat_values):
        cons.consume_stat(
            st_name=st_name, ms_name=LATENCY, value=float(stat_value)
        )

    # E.g.: 4 packets transmitted, 4 received, 0% packet loss
    packet_stats = output[-2]
    pattern_packet = r".+ packet.+transmitted, .+ received, (.+)% packet loss"
    pkt_loss = re.findall(pattern_packet, packet_stats)
    assert len(pkt_loss) == 1
    cons.consume_stat(
        st_name=PKT_LOSS_STAT_KEY, ms_name=PKT_LOSS, value=float(pkt_loss[0])
    )

    # Compute percentiles.
    seqs = output[1 : requests + 1]
    times = []
    pattern_time = r".+ bytes from .+: icmp_seq=.+ ttl=.+ time=(.+) ms"
    for seq in seqs:
        time = re.findall(pattern_time, seq)
        assert len(time) == 1
        # Parse to float so the sort below is numeric, not lexicographic.
        times.append(float(time[0]))

    times.sort()
    cons.consume_stat(
        st_name="Percentile50", ms_name=LATENCY, value=times[int(requests * 0.5)]
    )
    cons.consume_stat(
        st_name="Percentile90", ms_name=LATENCY, value=times[int(requests * 0.9)]
    )
    cons.consume_stat(
        st_name="Percentile99", ms_name=LATENCY, value=times[int(requests * 0.99)]
    )
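

# Quick sanity check of the parsing patterns above, run against the sample
# output from the docstring (illustrative; assumes `re` is imported at
# module level, as the code above already requires):
def sketch_check_ping_patterns():
    rtt_line = "rtt min/avg/max/mdev = 17.478/17.705/17.808/0.210 ms"
    groups = re.findall(r"min/avg/max/[a-z]+dev = (.+)/(.+)/(.+)/(.+) ms",
                        rtt_line)[0]
    assert [float(g) for g in groups] == [17.478, 17.705, 17.808, 0.210]

    loss_line = "4 packets transmitted, 4 received, 0% packet loss, time 3005ms"
    matches = re.findall(
        r".+ packet.+transmitted, .+ received, (.+)% packet loss", loss_line)
    assert len(matches) == 1 and float(matches[0]) == 0.0
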
Example #4
def _test_older_snapshot_resume_latency(context):
    builder = context.custom["builder"]
    logger = context.custom["logger"]
    snapshot_type = context.custom["snapshot_type"]
    file_dumper = context.custom["results_file_dumper"]

    firecracker = context.firecracker
    jailer = firecracker.jailer()
    jailer.download()
    fc_version = firecracker.base_name()[1:]
    logger.info("Firecracker version: %s", fc_version)
    logger.info("Source Firecracker: %s", firecracker.local_path())
    logger.info("Source Jailer: %s", jailer.local_path())

    # Create a fresh microvm with the binary artifacts.
    vm_instance = builder.build_vm_micro(firecracker.local_path(),
                                         jailer.local_path())
    basevm = vm_instance.vm
    basevm.start()
    ssh_connection = net_tools.SSHConnection(basevm.ssh_config)

    # Check if guest works.
    exit_code, _, _ = ssh_connection.execute_command("ls")
    assert exit_code == 0

    # The snapshot builder expects disks as paths, not artifacts.
    disks = []
    for disk in vm_instance.disks:
        disks.append(disk.local_path())

    # Create a snapshot builder from a microvm.
    snapshot_builder = SnapshotBuilder(basevm)
    snapshot = snapshot_builder.create(disks, vm_instance.ssh_key,
                                       snapshot_type)

    basevm.kill()

    st_core = core.Core(name="older_snapshot_resume_latency",
                        iterations=SAMPLE_COUNT)

    prod = producer.LambdaProducer(func=snapshot_resume_producer,
                                   func_kwargs={
                                       "logger": logger,
                                       "vm_builder": builder,
                                       "snapshot": snapshot,
                                       "snapshot_type": snapshot_type,
                                       "use_ramdisk": False
                                   })

    cons = consumer.LambdaConsumer(
        func=lambda cons, result: cons.consume_stat(
            st_name="max", ms_name="latency", value=result),
        func_kwargs={})
    eager_map(cons.set_measurement_def,
              snapshot_resume_measurements(context.microvm.name()))

    st_core.add_pipe(producer=prod, consumer=cons, tag=context.microvm.name())

    # Gather results and verify pass criteria.
    try:
        result = st_core.run_exercise()
    except core.CoreException as err:
        handle_failure(file_dumper, err)
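        # NOTE: handle_failure is assumed to re-raise (or otherwise abort the
        # test run); if it returned normally, `result` below would be unbound.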

    dump_test_result(file_dumper, result)
Example #5
def _test_snapshot_resume_latency(context):
    logger = context.custom["logger"]
    vm_builder = context.custom["builder"]
    snapshot_type = context.custom["snapshot_type"]
    file_dumper = context.custom["results_file_dumper"]
    diff_snapshots = snapshot_type == SnapshotType.DIFF

    logger.info("Measuring snapshot resume(%s) latency for microvm: \"%s\", "
                "kernel %s, disk %s.", snapshot_type, context.microvm.name(),
                context.kernel.name(), context.disk.name())

    # Create a rw copy artifact.
    rw_disk = context.disk.copy()
    # Get ssh key from read-only artifact.
    ssh_key = context.disk.ssh_key()
    # Create a fresh microvm from artifacts.
    vm_instance = vm_builder.build(kernel=context.kernel,
                                   disks=[rw_disk],
                                   ssh_key=ssh_key,
                                   config=context.microvm,
                                   diff_snapshots=diff_snapshots,
                                   use_ramdisk=True)
    basevm = vm_instance.vm
    basevm.start()
    ssh_connection = net_tools.SSHConnection(basevm.ssh_config)

    # Check if guest works.
    exit_code, _, _ = ssh_connection.execute_command("ls")
    assert exit_code == 0

    logger.info("Create {}.".format(snapshot_type))
    # Create a snapshot builder from a microvm.
    snapshot_builder = SnapshotBuilder(basevm)

    snapshot = snapshot_builder.create([rw_disk.local_path()],
                                       ssh_key,
                                       snapshot_type,
                                       use_ramdisk=True)

    basevm.kill()

    st_core = core.Core(name="snapshot_resume_latency",
                        iterations=SAMPLE_COUNT)

    prod = producer.LambdaProducer(func=snapshot_resume_producer,
                                   func_kwargs={
                                       "logger": logger,
                                       "vm_builder": vm_builder,
                                       "snapshot": snapshot,
                                       "snapshot_type": snapshot_type,
                                       "use_ramdisk": True
                                   })

    cons = consumer.LambdaConsumer(
        func=lambda cons, result: cons.consume_stat(
            st_name="max", ms_name="latency", value=result),
        func_kwargs={})
    eager_map(cons.set_measurement_def,
              snapshot_resume_measurements(context.microvm.name()))

    st_core.add_pipe(producer=prod, consumer=cons, tag=context.microvm.name())

    # Gather results and verify pass criteria.
    try:
        result = st_core.run_exercise()
    except core.CoreException as err:
        handle_failure(file_dumper, err)

    dump_test_result(file_dumper, result)
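

# Hypothetical sketch of a producer compatible with the LambdaProducer calls
# above: it receives exactly the keys passed via func_kwargs and returns the
# measured value that the paired consumer records as the "max"/"latency"
# stat. The real snapshot_resume_producer lives elsewhere in the test suite.
def sketch_snapshot_resume_producer(logger, vm_builder, snapshot,
                                    snapshot_type, use_ramdisk):
    import time  # local import keeps the sketch self-contained

    start = time.monotonic()
    # ... restore `snapshot` into a fresh microVM via vm_builder and resume
    # it (details elided; illustrative only) ...
    resume_ms = (time.monotonic() - start) * 1000
    logger.info("Resumed %s snapshot in %.2f ms.", snapshot_type, resume_ms)
    return resume_ms
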
Example #6
def _test_snapshot_create_latency(context):
    logger = context.custom["logger"]
    vm_builder = context.custom["builder"]
    snapshot_type = context.custom["snapshot_type"]
    file_dumper = context.custom["results_file_dumper"]
    diff_snapshots = snapshot_type == SnapshotType.DIFF

    # Create a rw copy artifact.
    rw_disk = context.disk.copy()
    # Get ssh key from read-only artifact.
    ssh_key = context.disk.ssh_key()

    logger.info("Fetching firecracker/jailer versions from {}.".format(
        DEFAULT_TEST_IMAGES_S3_BUCKET))
    artifacts = ArtifactCollection(_test_images_s3_bucket())
    firecracker_versions = artifacts.firecracker_versions(
        # v1.0.0 breaks snapshot compatibility with older versions.
        min_version="1.0.0",
        max_version=get_firecracker_version_from_toml())
    assert len(firecracker_versions) > 0

    # Test snapshot creation for every supported target version.
    for target_version in firecracker_versions:
        logger.info("Measuring snapshot create(%s) latency for target "
                    "version %s and microvm: \"%s\", kernel %s, disk %s.",
                    snapshot_type, target_version, context.microvm.name(),
                    context.kernel.name(), context.disk.name())

        # Create a fresh microVM from artifacts.
        vm_instance = vm_builder.build(kernel=context.kernel,
                                       disks=[rw_disk],
                                       ssh_key=ssh_key,
                                       config=context.microvm,
                                       diff_snapshots=diff_snapshots,
                                       use_ramdisk=True)
        vm = vm_instance.vm
        # Configure metrics system.
        metrics_fifo_path = os.path.join(vm.path, 'metrics_fifo')
        metrics_fifo = log_tools.Fifo(metrics_fifo_path)

        response = vm.metrics.put(
            metrics_path=vm.create_jailed_resource(metrics_fifo.path))
        assert vm.api_session.is_status_no_content(response.status_code)

        vm.start()

        # Check if the needed CPU cores are available. We have the API
        # thread, VMM thread and then one thread for each configured vCPU.
        assert CpuMap.len() >= 2 + vm.vcpus_count

        # Pin uVM threads to physical cores.
        current_cpu_id = 0
        assert vm.pin_vmm(current_cpu_id), \
            "Failed to pin firecracker thread."
        current_cpu_id += 1
        assert vm.pin_api(current_cpu_id), \
            "Failed to pin fc_api thread."
        for idx_vcpu in range(vm.vcpus_count):
            current_cpu_id += 1
            assert vm.pin_vcpu(idx_vcpu, current_cpu_id), \
                f"Failed to pin fc_vcpu {idx_vcpu} thread."

        st_core = core.Core(
            name="snapshot_create_full_latency" if snapshot_type
            == SnapshotType.FULL else "snapshot_create_diff_latency",
            iterations=SAMPLE_COUNT)

        prod = producer.LambdaProducer(func=snapshot_create_producer,
                                       func_kwargs={
                                           "logger": logger,
                                           "vm": vm,
                                           "disks": [rw_disk],
                                           "ssh_key": ssh_key,
                                           "target_version": target_version,
                                           "metrics_fifo": metrics_fifo,
                                           "snapshot_type": snapshot_type
                                       })

        cons = consumer.LambdaConsumer(
            func=lambda cons, result: cons.consume_stat(
                st_name="max", ms_name="latency", value=result),
            func_kwargs={})
        eager_map(
            cons.set_measurement_def,
            snapshot_create_measurements(context.microvm.name(),
                                         snapshot_type))

        st_core.add_pipe(producer=prod,
                         consumer=cons,
                         tag=context.microvm.name())

        # Gather results and verify pass criteria for this target version.
        try:
            result = st_core.run_exercise()
        except core.CoreException as err:
            handle_failure(file_dumper, err)

        dump_test_result(file_dumper, result)
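

# The pinning scheme used above, spelled out: core 0 hosts the VMM thread,
# core 1 the API thread, and cores 2..(2 + N - 1) the N vCPU threads, which
# is why the test asserts CpuMap.len() >= 2 + vm.vcpus_count. A small worked
# example (the thread names are illustrative):
def sketch_pin_layout(vcpus_count):
    layout = {"fc_vmm": 0, "fc_api": 1}
    for idx_vcpu in range(vcpus_count):
        layout[f"fc_vcpu {idx_vcpu}"] = 2 + idx_vcpu
    return layout


assert sketch_pin_layout(2) == {
    "fc_vmm": 0, "fc_api": 1, "fc_vcpu 0": 2, "fc_vcpu 1": 3
}
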
def fio_workload(context):
    """Execute block device emulation benchmarking scenarios."""
    vm_builder = context.custom['builder']
    logger = context.custom["logger"]

    # Create a rw copy artifact.
    rw_disk = context.disk.copy()
    # Get ssh key from read-only artifact.
    ssh_key = context.disk.ssh_key()
    # Create a fresh microvm from artifacts.
    basevm = vm_builder.build(kernel=context.kernel,
                              disks=[rw_disk],
                              ssh_key=ssh_key,
                              config=context.microvm)

    # Add a secondary block device for benchmark tests.
    fs = drive_tools.FilesystemFile(
        os.path.join(basevm.fsfiles, 'scratch'),
        CONFIG["block_device_size"]
    )
    basevm.add_drive('scratch', fs.path)
    basevm.start()

    # Pin uVM threads to physical cores.
    current_cpu_id = 0
    basevm.pin_vmm(current_cpu_id)
    current_cpu_id += 1
    basevm.pin_api(current_cpu_id)
    for vcpu_id in range(basevm.vcpus_count):
        current_cpu_id += 1
        basevm.pin_vcpu(vcpu_id, current_cpu_id)

    st_core = core.Core(name=TEST_ID,
                        iterations=1,
                        custom={"microvm": context.microvm.name(),
                                "kernel": context.kernel.name(),
                                "disk": context.disk.name()})

    logger.info("Testing with microvm: \"{}\", kernel {}, disk {}"
                .format(context.microvm.name(),
                        context.kernel.name(),
                        context.disk.name()))

    ssh_connection = net_tools.SSHConnection(basevm.ssh_config)
    env_id = f"{context.kernel.name()}/{context.disk.name()}"
    for mode in CONFIG["fio_modes"]:
        ms_defs = measurements(mode)
        for bs in CONFIG["fio_blk_sizes"]:
            fio_id = f"{mode}-bs{bs}-{basevm.vcpus_count}vcpu"
            st_defs = statistics(mode, env_id, fio_id)
            st_prod = st.producer.LambdaProducer(
                func=run_fio,
                func_kwargs={
                    "env_id": env_id,
                    "basevm": basevm,
                    "ssh_conn": ssh_connection,
                    "mode": mode,
                    "bs": bs
                }
            )

            numjobs = CONFIG["load_factor"] * basevm.vcpus_count
            st_cons = st.consumer.LambdaConsumer(
                consume_stats=False,
                func=consume_fio_output,
                func_kwargs={"numjobs": numjobs,
                             "mode": mode,
                             "bs": bs,
                             "env_id": env_id})
            eager_map(st_cons.set_measurement_def, ms_defs)
            eager_map(st_cons.set_stat_def, st_defs)
            st_core.add_pipe(st_prod, st_cons, tag=f"{env_id}/{fio_id}")

    st_core.run_exercise()
    basevm.kill()
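

# Worked example of the identifiers built above (all values illustrative):
def sketch_fio_tag():
    env_id = "vmlinux-4.14/ubuntu-18.04"   # f"{kernel.name()}/{disk.name()}"
    mode, bs, vcpus_count = "randread", 4096, 2
    fio_id = f"{mode}-bs{bs}-{vcpus_count}vcpu"
    assert (f"{env_id}/{fio_id}"
            == "vmlinux-4.14/ubuntu-18.04/randread-bs4096-2vcpu")
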
def create_pipes_generator(basevm,
                           mode,
                           current_avail_cpu,
                           protocol,
                           host_ip,
                           env_id):
    """Create producer/consumer pipes."""
    host_cpu_model_name = get_cpu_model_name()
    cpus_baselines = test_cfg.CONFIG["hosts"]["instances"]["m5d.metal"]["cpus"]
    stats = no_criteria_stats()
    baselines = list(filter(
        lambda baseline: baseline["model"] == host_cpu_model_name,
        cpus_baselines))

    for payload_length in protocol["payload_length"]:
        for ws in protocol["window_size"]:
            iperf_guest_cmd_builder = CmdBuilder(test_cfg.IPERF3) \
                .with_arg("--verbose") \
                .with_arg("--client", host_ip) \
                .with_arg("--time", test_cfg.CONFIG["time"]) \
                .with_arg("--json") \
                .with_arg("--omit", protocol["omit"])

            if ws:
                iperf_guest_cmd_builder = iperf_guest_cmd_builder \
                    .with_arg("--window", f"{ws}")
                iperf3_id_ws = ws
            else:
                iperf3_id_ws = "DEFAULT"

            if payload_length:
                iperf_guest_cmd_builder = iperf_guest_cmd_builder \
                    .with_arg("--len", f"{payload_length}")
                iperf3_id_payload_len = payload_length
            else:
                iperf3_id_payload_len = "DEFAULT"

            iperf3_id = f"tcp-p{iperf3_id_payload_len}" \
                        f"-ws{iperf3_id_ws}-{basevm.vcpus_count}vcpu-{mode}"

            cons = consumer.LambdaConsumer(
                consume_stats=False,
                func=consume_iperf_tcp_output,
                func_kwargs={
                    "vcpus_count": basevm.vcpus_count
                }
            )

            if len(baselines) > 0:
                stats = criteria_stats(baselines[0], iperf3_id, env_id)

            eager_map(cons.set_measurement_def, measurements())
            eager_map(cons.set_stat_def, stats)

            prod_kwargs = {
                "guest_cmd_builder": iperf_guest_cmd_builder,
                "basevm": basevm,
                "current_avail_cpu": current_avail_cpu,
                "runtime": test_cfg.CONFIG["time"],
                "omit": protocol["omit"],
                "load_factor": test_cfg.CONFIG["load_factor"],
                "modes": test_cfg.CONFIG["modes"][mode]
            }
            prod = producer.LambdaProducer(produce_iperf_output,
                                           prod_kwargs)
            yield cons, prod, f"{env_id}/{iperf3_id}"
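

# Note on the baseline fallback above: `stats` starts as no_criteria_stats()
# and is only replaced with pass/fail criteria when the host CPU model has a
# matching baseline entry. A sketch of that selection (the baseline shapes
# and model names are illustrative):
def sketch_select_baseline(cpus_baselines, host_cpu_model_name):
    baselines = [baseline for baseline in cpus_baselines
                 if baseline["model"] == host_cpu_model_name]
    return baselines[0] if baselines else None


assert sketch_select_baseline(
    [{"model": "Intel(R) Xeon(R) Platinum 8175M CPU @ 2.50GHz"}],
    "AMD EPYC 7571") is None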