Example #1
def iperf_workload(context):
    """Iperf between guest and host in both directions for TCP workload."""
    vm_builder = context.custom['builder']
    logger = context.custom["logger"]
    file_dumper = context.custom['results_file_dumper']

    # Create a rw copy artifact.
    rw_disk = context.disk.copy()
    # Get ssh key from read-only artifact.
    ssh_key = context.disk.ssh_key()
    # Create a fresh microvm from artifacts.
    vm_instance = vm_builder.build(kernel=context.kernel,
                                   disks=[rw_disk],
                                   ssh_key=ssh_key,
                                   config=context.microvm)
    basevm = vm_instance.vm
    basevm.start()
    custom = {
        "microvm": context.microvm.name(),
        "kernel": context.kernel.name(),
        "disk": context.disk.name(),
        "cpu_model_name": get_cpu_model_name()
    }
    st_core = core.Core(name=TEST_ID, iterations=1, custom=custom)

    # Check if the needed CPU cores are available. We have the API thread, VMM
    # thread and then one thread for each configured vCPU.
    assert CpuMap.len() >= 2 + basevm.vcpus_count

    # Pin uVM threads to physical cores.
    current_avail_cpu = 0
    assert basevm.pin_vmm(current_avail_cpu), \
        "Failed to pin firecracker thread."
    current_avail_cpu += 1
    assert basevm.pin_api(current_avail_cpu), \
        "Failed to pin fc_api thread."
    for i in range(basevm.vcpus_count):
        current_avail_cpu += 1
        assert basevm.pin_vcpu(i, current_avail_cpu), \
            f"Failed to pin fc_vcpu {i} thread."

    logger.info("Testing with microvm: \"{}\", kernel {}, disk {}".format(
        context.microvm.name(), context.kernel.name(), context.disk.name()))

    for cons, prod, tag in \
            pipes(basevm,
                  DEFAULT_HOST_IP,
                  current_avail_cpu + 1,
                  f"{context.kernel.name()}/{context.disk.name()}/"
                  f"{context.microvm.name()}"):
        st_core.add_pipe(prod, cons, tag)

    # Start running the commands on guest, gather results and verify pass
    # criteria.
    try:
        result = st_core.run_exercise()
    except core.CoreException as err:
        handle_failure(file_dumper, err)

    file_dumper.dump(result)
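
The asserts above rely on the framework's pin_vmm, pin_api and pin_vcpu helpers to keep the VMM, API and vCPU threads on dedicated physical cores, so the iperf numbers are not distorted by scheduler migrations. Below is a minimal stdlib-only sketch of the underlying mechanism; the helper name and the exact thread names are assumptions for illustration, not the test framework's implementation.

import os

def pin_thread_by_name(pid, thread_name, cpu):
    # Walk the threads of the given process and pin the first one whose
    # comm (thread name) matches, using the Linux affinity syscall.
    for tid in os.listdir(f"/proc/{pid}/task"):
        with open(f"/proc/{pid}/task/{tid}/comm") as comm:
            if comm.read().strip() == thread_name:
                os.sched_setaffinity(int(tid), {cpu})
                return True
    return False

# Hypothetical usage mirroring the pinning loop above (the thread-name
# format is an assumption):
# pin_thread_by_name(firecracker_pid, "firecracker", 0)  # VMM thread
# pin_thread_by_name(firecracker_pid, "fc_api", 1)       # API thread
# pin_thread_by_name(firecracker_pid, "fc_vcpu 0", 2)    # first vCPU thread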
Example #2
def snapshot_workload(context):
    """Test all VM configurations for snapshot restore."""
    file_dumper = context.custom["results_file_dumper"]

    st_core = core.Core(name=TEST_ID,
                        iterations=1,
                        custom={"cpu_model_name": get_cpu_model_name()})

    snapshot_scaling_vcpus(context, st_core, vcpu_count=10)
    snapshot_scaling_mem(context, st_core, mem_exponent=9)
    snapshot_scaling_net(context, st_core)
    snapshot_scaling_block(context, st_core)
    snapshot_all_devices(context, st_core)

    # Gather results and verify pass criteria.
    try:
        result = st_core.run_exercise()
    except core.CoreException as err:
        handle_failure(file_dumper, err)

    dump_test_result(file_dumper, result)
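
All of these examples follow the same statistics-framework pattern that this one shows most compactly: build a core.Core exercise, register producer/consumer pipes (here presumably done by the snapshot_scaling_* helpers), call run_exercise, and dump the result. A rough, framework-free sketch of those semantics is shown below; MiniCore is a toy stand-in written for illustration, not the actual core.Core implementation.

class MiniCore:
    """Toy stand-in for core.Core: run each pipe's producer, feed the
    samples to its consumer, and collect results keyed by tag."""

    def __init__(self, name, iterations=1, custom=None):
        self.name = name
        self.iterations = iterations
        self.custom = custom or {}
        self.pipes = []

    def add_pipe(self, producer, consumer, tag):
        self.pipes.append((producer, consumer, tag))

    def run_exercise(self):
        results = {"name": self.name, "custom": self.custom, "results": {}}
        for producer, consumer, tag in self.pipes:
            samples = [producer() for _ in range(self.iterations)]
            results["results"][tag] = consumer(samples)
        return results

# Hypothetical usage:
# mini = MiniCore(name="demo", iterations=3)
# mini.add_pipe(lambda: 42, lambda s: {"max": max(s)}, tag="latency")
# print(mini.run_exercise())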
Example #3
def fio_workload(context):
    """Execute block device emulation benchmarking scenarios."""
    vm_builder = context.custom['builder']
    logger = context.custom["logger"]
    file_dumper = context.custom["results_file_dumper"]

    # Create a rw copy artifact.
    rw_disk = context.disk.copy()
    # Get ssh key from read-only artifact.
    ssh_key = context.disk.ssh_key()
    # Create a fresh microvm from artifacts.
    vm_instance = vm_builder.build(kernel=context.kernel,
                                   disks=[rw_disk],
                                   ssh_key=ssh_key,
                                   config=context.microvm)
    basevm = vm_instance.vm

    # Add a secondary block device for benchmark tests.
    fs = drive_tools.FilesystemFile(os.path.join(basevm.fsfiles, 'scratch'),
                                    CONFIG["block_device_size"])
    basevm.add_drive('scratch', fs.path)
    basevm.start()

    # Pin uVM threads to physical cores.
    current_cpu_id = 0
    basevm.pin_vmm(current_cpu_id)
    current_cpu_id += 1
    basevm.pin_api(current_cpu_id)
    for vcpu_id in range(basevm.vcpus_count):
        current_cpu_id += 1
        basevm.pin_vcpu(vcpu_id, current_cpu_id)

    st_core = core.Core(name=TEST_ID,
                        iterations=1,
                        custom={
                            "microvm": context.microvm.name(),
                            "kernel": context.kernel.name(),
                            "disk": context.disk.name(),
                            "cpu_model_name": get_cpu_model_name()
                        })

    logger.info("Testing with microvm: \"{}\", kernel {}, disk {}".format(
        context.microvm.name(), context.kernel.name(), context.disk.name()))

    ssh_connection = net_tools.SSHConnection(basevm.ssh_config)
    env_id = f"{context.kernel.name()}/{context.disk.name()}/" \
             f"{context.microvm.name()}"

    for mode in CONFIG["fio_modes"]:
        for bs in CONFIG["fio_blk_sizes"]:
            fio_id = f"{mode}-bs{bs}"
            st_prod = st.producer.LambdaProducer(func=run_fio,
                                                 func_kwargs={
                                                     "env_id": env_id,
                                                     "basevm": basevm,
                                                     "ssh_conn":
                                                     ssh_connection,
                                                     "mode": mode,
                                                     "bs": bs
                                                 })
            st_cons = st.consumer.LambdaConsumer(
                metadata_provider=DictMetadataProvider(
                    CONFIG["measurements"],
                    BlockBaselinesProvider(env_id, fio_id)),
                func=consume_fio_output,
                func_kwargs={
                    "numjobs": basevm.vcpus_count,
                    "mode": mode,
                    "bs": bs,
                    "env_id": env_id,
                    "logs_path": basevm.jailer.chroot_base_with_id()
                })
            st_core.add_pipe(st_prod, st_cons, tag=f"{env_id}/{fio_id}")

    # Gather results and verify pass criteria.
    try:
        result = st_core.run_exercise()
    except core.CoreException as err:
        handle_failure(file_dumper, err)

    dump_test_result(file_dumper, result)
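
run_fio and consume_fio_output are defined elsewhere in the test suite. If fio is launched with --output-format=json, the throughput figures can be pulled out of its report roughly as sketched below; the helper name is illustrative, and the key paths follow fio's documented JSON layout (jobs[*].read/write with bw and iops fields).

import json

def parse_fio_json(raw_output):
    # Aggregate bandwidth and IOPS, as reported by fio, over all jobs.
    data = json.loads(raw_output)
    totals = {"read_bw": 0, "write_bw": 0, "read_iops": 0.0, "write_iops": 0.0}
    for job in data["jobs"]:
        totals["read_bw"] += job["read"]["bw"]
        totals["write_bw"] += job["write"]["bw"]
        totals["read_iops"] += job["read"]["iops"]
        totals["write_iops"] += job["write"]["iops"]
    return totals

# One mode/block-size pair from CONFIG could be exercised on the guest with
# standard fio options, e.g.:
# fio --name=bench --filename=/dev/vdb --direct=1 --rw=randread --bs=4096 \
#     --numjobs=<vcpus> --time_based --runtime=30 --output-format=json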
Example #4
def _test_older_snapshot_resume_latency(context):
    builder = context.custom["builder"]
    logger = context.custom["logger"]
    snapshot_type = context.custom['snapshot_type']
    file_dumper = context.custom["results_file_dumper"]

    firecracker = context.firecracker
    jailer = firecracker.jailer()
    jailer.download()
    fc_version = firecracker.base_name()[1:]
    logger.info("Firecracker version: %s", fc_version)
    logger.info("Source Firecracker: %s", firecracker.local_path())
    logger.info("Source Jailer: %s", jailer.local_path())

    # Create a fresh microvm with the binary artifacts.
    vm_instance = builder.build_vm_micro(firecracker.local_path(),
                                         jailer.local_path())
    basevm = vm_instance.vm
    basevm.start()
    ssh_connection = net_tools.SSHConnection(basevm.ssh_config)

    # Check if guest works.
    exit_code, _, _ = ssh_connection.execute_command("ls")
    assert exit_code == 0

    # The snapshot builder expects disks as paths, not artifacts.
    disks = []
    for disk in vm_instance.disks:
        disks.append(disk.local_path())

    # Create a snapshot builder from a microvm.
    snapshot_builder = SnapshotBuilder(basevm)
    snapshot = snapshot_builder.create(disks, vm_instance.ssh_key,
                                       snapshot_type)

    basevm.kill()

    st_core = core.Core(name="older_snapshot_resume_latency",
                        iterations=SAMPLE_COUNT)

    prod = producer.LambdaProducer(func=snapshot_resume_producer,
                                   func_kwargs={
                                       "logger": logger,
                                       "vm_builder": builder,
                                       "snapshot": snapshot,
                                       "snapshot_type": snapshot_type,
                                       "use_ramdisk": False
                                   })

    cons = consumer.LambdaConsumer(func=lambda cons, result: cons.consume_stat(
        st_name="max", ms_name="latency", value=result),
                                   func_kwargs={})
    eager_map(cons.set_measurement_def,
              snapshot_resume_measurements(context.microvm.name()))

    st_core.add_pipe(producer=prod, consumer=cons, tag=context.microvm.name())

    # Gather results and verify pass criteria.
    try:
        result = st_core.run_exercise()
    except core.CoreException as err:
        handle_failure(file_dumper, err)

    dump_test_result(file_dumper, result)
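
snapshot_resume_producer, referenced by the LambdaProducer above, presumably restores a microVM from the snapshot and reports how long the resume took; the value is then folded into the "max"/"latency" statistic by the consumer. A self-contained sketch of that producer shape is given below, with restore_fn standing in for whatever restore call the builder exposes (a placeholder, not the real API).

import time

def time_restore(restore_fn):
    # Time one snapshot restore and return the latency in milliseconds.
    start = time.monotonic()
    restore_fn()
    return (time.monotonic() - start) * 1000.0

# Hypothetical wiring as a producer, mirroring the pipe above:
# prod = producer.LambdaProducer(
#     func=time_restore,
#     func_kwargs={"restore_fn": my_restore_callable})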
Example #5
def _test_snapshot_resume_latency(context):
    logger = context.custom['logger']
    vm_builder = context.custom['builder']
    snapshot_type = context.custom['snapshot_type']
    file_dumper = context.custom['results_file_dumper']
    diff_snapshots = snapshot_type == SnapshotType.DIFF

    logger.info("""Measuring snapshot resume({}) latency for microvm: \"{}\",
    kernel {}, disk {} """.format(snapshot_type, context.microvm.name(),
                                  context.kernel.name(), context.disk.name()))

    # Create a rw copy artifact.
    rw_disk = context.disk.copy()
    # Get ssh key from read-only artifact.
    ssh_key = context.disk.ssh_key()
    # Create a fresh microvm from artifacts.
    vm_instance = vm_builder.build(kernel=context.kernel,
                                   disks=[rw_disk],
                                   ssh_key=ssh_key,
                                   config=context.microvm,
                                   diff_snapshots=diff_snapshots,
                                   use_ramdisk=True)
    basevm = vm_instance.vm
    basevm.start()
    ssh_connection = net_tools.SSHConnection(basevm.ssh_config)

    # Check if guest works.
    exit_code, _, _ = ssh_connection.execute_command("ls")
    assert exit_code == 0

    logger.info("Create {}.".format(snapshot_type))
    # Create a snapshot builder from a microvm.
    snapshot_builder = SnapshotBuilder(basevm)

    snapshot = snapshot_builder.create([rw_disk.local_path()],
                                       ssh_key,
                                       snapshot_type,
                                       use_ramdisk=True)

    basevm.kill()

    st_core = core.Core(name="snapshot_resume_latency",
                        iterations=SAMPLE_COUNT)

    prod = producer.LambdaProducer(func=snapshot_resume_producer,
                                   func_kwargs={
                                       "logger": logger,
                                       "vm_builder": vm_builder,
                                       "snapshot": snapshot,
                                       "snapshot_type": snapshot_type,
                                       "use_ramdisk": True
                                   })

    cons = consumer.LambdaConsumer(func=lambda cons, result: cons.consume_stat(
        st_name="max", ms_name="latency", value=result),
                                   func_kwargs={})
    eager_map(cons.set_measurement_def,
              snapshot_resume_measurements(context.microvm.name()))

    st_core.add_pipe(producer=prod, consumer=cons, tag=context.microvm.name())

    # Gather results and verify pass criteria.
    try:
        result = st_core.run_exercise()
    except core.CoreException as err:
        handle_failure(file_dumper, err)

    dump_test_result(file_dumper, result)
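
On the consumer side, each sample produced over SAMPLE_COUNT iterations is folded into the "max" statistic of the "latency" measurement, and snapshot_resume_measurements attaches the per-microVM pass criterion. Stripped of the framework, the check amounts to something like the sketch below; the 70 ms baseline is a made-up number for illustration, not an actual target.

def check_max_latency(samples_ms, baseline_ms):
    # Fold per-iteration samples into a single "max" statistic and compare
    # it against the baseline for this microVM configuration.
    observed_max = max(samples_ms)
    return {
        "measurement": "latency",
        "statistic": "max",
        "value": observed_max,
        "passed": observed_max <= baseline_ms,
    }

# check_max_latency([52.1, 48.7, 55.3], baseline_ms=70.0)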
Example #6
def _test_snapshot_create_latency(context):
    logger = context.custom['logger']
    vm_builder = context.custom['builder']
    snapshot_type = context.custom['snapshot_type']
    file_dumper = context.custom['results_file_dumper']
    diff_snapshots = snapshot_type == SnapshotType.DIFF

    # Create a rw copy artifact.
    rw_disk = context.disk.copy()
    # Get ssh key from read-only artifact.
    ssh_key = context.disk.ssh_key()

    logger.info("Fetching firecracker/jailer versions from {}.".format(
        DEFAULT_TEST_IMAGES_S3_BUCKET))
    artifacts = ArtifactCollection(_test_images_s3_bucket())
    firecracker_versions = artifacts.firecracker_versions(
        # v1.0.0 breaks snapshot compatibility with older versions.
        min_version="1.0.0",
        max_version=get_firecracker_version_from_toml())
    assert len(firecracker_versions) > 0

    # Test snapshot creation for every supported target version.
    for target_version in firecracker_versions:
        logger.info("""Measuring snapshot create({}) latency for target
        version: {} and microvm: \"{}\", kernel {}, disk {} """.format(
            snapshot_type, target_version, context.microvm.name(),
            context.kernel.name(), context.disk.name()))

        # Create a fresh microVM from artifacts.
        vm_instance = vm_builder.build(kernel=context.kernel,
                                       disks=[rw_disk],
                                       ssh_key=ssh_key,
                                       config=context.microvm,
                                       diff_snapshots=diff_snapshots,
                                       use_ramdisk=True)
        vm = vm_instance.vm
        # Configure metrics system.
        metrics_fifo_path = os.path.join(vm.path, 'metrics_fifo')
        metrics_fifo = log_tools.Fifo(metrics_fifo_path)

        response = vm.metrics.put(
            metrics_path=vm.create_jailed_resource(metrics_fifo.path))
        assert vm.api_session.is_status_no_content(response.status_code)

        vm.start()

        # Check if the needed CPU cores are available. We have the API
        # thread, VMM thread and then one thread for each configured vCPU.
        assert CpuMap.len() >= 2 + vm.vcpus_count

        # Pin uVM threads to physical cores.
        current_cpu_id = 0
        assert vm.pin_vmm(current_cpu_id), \
            "Failed to pin firecracker thread."
        current_cpu_id += 1
        assert vm.pin_api(current_cpu_id), \
            "Failed to pin fc_api thread."
        for idx_vcpu in range(vm.vcpus_count):
            current_cpu_id += 1
            assert vm.pin_vcpu(idx_vcpu, current_cpu_id), \
                f"Failed to pin fc_vcpu {idx_vcpu} thread."

        st_core = core.Core(
            name="snapshot_create_full_latency" if snapshot_type
            == SnapshotType.FULL else "snapshot_create_diff_latency",
            iterations=SAMPLE_COUNT)

        prod = producer.LambdaProducer(func=snapshot_create_producer,
                                       func_kwargs={
                                           "logger": logger,
                                           "vm": vm,
                                           "disks": [rw_disk],
                                           "ssh_key": ssh_key,
                                           "target_version": target_version,
                                           "metrics_fifo": metrics_fifo,
                                           "snapshot_type": snapshot_type
                                       })

        cons = consumer.LambdaConsumer(
            func=lambda cons, result: cons.consume_stat(
                st_name="max", ms_name="latency", value=result),
            func_kwargs={})
        eager_map(
            cons.set_measurement_def,
            snapshot_create_measurements(context.microvm.name(),
                                         snapshot_type))

        st_core.add_pipe(producer=prod,
                         consumer=cons,
                         tag=context.microvm.name())

    # Gather results and verify pass criteria.
    try:
        result = st_core.run_exercise()
    except core.CoreException as err:
        handle_failure(file_dumper, err)

    dump_test_result(file_dumper, result)
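
This variant also wires a metrics FIFO into the microVM before it starts, and snapshot_create_producer presumably derives the creation latency from that metrics stream. A minimal sketch of scanning newline-delimited JSON metrics for a latency field follows; the group/field names (latencies_us, full_create_snapshot) are assumptions about the metrics layout and should be checked against the Firecracker version under test.

import json

def read_latency_from_metrics(metrics_path, group="latencies_us",
                              field="full_create_snapshot"):
    # Metrics are emitted as one JSON object per line; keep the last
    # non-zero value of the requested latency field (microseconds).
    latency = None
    with open(metrics_path) as metrics:
        for line in metrics:
            entry = json.loads(line)
            value = entry.get(group, {}).get(field, 0)
            if value:
                latency = value
    return latency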
Example #7
def iperf_workload(context):
    """Run a statistic exercise."""
    vm_builder = context.custom['builder']
    logger = context.custom["logger"]
    file_dumper = context.custom['results_file_dumper']

    # Create a rw copy artifact.
    rw_disk = context.disk.copy()
    # Get ssh key from read-only artifact.
    ssh_key = context.disk.ssh_key()
    # Create a fresh microvm from artifacts.
    vm_instance = vm_builder.build(kernel=context.kernel,
                                   disks=[rw_disk],
                                   ssh_key=ssh_key,
                                   config=context.microvm)
    basevm = vm_instance.vm

    # Create a vsock device
    basevm.vsock.put(vsock_id="vsock0",
                     guest_cid=3,
                     uds_path="/" + VSOCK_UDS_PATH)

    basevm.start()

    st_core = core.Core(name="vsock_throughput",
                        iterations=1,
                        custom={'cpu_model_name': get_cpu_model_name()})

    # Check if the needed CPU cores are available. We have the API thread, VMM
    # thread and then one thread for each configured vCPU.
    assert CpuMap.len() >= 2 + basevm.vcpus_count

    # Pin uVM threads to physical cores.
    current_avail_cpu = 0
    assert basevm.pin_vmm(current_avail_cpu), \
        "Failed to pin firecracker thread."
    current_avail_cpu += 1
    assert basevm.pin_api(current_avail_cpu), \
        "Failed to pin fc_api thread."
    for i in range(basevm.vcpus_count):
        current_avail_cpu += 1
        assert basevm.pin_vcpu(i, current_avail_cpu), \
            f"Failed to pin fc_vcpu {i} thread."

    logger.info("Testing with microvm: \"{}\", kernel {}, disk {}".format(
        context.microvm.name(), context.kernel.name(), context.disk.name()))

    for cons, prod, tag in \
            pipes(basevm,
                  current_avail_cpu + 1,
                  f"{context.kernel.name()}/{context.disk.name()}/"
                  f"{context.microvm.name()}"):
        st_core.add_pipe(prod, cons, tag)

    # Start running the commands on guest, gather results and verify pass
    # criteria.
    try:
        result = st_core.run_exercise()
    except core.CoreException as err:
        handle_failure(file_dumper, err)

    dump_test_result(file_dumper, result)
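
Here pipes builds the iperf producer/consumer pairs for the vsock device, analogous to the TCP case in Example #1. To reproduce the throughput numbers by hand, iperf3 can be asked for JSON output and the end-of-run totals extracted as sketched below; the wrapper name is illustrative, while the key paths follow iperf3's JSON report (end.sum_sent / end.sum_received).

import json

def iperf3_throughput_gbps(raw_json):
    # Convert iperf3's end-of-run totals from bits/s to Gbps for both
    # directions of the run.
    report = json.loads(raw_json)
    return {
        "sent_gbps": report["end"]["sum_sent"]["bits_per_second"] / 1e9,
        "received_gbps": report["end"]["sum_received"]["bits_per_second"] / 1e9,
    }

# Typical client invocation (standard iperf3 options):
# iperf3 -c <server> -t 20 -P 1 --json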
Example #8
def _g2h_send_ping(context):
    """Send ping from guest to host."""
    logger = context.custom['logger']
    vm_builder = context.custom['builder']
    interval_between_req = context.custom['interval']
    name = context.custom['name']
    file_dumper = context.custom['results_file_dumper']

    logger.info("Testing {} with microvm: \"{}\", kernel {}, disk {} ".format(
        name, context.microvm.name(), context.kernel.name(),
        context.disk.name()))

    # Create a rw copy artifact.
    rw_disk = context.disk.copy()
    # Get ssh key from read-only artifact.
    ssh_key = context.disk.ssh_key()
    # Create a fresh microvm from artifacts.
    vm_instance = vm_builder.build(kernel=context.kernel,
                                   disks=[rw_disk],
                                   ssh_key=ssh_key,
                                   config=context.microvm)
    basevm = vm_instance.vm
    basevm.start()

    # Check if the needed CPU cores are available. We have the API thread, VMM
    # thread and then one thread for each configured vCPU.
    assert CpuMap.len() >= 2 + basevm.vcpus_count

    # Pin uVM threads to physical cores.
    current_cpu_id = 0
    assert basevm.pin_vmm(current_cpu_id), \
        "Failed to pin firecracker thread."
    current_cpu_id += 1
    assert basevm.pin_api(current_cpu_id), \
        "Failed to pin fc_api thread."
    for i in range(basevm.vcpus_count):
        current_cpu_id += 1
        assert basevm.pin_vcpu(i, current_cpu_id), \
            f"Failed to pin fc_vcpu {i} thread."

    custom = {
        "microvm": context.microvm.name(),
        "kernel": context.kernel.name(),
        "disk": context.disk.name(),
        "cpu_model_name": get_cpu_model_name()
    }

    st_core = core.Core(name="network_latency", iterations=1, custom=custom)
    cons = consumer.LambdaConsumer(
        func=consume_ping_output,
        func_kwargs={"requests": context.custom['requests']})
    cmd = PING.format(context.custom['requests'], interval_between_req,
                      DEFAULT_HOST_IP)
    prod = producer.SSHCommand(cmd, net_tools.SSHConnection(basevm.ssh_config))
    st_core.add_pipe(producer=prod, consumer=cons, tag="ping")

    # Gather results and verify pass criteria.
    try:
        result = st_core.run_exercise()
    except core.CoreException as err:
        handle_failure(file_dumper, err)

    dump_test_result(file_dumper, result)
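
consume_ping_output, used by the consumer above, presumably parses the rtt summary that ping prints after the run. Below is a standalone sketch of that parsing step, assuming the usual Linux iputils summary line ("rtt min/avg/max/mdev = .../.../.../... ms"); the function name is illustrative.

import re

def parse_ping_rtt(ping_output):
    # Extract the min/avg/max/mdev round-trip times (ms) from ping's summary.
    match = re.search(
        r"rtt min/avg/max/mdev = ([\d.]+)/([\d.]+)/([\d.]+)/([\d.]+) ms",
        ping_output)
    if match is None:
        return None
    return dict(zip(("min", "avg", "max", "mdev"),
                    (float(value) for value in match.groups())))

# parse_ping_rtt(output)["avg"] would feed the "latency" measurement above.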