def iperf_workload(context):
    """Iperf between guest and host in both directions for TCP workload."""
    vm_builder = context.custom['builder']
    logger = context.custom["logger"]
    file_dumper = context.custom['results_file_dumper']

    # Create a rw copy artifact.
    rw_disk = context.disk.copy()
    # Get ssh key from read-only artifact.
    ssh_key = context.disk.ssh_key()
    # Create a fresh microvm from artifacts.
    basevm = vm_builder.build(kernel=context.kernel,
                              disks=[rw_disk],
                              ssh_key=ssh_key,
                              config=context.microvm)

    basevm.start()
    custom = {
        "microvm": context.microvm.name(),
        "kernel": context.kernel.name(),
        "disk": context.disk.name(),
        "cpu_model_name": get_cpu_model_name()
    }
    st_core = core.Core(name="network_tcp_throughput",
                        iterations=1,
                        custom=custom)

    # Check if the needed CPU cores are available. We have the API thread, VMM
    # thread and then one thread for each configured vCPU.
    assert CpuMap.len() >= 2 + basevm.vcpus_count

    # Pin uVM threads to physical cores.
    current_avail_cpu = 0
    assert basevm.pin_vmm(current_avail_cpu), \
        "Failed to pin firecracker thread."
    current_avail_cpu += 1
    assert basevm.pin_api(current_avail_cpu), \
        "Failed to pin fc_api thread."
    for i in range(basevm.vcpus_count):
        current_avail_cpu += 1
        assert basevm.pin_vcpu(i, current_avail_cpu), \
            f"Failed to pin fc_vcpu {i} thread."

    logger.info("Testing with microvm: \"{}\", kernel {}, disk {}"
                .format(context.microvm.name(),
                        context.kernel.name(),
                        context.disk.name()))

    for cons, prod, tag in \
            pipes(basevm,
                  DEFAULT_HOST_IP,
                  current_avail_cpu + 1,
                  f"{context.kernel.name()}/{context.disk.name()}"):
        st_core.add_pipe(prod, cons, tag)

    # Start running the commands on guest, gather results and verify pass
    # criteria.
    results = st_core.run_exercise(check_criteria=file_dumper is None)
    if file_dumper:
        file_dumper.writeln(json.dumps(results))
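
The pin_vmm/pin_api/pin_vcpu helpers above belong to the test framework. As a minimal sketch of the underlying mechanism, assuming the Firecracker thread's TID has already been resolved (pin_thread_to_core is a hypothetical name, not the framework's API):

import os

def pin_thread_to_core(tid, core_id):
    """Pin a single thread (by TID) to one physical core.

    Illustrative only: the framework's pin_* helpers first resolve the
    Firecracker thread names (firecracker, fc_api, fc_vcpu N) to TIDs.
    """
    try:
        # Restrict the thread's CPU affinity to exactly one core.
        os.sched_setaffinity(tid, {core_id})
        return True
    except OSError:
        return False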
Example #2
def _g2h_send_ping(context):
    """Send ping from guest to host."""
    logger = context.custom['logger']
    vm_builder = context.custom['builder']
    interval_between_req = context.custom['interval']
    name = context.custom['name']
    file_dumper = context.custom['results_file_dumper']

    logger.info("Testing {} with microvm: \"{}\", kernel {}, disk {} ".format(
        name, context.microvm.name(), context.kernel.name(),
        context.disk.name()))

    # Create a rw copy artifact.
    rw_disk = context.disk.copy()
    # Get ssh key from read-only artifact.
    ssh_key = context.disk.ssh_key()
    # Create a fresh microvm from artifacts.
    basevm = vm_builder.build(kernel=context.kernel,
                              disks=[rw_disk],
                              ssh_key=ssh_key,
                              config=context.microvm)

    basevm.start()

    # Check if the needed CPU cores are available. We have the API thread, VMM
    # thread and then one thread for each configured vCPU.
    assert CpuMap.len() >= 2 + basevm.vcpus_count

    # Pin uVM threads to physical cores.
    current_cpu_id = 0
    assert basevm.pin_vmm(current_cpu_id), \
        "Failed to pin firecracker thread."
    current_cpu_id += 1
    assert basevm.pin_api(current_cpu_id), \
        "Failed to pin fc_api thread."
    for i in range(basevm.vcpus_count):
        current_cpu_id += 1
        assert basevm.pin_vcpu(i, current_cpu_id), \
            f"Failed to pin fc_vcpu {i} thread."

    custom = {
        "microvm": context.microvm.name(),
        "kernel": context.kernel.name(),
        "disk": context.disk.name(),
        "cpu_model_name": get_cpu_model_name()
    }

    st_core = core.Core(name="network_latency", iterations=1, custom=custom)
    cons = consumer.LambdaConsumer(
        func=consume_ping_output,
        func_kwargs={"requests": context.custom['requests']})
    cmd = PING.format(context.custom['requests'], interval_between_req,
                      DEFAULT_HOST_IP)
    prod = producer.SSHCommand(cmd, net_tools.SSHConnection(basevm.ssh_config))
    st_core.add_pipe(producer=prod, consumer=cons, tag="ping")

    # Gather results and verify pass criteria.
    result = st_core.run_exercise(file_dumper is None)
    if file_dumper:
        file_dumper.writeln(json.dumps(result))
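
consume_ping_output is defined elsewhere in the suite. A minimal sketch of a compatible parser, assuming the standard iputils summary line; the consumer interface and the "Avg" measurement name are assumptions taken from the surrounding examples:

import re

def consume_ping_output(cons, raw_data, requests):
    """Extract rtt statistics from ping output and feed the consumer.

    Sketch only: assumes iputils' 'rtt min/avg/max/mdev = ...' line.
    """
    match = re.search(
        r"rtt min/avg/max/mdev = ([\d.]+)/([\d.]+)/([\d.]+)/([\d.]+) ms",
        raw_data)
    assert match, "Unexpected ping output."
    _rtt_min, rtt_avg, _rtt_max, _rtt_mdev = map(float, match.groups())
    # Hypothetical measurement name; the real consumer may track more.
    cons.consume_data("Avg", rtt_avg)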
Example #3
def iperf_workload(context):
    """Run a statistic exercise."""
    vm_builder = context.custom['builder']
    logger = context.custom["logger"]
    file_dumper = context.custom['results_file_dumper']

    # Create a rw copy artifact.
    rw_disk = context.disk.copy()
    # Get ssh key from read-only artifact.
    ssh_key = context.disk.ssh_key()
    # Create a fresh microvm from artifacts.
    basevm = vm_builder.build(kernel=context.kernel,
                              disks=[rw_disk],
                              ssh_key=ssh_key,
                              config=context.microvm)

    # Create a vsock device.
    basevm.vsock.put(vsock_id="vsock0",
                     guest_cid=3,
                     uds_path="/" + VSOCK_UDS_PATH)

    basevm.start()

    st_core = core.Core(name="vsock_throughput",
                        iterations=1,
                        custom={'cpu_model_name': get_cpu_model_name()})

    # Check if the needed CPU cores are available. We have the API thread, VMM
    # thread and then one thread for each configured vCPU.
    assert CpuMap.len() >= 2 + basevm.vcpus_count

    # Pin uVM threads to physical cores.
    current_avail_cpu = 0
    assert basevm.pin_vmm(current_avail_cpu), \
        "Failed to pin firecracker thread."
    current_avail_cpu += 1
    assert basevm.pin_api(current_avail_cpu), \
        "Failed to pin fc_api thread."
    for i in range(basevm.vcpus_count):
        current_avail_cpu += 1
        assert basevm.pin_vcpu(i, current_avail_cpu), \
            f"Failed to pin fc_vcpu {i} thread."

    logger.info("Testing with microvm: \"{}\", kernel {}, disk {}".format(
        context.microvm.name(), context.kernel.name(), context.disk.name()))

    for cons, prod, tag in \
            pipes(basevm,
                  current_avail_cpu + 1,
                  f"{context.kernel.name()}/{context.disk.name()}"):
        st_core.add_pipe(prod, cons, tag)

    # Start running the commands on guest, gather results and verify pass
    # criteria.
    results = st_core.run_exercise(file_dumper is None)
    if file_dumper:
        file_dumper.writeln(json.dumps(results))

    basevm.kill()
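
Example #3 relies on Firecracker's hybrid vsock: the device registered at uds_path is exposed to the host as a Unix domain socket, and host-initiated connections perform a plain-text handshake before raw bytes flow. A minimal host-side sketch (the guest port value is illustrative):

import socket

def host_connect_vsock(uds_path, guest_port):
    """Connect from the host to a vsock listener inside the guest.

    Per Firecracker's hybrid vsock protocol: send 'CONNECT <port>\n'
    and expect an 'OK <host_port>\n' acknowledgement.
    """
    sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    sock.connect(uds_path)
    sock.sendall("CONNECT {}\n".format(guest_port).encode())
    reply = sock.recv(32)
    assert reply.startswith(b"OK"), reply
    return sock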
Example #4
def _g2h_send_ping(context):
    """Send ping from guest to host."""
    logger = context.custom['logger']
    vm_builder = context.custom['builder']
    network_config = context.custom['network_config']
    interval_between_req = context.custom['interval']
    name = context.custom['name']

    logger.info("Testing {} with microvm: \"{}\", kernel {}, disk {} ".format(
        name, context.microvm.name(), context.kernel.name(),
        context.disk.name()))

    # Create a rw copy artifact.
    rw_disk = context.disk.copy()
    # Get ssh key from read-only artifact.
    ssh_key = context.disk.ssh_key()
    # Create a fresh microvm from artifacts.
    basevm = vm_builder.build(kernel=context.kernel,
                              disks=[rw_disk],
                              ssh_key=ssh_key,
                              config=context.microvm,
                              network_config=network_config)

    _tap, host_ip, _ = basevm.ssh_network_config(network_config, '1')

    basevm.start()
    custom = {
        "microvm": context.microvm.name(),
        "kernel": context.kernel.name(),
        "disk": context.disk.name()
    }
    st_core = core.Core(name="network_latency", iterations=1, custom=custom)
    cons = consumer.LambdaConsumer(
        consume_stats=True,
        func=consume_ping_output,
        func_kwargs={"requests": context.custom['requests']})
    prod = producer.SSHCommand(
        PING.format(context.custom['requests'], interval_between_req, host_ip),
        net_tools.SSHConnection(basevm.ssh_config))
    st_core.add_pipe(producer=prod, consumer=cons, tag="ping")

    # Gather results and verify pass criteria.
    st_core.run_exercise()
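
PING is a format template defined elsewhere in the module. A plausible definition, assuming the standard ping flags for request count and inter-request interval (the exact constant is an assumption):

# Assumed shape of the PING template: request count, interval in
# seconds between requests, then the target IP.
PING = "ping -c {} -i {} {}"

# e.g. PING.format(1000, 0.2, host_ip) -> "ping -c 1000 -i 0.2 <host_ip>"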
Example #5
def fio_workload(context):
    """Execute block device emulation benchmarking scenarios."""
    vm_builder = context.custom['builder']
    logger = context.custom["logger"]

    # Create a rw copy artifact.
    rw_disk = context.disk.copy()
    # Get ssh key from read-only artifact.
    ssh_key = context.disk.ssh_key()
    # Create a fresh microvm from artifacts.
    basevm = vm_builder.build(kernel=context.kernel,
                              disks=[rw_disk],
                              ssh_key=ssh_key,
                              config=context.microvm)

    # Add a secondary block device for benchmark tests.
    fs = drive_tools.FilesystemFile(
        os.path.join(basevm.fsfiles, 'scratch'),
        CONFIG["block_device_size"]
    )
    basevm.add_drive('scratch', fs.path)
    basevm.start()

    # Pin uVM threads to physical cores.
    current_cpu_id = 0
    basevm.pin_vmm(current_cpu_id)
    current_cpu_id += 1
    basevm.pin_api(current_cpu_id)
    for vcpu_id in range(basevm.vcpus_count):
        current_cpu_id += 1
        basevm.pin_vcpu(vcpu_id, current_cpu_id)

    st_core = core.Core(name=TEST_ID,
                        iterations=1,
                        custom={"microvm": context.microvm.name(),
                                "kernel": context.kernel.name(),
                                "disk": context.disk.name()})

    logger.info("Testing with microvm: \"{}\", kernel {}, disk {}"
                .format(context.microvm.name(),
                        context.kernel.name(),
                        context.disk.name()))

    ssh_connection = net_tools.SSHConnection(basevm.ssh_config)
    env_id = f"{context.kernel.name()}/{context.disk.name()}"
    for mode in CONFIG["fio_modes"]:
        ms_defs = measurements(mode)
        for bs in CONFIG["fio_blk_sizes"]:
            fio_id = f"{mode}-bs{bs}-{basevm.vcpus_count}vcpu"
            st_defs = statistics(mode, env_id, fio_id)
            st_prod = st.producer.LambdaProducer(
                func=run_fio,
                func_kwargs={
                    "env_id": env_id,
                    "basevm": basevm,
                    "ssh_conn": ssh_connection,
                    "mode": mode,
                    "bs": bs
                }
            )

            numjobs = CONFIG['load_factor'] * basevm.vcpus_count
            st_cons = st.consumer.LambdaConsumer(
                consume_stats=False,
                func=consume_fio_output,
                func_kwargs={"numjobs": numjobs,
                             "mode": mode,
                             "bs": bs,
                             "env_id": env_id})
            eager_map(st_cons.set_measurement_def, ms_defs)
            eager_map(st_cons.set_stat_def, st_defs)
            st_core.add_pipe(st_prod, st_cons, tag=f"{env_id}/{fio_id}")

    st_core.run_exercise()
    basevm.kill()
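
run_fio is provided by the test module. A minimal sketch of launching fio on the guest over SSH, assuming the scratch drive appears as /dev/vdb and that a JSON report is wanted (the flag set and the helper body are illustrative, not the suite's implementation):

def run_fio(env_id, basevm, ssh_conn, mode, bs):
    """Run one fio job against the guest's scratch drive.

    Sketch only: the real helper also drops caches and gathers
    iops/bandwidth logs for the consumer.
    """
    cmd = ("fio --name={}-bs{} --rw={} --bs={} --filename=/dev/vdb "
           "--direct=1 --ioengine=libaio --numjobs={} --time_based "
           "--runtime=30 --output-format=json"
           .format(mode, bs, mode, bs, basevm.vcpus_count))
    rc, stdout, stderr = ssh_conn.execute_command(cmd)
    assert rc == 0, stderr.read()
    return stdout.read()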
Example #6
def fio_workload(context):
    """Execute block device emulation benchmarking scenarios."""
    vm_builder = context.custom['builder']
    logger = context.custom["logger"]
    file_dumper = context.custom["results_file_dumper"]

    # Create a rw copy artifact.
    rw_disk = context.disk.copy()
    # Get ssh key from read-only artifact.
    ssh_key = context.disk.ssh_key()
    # Create a fresh microvm from artifacts.
    basevm = vm_builder.build(kernel=context.kernel,
                              disks=[rw_disk],
                              ssh_key=ssh_key,
                              config=context.microvm)

    # Add a secondary block device for benchmark tests.
    fs = drive_tools.FilesystemFile(
        os.path.join(basevm.fsfiles, 'scratch'),
        CONFIG["block_device_size"]
    )
    basevm.add_drive('scratch', fs.path)
    basevm.start()

    # Pin uVM threads to physical cores.
    current_cpu_id = 0
    basevm.pin_vmm(current_cpu_id)
    current_cpu_id += 1
    basevm.pin_api(current_cpu_id)
    for vcpu_id in range(basevm.vcpus_count):
        current_cpu_id += 1
        basevm.pin_vcpu(vcpu_id, current_cpu_id)

    st_core = core.Core(name=TEST_ID,
                        iterations=1,
                        custom={"microvm": context.microvm.name(),
                                "kernel": context.kernel.name(),
                                "disk": context.disk.name(),
                                "cpu_model_name": get_cpu_model_name()})

    logger.info("Testing with microvm: \"{}\", kernel {}, disk {}"
                .format(context.microvm.name(),
                        context.kernel.name(),
                        context.disk.name()))

    ssh_connection = net_tools.SSHConnection(basevm.ssh_config)
    env_id = f"{context.kernel.name()}/{context.disk.name()}"

    for mode in CONFIG["fio_modes"]:
        for bs in CONFIG["fio_blk_sizes"]:
            fio_id = f"{mode}-bs{bs}-{basevm.vcpus_count}vcpu"
            st_prod = st.producer.LambdaProducer(
                func=run_fio,
                func_kwargs={"env_id": env_id, "basevm": basevm,
                             "ssh_conn": ssh_connection, "mode": mode,
                             "bs": bs})
            st_cons = st.consumer.LambdaConsumer(
                metadata_provider=DictMetadataProvider(
                    CONFIG["measurements"],
                    BlockBaselinesProvider(env_id,
                                           fio_id)),
                func=consume_fio_output,
                func_kwargs={"numjobs": basevm.vcpus_count, "mode": mode,
                             "bs": bs, "env_id": env_id,
                             "logs_path": basevm.jailer.chroot_base_with_id()})
            st_core.add_pipe(st_prod, st_cons, tag=f"{env_id}/{fio_id}")

    result = st_core.run_exercise(file_dumper is None)
    if file_dumper:
        file_dumper.writeln(json.dumps(result))
    basevm.kill()
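
consume_fio_output turns fio's report into measurements for the statistics consumer. A minimal sketch, assuming fio was run with --output-format=json; the measurement names and the consumer interface are assumptions:

import json

def consume_fio_output(cons, result, numjobs, mode, bs, env_id,
                       logs_path=None):
    """Aggregate per-job fio bandwidth and feed it to the consumer.

    Sketch only: the real consumer also parses the log files that
    fio writes under logs_path.
    """
    report = json.loads(result)
    # fio's JSON report lists one entry per job; 'bw' is in KiB/s.
    bw_read = sum(job["read"]["bw"] for job in report["jobs"])
    bw_write = sum(job["write"]["bw"] for job in report["jobs"])
    if bw_read:
        cons.consume_data("bw_read", bw_read)
    if bw_write:
        cons.consume_data("bw_write", bw_write)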