def create_pipes_generator(basevm, mode, current_avail_cpu, protocol, host_ip, env_id):
    """Create producer/consumer pipes.

    Yields one (consumer, producer, tag) triple per combination of payload
    length and TCP window size configured for the given protocol.
    """
    for payload_length in protocol["payload_length"]:
        for ws in protocol["window_size"]:
            # Base iperf3 client invocation; the host side runs the server.
            cmd_builder = (
                CmdBuilder(IPERF3)
                .with_arg("--verbose")
                .with_arg("--client", host_ip)
                .with_arg("--time", CONFIG_DICT["time"])
                .with_arg("--json")
                .with_arg("--omit", protocol["omit"])
            )
            # "DEFAULT" means: leave the knob to iperf3's own default.
            if ws != "DEFAULT":
                cmd_builder = cmd_builder.with_arg("--window", f"{ws}")
            if payload_length != "DEFAULT":
                cmd_builder = cmd_builder.with_arg("--len", f"{payload_length}")

            iperf3_id = f"tcp-p{payload_length}-ws{ws}-{mode}"

            cons = consumer.LambdaConsumer(
                metadata_provider=DictMetadataProvider(
                    measurements=CONFIG_DICT["measurements"],
                    baseline_provider=NetTCPThroughputBaselineProvider(
                        env_id, iperf3_id
                    ),
                ),
                func=consume_iperf_tcp_output,
                func_kwargs={"vcpus_count": basevm.vcpus_count},
            )

            prod = producer.LambdaProducer(
                produce_iperf_output,
                {
                    "guest_cmd_builder": cmd_builder,
                    "basevm": basevm,
                    "current_avail_cpu": current_avail_cpu,
                    "runtime": CONFIG_DICT["time"],
                    "omit": protocol["omit"],
                    "load_factor": CONFIG_DICT["load_factor"],
                    "modes": CONFIG_DICT["modes"][mode],
                },
            )

            yield cons, prod, f"{env_id}/{iperf3_id}"
def pipes(basevm, current_avail_cpu, env_id):
    """Producer/Consumer pipes generator."""
    for mode in CONFIG_DICT["modes"]:
        # We run bi-directional tests only on uVM with more than 2 vCPus
        # because we need to pin one iperf3/direction per vCPU, and since we
        # have two directions, we need at least two vCPUs.
        if mode == "bd" and basevm.vcpus_count < 2:
            continue

        for protocol in CONFIG_DICT["protocols"]:
            for payload_length in protocol["payload_length"]:
                # Assemble the guest-side iperf3 command step by step.
                builder = CmdBuilder(IPERF3)
                builder = builder.with_arg("--vsock")
                builder = builder.with_arg("-c", 2)
                builder = builder.with_arg("--json")
                builder = builder.with_arg("--omit", protocol["omit"])
                builder = builder.with_arg("--time", CONFIG_DICT["time"])
                # "DEFAULT" leaves the payload length to iperf3's default.
                if payload_length != "DEFAULT":
                    builder = builder.with_arg("--len", f"{payload_length}")

                iperf3_id = f"vsock-p{payload_length}-{mode}"

                cons = consumer.LambdaConsumer(
                    metadata_provider=DictMetadataProvider(
                        CONFIG_DICT["measurements"],
                        VsockThroughputBaselineProvider(env_id, iperf3_id),
                    ),
                    func=consume_iperf_output,
                )

                prod = producer.LambdaProducer(
                    produce_iperf_output,
                    {
                        "guest_cmd_builder": builder,
                        "basevm": basevm,
                        "current_avail_cpu": current_avail_cpu,
                        "runtime": CONFIG_DICT["time"],
                        "omit": protocol["omit"],
                        "load_factor": CONFIG_DICT["load_factor"],
                        "modes": CONFIG_DICT["modes"][mode],
                    },
                )

                yield cons, prod, f"{env_id}/{iperf3_id}"
def _test_older_snapshot_resume_latency(context):
    """Measure snapshot-resume latency using an older Firecracker binary.

    Boots a microVM from the context's (older) firecracker/jailer artifacts,
    snapshots it, then measures resume latency over SAMPLE_COUNT iterations.
    """
    builder = context.custom["builder"]
    logger = context.custom["logger"]
    snapshot_type = context.custom["snapshot_type"]
    file_dumper = context.custom["results_file_dumper"]
    firecracker = context.firecracker
    jailer = firecracker.jailer()
    jailer.download()
    # Artifact names look like "vX.Y.Z"; strip the leading "v".
    fc_version = firecracker.base_name()[1:]
    logger.info("Firecracker version: %s", fc_version)
    logger.info("Source Firecracker: %s", firecracker.local_path())
    logger.info("Source Jailer: %s", jailer.local_path())

    # Create a fresh microvm with the binary artifacts.
    vm_instance = builder.build_vm_micro(firecracker.local_path(),
                                         jailer.local_path())
    basevm = vm_instance.vm
    basevm.start()
    ssh_connection = net_tools.SSHConnection(basevm.ssh_config)

    # Check if guest works.
    exit_code, _, _ = ssh_connection.execute_command("ls")
    assert exit_code == 0

    # The snapshot builder expects disks as paths, not artifacts.
    disks = [disk.local_path() for disk in vm_instance.disks]

    # Create a snapshot builder from a microvm.
    snapshot_builder = SnapshotBuilder(basevm)
    snapshot = snapshot_builder.create(disks,
                                       vm_instance.ssh_key,
                                       snapshot_type)
    basevm.kill()

    st_core = core.Core(name="older_snapshot_resume_latency",
                        iterations=SAMPLE_COUNT)
    prod = producer.LambdaProducer(func=snapshot_resume_producer,
                                   func_kwargs={
                                       "logger": logger,
                                       "vm_builder": builder,
                                       "snapshot": snapshot,
                                       "snapshot_type": snapshot_type,
                                       "use_ramdisk": False
                                   })
    cons = consumer.LambdaConsumer(
        func=lambda cons, result: cons.consume_stat(
            st_name="max", ms_name="latency", value=result),
        func_kwargs={})
    eager_map(cons.set_measurement_def,
              snapshot_resume_measurements(context.microvm.name()))
    st_core.add_pipe(producer=prod, consumer=cons,
                     tag=context.microvm.name())

    # Gather results and verify pass criteria.
    try:
        result = st_core.run_exercise()
    except core.CoreException as err:
        handle_failure(file_dumper, err)
    else:
        # BUGFIX: dump only on success. Previously `result` was referenced
        # unconditionally, raising NameError if run_exercise() raised and
        # handle_failure() returned instead of failing the test.
        dump_test_result(file_dumper, result)
def _test_snapshot_resume_latency(context):
    """Measure snapshot-resume latency for the current Firecracker build.

    Boots a microVM from artifacts, snapshots it (FULL or DIFF per the
    context), then measures resume latency over SAMPLE_COUNT iterations.
    """
    logger = context.custom["logger"]
    vm_builder = context.custom["builder"]
    snapshot_type = context.custom["snapshot_type"]
    file_dumper = context.custom["results_file_dumper"]
    diff_snapshots = snapshot_type == SnapshotType.DIFF
    logger.info("""Measuring snapshot resume({}) latency for microvm: \"{}\",
kernel {}, disk {} """.format(snapshot_type,
                              context.microvm.name(),
                              context.kernel.name(),
                              context.disk.name()))

    # Create a rw copy artifact.
    rw_disk = context.disk.copy()
    # Get ssh key from read-only artifact.
    ssh_key = context.disk.ssh_key()
    # Create a fresh microvm from artifacts.
    vm_instance = vm_builder.build(kernel=context.kernel,
                                   disks=[rw_disk],
                                   ssh_key=ssh_key,
                                   config=context.microvm,
                                   diff_snapshots=diff_snapshots,
                                   use_ramdisk=True)
    basevm = vm_instance.vm
    basevm.start()
    ssh_connection = net_tools.SSHConnection(basevm.ssh_config)

    # Check if guest works.
    exit_code, _, _ = ssh_connection.execute_command("ls")
    assert exit_code == 0

    logger.info("Create {}.".format(snapshot_type))
    # Create a snapshot builder from a microvm.
    snapshot_builder = SnapshotBuilder(basevm)
    snapshot = snapshot_builder.create([rw_disk.local_path()],
                                       ssh_key,
                                       snapshot_type,
                                       use_ramdisk=True)
    basevm.kill()

    st_core = core.Core(name="snapshot_resume_latency",
                        iterations=SAMPLE_COUNT)
    prod = producer.LambdaProducer(func=snapshot_resume_producer,
                                   func_kwargs={
                                       "logger": logger,
                                       "vm_builder": vm_builder,
                                       "snapshot": snapshot,
                                       "snapshot_type": snapshot_type,
                                       "use_ramdisk": True
                                   })
    cons = consumer.LambdaConsumer(
        func=lambda cons, result: cons.consume_stat(
            st_name="max", ms_name="latency", value=result),
        func_kwargs={})
    eager_map(cons.set_measurement_def,
              snapshot_resume_measurements(context.microvm.name()))
    st_core.add_pipe(producer=prod, consumer=cons,
                     tag=context.microvm.name())

    # Gather results and verify pass criteria.
    try:
        result = st_core.run_exercise()
    except core.CoreException as err:
        handle_failure(file_dumper, err)
    else:
        # BUGFIX: dump only on success. Previously `result` was referenced
        # unconditionally, raising NameError if run_exercise() raised and
        # handle_failure() returned instead of failing the test.
        dump_test_result(file_dumper, result)
def _test_snapshot_create_latency(context):
    """Measure snapshot-create latency against every supported target version.

    For each Firecracker version that can be a snapshot target, boots a fresh
    microVM, pins its threads, and measures snapshot creation latency over
    SAMPLE_COUNT iterations.
    """
    logger = context.custom["logger"]
    vm_builder = context.custom["builder"]
    snapshot_type = context.custom["snapshot_type"]
    file_dumper = context.custom["results_file_dumper"]
    diff_snapshots = snapshot_type == SnapshotType.DIFF

    # Create a rw copy artifact.
    rw_disk = context.disk.copy()
    # Get ssh key from read-only artifact.
    ssh_key = context.disk.ssh_key()

    logger.info("Fetching firecracker/jailer versions from {}.".format(
        DEFAULT_TEST_IMAGES_S3_BUCKET))
    artifacts = ArtifactCollection(_test_images_s3_bucket())
    firecracker_versions = artifacts.firecracker_versions(
        # v1.0.0 breaks snapshot compatibility with older versions.
        min_version="1.0.0",
        max_version=get_firecracker_version_from_toml())
    assert len(firecracker_versions) > 0

    # Test snapshot creation for every supported target version.
    for target_version in firecracker_versions:
        logger.info("""Measuring snapshot create({}) latency for target
version: {} and microvm: \"{}\", kernel {}, disk {} """.format(
            snapshot_type, target_version,
            context.microvm.name(),
            context.kernel.name(),
            context.disk.name()))

        # Create a fresh microVM from artifacts.
        vm_instance = vm_builder.build(kernel=context.kernel,
                                       disks=[rw_disk],
                                       ssh_key=ssh_key,
                                       config=context.microvm,
                                       diff_snapshots=diff_snapshots,
                                       use_ramdisk=True)
        vm = vm_instance.vm

        # Configure metrics system.
        metrics_fifo_path = os.path.join(vm.path, "metrics_fifo")
        metrics_fifo = log_tools.Fifo(metrics_fifo_path)
        response = vm.metrics.put(
            metrics_path=vm.create_jailed_resource(metrics_fifo.path))
        assert vm.api_session.is_status_no_content(response.status_code)

        vm.start()

        # Check if the needed CPU cores are available. We have the API
        # thread, VMM thread and then one thread for each configured vCPU.
        assert CpuMap.len() >= 2 + vm.vcpus_count

        # Pin uVM threads to physical cores.
        current_cpu_id = 0
        assert vm.pin_vmm(current_cpu_id), \
            "Failed to pin firecracker thread."
        current_cpu_id += 1
        assert vm.pin_api(current_cpu_id), \
            "Failed to pin fc_api thread."
        for idx_vcpu in range(vm.vcpus_count):
            current_cpu_id += 1
            # BUGFIX: pin vCPUs to consecutive cores 2..1+vcpus_count. The
            # original pinned to `current_cpu_id + idx_vcpu`, double-stepping
            # the index and exceeding the core count checked by the CpuMap
            # assertion above whenever vcpus_count > 1.
            assert vm.pin_vcpu(idx_vcpu, current_cpu_id), \
                f"Failed to pin fc_vcpu {idx_vcpu} thread."

        st_core = core.Core(
            name="snapshot_create_full_latency"
            if snapshot_type == SnapshotType.FULL
            else "snapshot_create_diff_latency",
            iterations=SAMPLE_COUNT)
        prod = producer.LambdaProducer(func=snapshot_create_producer,
                                       func_kwargs={
                                           "logger": logger,
                                           "vm": vm,
                                           "disks": [rw_disk],
                                           "ssh_key": ssh_key,
                                           "target_version": target_version,
                                           "metrics_fifo": metrics_fifo,
                                           "snapshot_type": snapshot_type
                                       })
        cons = consumer.LambdaConsumer(
            func=lambda cons, result: cons.consume_stat(
                st_name="max", ms_name="latency", value=result),
            func_kwargs={})
        eager_map(
            cons.set_measurement_def,
            snapshot_create_measurements(context.microvm.name(),
                                         snapshot_type))
        st_core.add_pipe(producer=prod, consumer=cons,
                         tag=context.microvm.name())

        # Gather results and verify pass criteria.
        try:
            result = st_core.run_exercise()
        except core.CoreException as err:
            handle_failure(file_dumper, err)
        else:
            # BUGFIX: dump only on success. Previously `result` was
            # referenced unconditionally, raising NameError if run_exercise()
            # raised and handle_failure() returned.
            dump_test_result(file_dumper, result)
def _g2h_send_ping(context):
    """Send ping from guest to host.

    Boots a microVM, pins its threads, runs ping from the guest against the
    host, and consumes the latency statistics from the ping output.
    """
    logger = context.custom["logger"]
    vm_builder = context.custom["builder"]
    interval_between_req = context.custom["interval"]
    name = context.custom["name"]
    file_dumper = context.custom["results_file_dumper"]
    logger.info("Testing {} with microvm: \"{}\", kernel {}, disk {} ".format(
        name,
        context.microvm.name(),
        context.kernel.name(),
        context.disk.name()))

    # Create a rw copy artifact.
    rw_disk = context.disk.copy()
    # Get ssh key from read-only artifact.
    ssh_key = context.disk.ssh_key()
    # Create a fresh microvm from artifacts.
    vm_instance = vm_builder.build(kernel=context.kernel,
                                   disks=[rw_disk],
                                   ssh_key=ssh_key,
                                   config=context.microvm)
    basevm = vm_instance.vm
    basevm.start()

    # Check if the needed CPU cores are available. We have the API thread, VMM
    # thread and then one thread for each configured vCPU.
    assert CpuMap.len() >= 2 + basevm.vcpus_count

    # Pin uVM threads to physical cores.
    current_cpu_id = 0
    assert basevm.pin_vmm(current_cpu_id), \
        "Failed to pin firecracker thread."
    current_cpu_id += 1
    assert basevm.pin_api(current_cpu_id), \
        "Failed to pin fc_api thread."
    for i in range(basevm.vcpus_count):
        current_cpu_id += 1
        # BUGFIX: pin vCPUs to consecutive cores 2..1+vcpus_count. The
        # original pinned to `current_cpu_id + i`, double-stepping the index
        # and exceeding the core count checked by the CpuMap assertion above
        # whenever vcpus_count > 1.
        assert basevm.pin_vcpu(i, current_cpu_id), \
            f"Failed to pin fc_vcpu {i} thread."

    custom = {
        "microvm": context.microvm.name(),
        "kernel": context.kernel.name(),
        "disk": context.disk.name(),
        "cpu_model_name": get_cpu_model_name()
    }
    st_core = core.Core(name="network_latency", iterations=1, custom=custom)
    cons = consumer.LambdaConsumer(
        func=consume_ping_output,
        func_kwargs={"requests": context.custom["requests"]})
    cmd = PING.format(context.custom["requests"],
                      interval_between_req,
                      DEFAULT_HOST_IP)
    prod = producer.SSHCommand(cmd,
                               net_tools.SSHConnection(basevm.ssh_config))
    st_core.add_pipe(producer=prod, consumer=cons, tag="ping")

    # Gather results and verify pass criteria.
    try:
        result = st_core.run_exercise()
    except core.CoreException as err:
        handle_failure(file_dumper, err)
    else:
        # BUGFIX: dump only on success. Previously `result` was referenced
        # unconditionally, raising NameError if run_exercise() raised and
        # handle_failure() returned instead of failing the test.
        dump_test_result(file_dumper, result)