def create_512mb_full_snapshot(bin_cloner_path, target_version: str = None,
                               fc_binary=None, jailer_binary=None):
    """Create a full snapshot from a 2vcpu 512MB microvm.

    Spawns a fresh microvm (optionally from specific Firecracker/jailer
    binaries), runs a short fio read workload to exercise the block device,
    takes a FULL snapshot, then kills the microvm.

    :param bin_cloner_path: path to the binary cloner used to spawn the VM
    :param target_version: optional snapshot target version passed to the
        snapshot builder (None means current version)
    :param fc_binary: optional Firecracker binary to spawn with
    :param jailer_binary: optional jailer binary to spawn with
    :return: the created Snapshot object
    """
    vm_instance = VMMicro.spawn(bin_cloner_path, True, fc_binary, jailer_binary)
    # Attempt to connect to the fresh microvm.
    ssh_connection = net_tools.SSHConnection(vm_instance.vm.ssh_config)

    # Run a fio workload and validate successful execution.
    fio = """fio --filename=/dev/vda --direct=1 --rw=randread --bs=4k \
--ioengine=libaio --iodepth=16 --runtime=2 --numjobs=4 --time_based \
--group_reporting --name=iops-test-job --eta-newline=1 --readonly"""
    exit_code, _, _ = ssh_connection.execute_command(fio)
    assert exit_code == 0

    # Create a snapshot builder from a microvm.
    snapshot_builder = SnapshotBuilder(vm_instance.vm)

    # The snapshot builder expects disks as paths, not artifacts.
    disks = [disk.local_path() for disk in vm_instance.disks]

    snapshot = snapshot_builder.create(disks,
                                       vm_instance.ssh_key,
                                       SnapshotType.FULL,
                                       target_version)
    vm_instance.vm.kill()
    return snapshot
def test_older_snapshot_resume_latency(bin_cloner_path):
    """Test scenario: older snapshot load performance measurement.

    For each released Firecracker binary in the artifact collection, create
    a snapshot with that version, restore it with the current version, and
    assert that the reported load_snapshot latency stays under the
    per-version baseline.

    :param bin_cloner_path: path to the binary cloner used to spawn VMs
    """
    logger = logging.getLogger("old_snapshot_load")

    artifacts = ArtifactCollection(_test_images_s3_bucket())
    # Fetch all firecracker binaries.
    # With each binary create a snapshot and try to restore in current
    # version.
    firecracker_artifacts = artifacts.firecrackers()
    for firecracker in firecracker_artifacts:
        firecracker.download()
        jailer = firecracker.jailer()
        jailer.download()
        # Artifact base names look like "vX.Y.Z"; strip the leading "v".
        fc_version = firecracker.base_name()[1:]
        logger.info("Firecracker version: %s", fc_version)
        logger.info("Source Firecracker: %s", firecracker.local_path())
        logger.info("Source Jailer: %s", jailer.local_path())

        for i in range(SAMPLE_COUNT):
            # Create a fresh microvm with the binary artifacts.
            vm_instance = VMMicro.spawn(bin_cloner_path, True,
                                        firecracker.local_path(),
                                        jailer.local_path())
            # Attempt to connect to the fresh microvm.
            ssh_connection = net_tools.SSHConnection(vm_instance.vm.ssh_config)
            # Flush guest caches so the snapshot captures a quiesced disk.
            exit_code, _, _ = ssh_connection.execute_command("sync")
            assert exit_code == 0

            # Create a snapshot builder from a microvm.
            # (The original code constructed SnapshotBuilder twice in a row;
            # one instance is sufficient.)
            snapshot_builder = SnapshotBuilder(vm_instance.vm)

            # The snapshot builder expects disks as paths, not artifacts.
            disks = [disk.local_path() for disk in vm_instance.disks]

            snapshot = snapshot_builder.create(disks,
                                               vm_instance.ssh_key,
                                               SnapshotType.FULL)
            vm_instance.vm.kill()

            builder = MicrovmBuilder(bin_cloner_path)
            microvm, metrics_fifo = builder.build_from_snapshot(
                snapshot, True, False)
            # Attempt to connect to resumed microvm.
            ssh_connection = net_tools.SSHConnection(microvm.ssh_config)
            # Check if guest still runs commands.
            exit_code, _, _ = ssh_connection.execute_command("dmesg")
            assert exit_code == 0

            value = 0
            # Parse all metric data points in search of load_snapshot time.
            # Iterate the call result directly instead of binding it to a
            # name that the loop body then rebinds (the original shadowed
            # the iterable `metrics` with each parsed data point).
            for data_point in microvm.get_all_metrics(metrics_fifo):
                metrics = json.loads(data_point)
                cur_value = metrics['latencies_us']['load_snapshot']
                if cur_value > 0:
                    value = cur_value / USEC_IN_MSEC
                    break

            baseline = LOAD_LATENCY_BASELINES[fc_version]
            logger.info("Latency %s/%s: %s ms", i + 1, SAMPLE_COUNT, value)
            assert baseline > value, "LoadSnapshot latency degraded."

            microvm.kill()