def test_restore_in_past_versions(bin_cloner_path):
    """Test scenario: create a snapshot and restore in previous versions."""
    logger = logging.getLogger("snapshot_version")

    artifacts = ArtifactCollection(_test_images_s3_bucket())
    # Fetch all snapshot artifacts.
    # "fc_release" is the key that should be used for per-release snapshot
    # artifacts. Such snapshots are created at release time and target the
    # current version. We are going to restore all of these snapshots with
    # the current testing build.
    firecracker_artifacts = artifacts.firecrackers()
    for firecracker in firecracker_artifacts:
        firecracker.download()
        jailer = firecracker.jailer()
        jailer.download()
        # The target version is part of the Firecracker binary name from S3.
        # We also strip the leading "v", as Firecracker expects an X.Y.Z
        # version string.
        target_version = firecracker.base_name()[1:]
        logger.info("Creating snapshot for version: %s", target_version)

        # Create a fresh snapshot targeted at the binary artifact version.
        snapshot = create_512mb_full_snapshot(bin_cloner_path, target_version)

        builder = MicrovmBuilder(bin_cloner_path, firecracker.local_path(),
                                 jailer.local_path())
        microvm, _ = builder.build_from_snapshot(snapshot, True, False)

        logger.info("Using Firecracker: %s", firecracker.local_path())
        logger.info("Using Jailer: %s", jailer.local_path())

        # Attempt to connect to resumed microvm.
        ssh_connection = net_tools.SSHConnection(microvm.ssh_config)

        exit_code, _, _ = ssh_connection.execute_command("sleep 1 && sync")
        assert exit_code == 0
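
# A minimal, hedged sketch of what a helper such as create_512mb_full_snapshot
# is assumed to do: boot a nano VM, take a FULL snapshot targeted at a given
# version, and return it. The target_version keyword, the 512MB memory sizing
# and the fc/jailer binary overrides are omitted or assumed here; the builder
# API shape follows how SnapshotBuilder is used elsewhere in this file.
def _sketch_create_full_snapshot(bin_cloner_path, target_version=None):
    vm_builder = MicrovmBuilder(bin_cloner_path)
    vm_instance = vm_builder.build_vm_nano()
    basevm = vm_instance.vm
    basevm.start()

    # Snapshot the running microVM; disks are passed as host paths.
    snapshot_builder = SnapshotBuilder(basevm)
    snapshot = snapshot_builder.create(
        [vm_instance.disks[0].local_path()],
        vm_instance.ssh_key,
        SnapshotType.FULL,
        target_version=target_version,
    )
    basevm.kill()
    return snapshot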
def test_restore_no_tsc(bin_cloner_path):
    """Test scenario: restore a snapshot without TSC in current version."""
    logger = logging.getLogger("no_tsc_snapshot")
    builder = MicrovmBuilder(bin_cloner_path)

    artifacts = ArtifactCollection(_test_images_s3_bucket())
    # Fetch the v0.24.0 firecracker binary as that one does not have
    # the TSC frequency in the snapshot file.
    firecracker_artifacts = artifacts.firecrackers(keyword="v0.24.0")
    firecracker = firecracker_artifacts[0]
    firecracker.download()
    jailer = firecracker.jailer()
    jailer.download()
    diff_snapshots = True

    # Create a snapshot.
    snapshot = create_snapshot_helper(builder,
                                      logger,
                                      drives=scratch_drives,
                                      ifaces=net_ifaces,
                                      fc_binary=firecracker.local_path(),
                                      jailer_binary=jailer.local_path(),
                                      diff_snapshots=diff_snapshots,
                                      balloon=True)

    # Resume microvm using current build of FC/Jailer.
    # The resume should be successful because the CPU model
    # in the snapshot state is the same as this host's.
    microvm, _ = builder.build_from_snapshot(snapshot,
                                             resume=True,
                                             diff_snapshots=False)
    validate_all_devices(logger, microvm, net_ifaces, scratch_drives,
                         diff_snapshots)
    logger.debug("========== Firecracker restore snapshot log ==========")
    logger.debug(microvm.log_data)
def test_restore_from_past_versions(bin_cloner_path):
    """Test scenario: restore all previous version snapshots."""
    logger = logging.getLogger("snapshot_version")

    artifacts = ArtifactCollection(_test_images_s3_bucket())
    # Fetch all Firecracker binaries.
    # With each binary, create a snapshot and try to restore it with the
    # current version.
    firecracker_artifacts = artifacts.firecrackers()
    for firecracker in firecracker_artifacts:
        firecracker.download()
        jailer = firecracker.jailer()
        jailer.download()

        logger.info("Source Firecracker: %s", firecracker.local_path())
        logger.info("Source Jailer: %s", jailer.local_path())
        # Create a fresh snapshot using the binary artifacts.
        builder = MicrovmBuilder(bin_cloner_path, firecracker.local_path(),
                                 jailer.local_path())
        snapshot = create_512mb_full_snapshot(bin_cloner_path, None,
                                              firecracker.local_path(),
                                              jailer.local_path())
        microvm, _ = builder.build_from_snapshot(snapshot, True, False)
        ssh_connection = net_tools.SSHConnection(microvm.ssh_config)
        exit_code, _, _ = ssh_connection.execute_command("sleep 1 && sync")

        assert exit_code == 0
def test_patch_drive_snapshot(bin_cloner_path):
    """
    Test that a patched drive is correctly used by guests loaded from snapshot.

    @type: functional
    """
    logger = logging.getLogger("snapshot_sequence")

    vm_builder = MicrovmBuilder(bin_cloner_path)
    snapshot_type = SnapshotType.FULL
    diff_snapshots = False

    # Use a predefined vm instance.
    vm_instance = vm_builder.build_vm_nano()
    basevm = vm_instance.vm
    root_disk = vm_instance.disks[0]
    ssh_key = vm_instance.ssh_key

    # Add a scratch 128MB RW non-root block device.
    scratchdisk1 = drive_tools.FilesystemFile(tempfile.mktemp(), size=128)
    basevm.add_drive("scratch", scratchdisk1.path)

    basevm.start()
    ssh_connection = net_tools.SSHConnection(basevm.ssh_config)

    # Verify that the guest can run commands.
    exit_code, _, _ = ssh_connection.execute_command("sync")
    assert exit_code == 0

    # Update drive to have another backing file, double in size.
    new_file_size_mb = 2 * int(scratchdisk1.size() / (1024 * 1024))
    logger.info("Patch drive, new file: size %sMB.", new_file_size_mb)
    scratchdisk1 = drive_tools.FilesystemFile(tempfile.mktemp(),
                                              new_file_size_mb)
    basevm.patch_drive("scratch", scratchdisk1)

    logger.info("Create %s #0.", snapshot_type)
    # Create a snapshot builder from a microvm.
    snapshot_builder = SnapshotBuilder(basevm)

    disks = [root_disk.local_path(), scratchdisk1.path]
    # Create base snapshot.
    snapshot = snapshot_builder.create(disks, ssh_key, snapshot_type)

    basevm.kill()

    # Load snapshot in a new Firecracker microVM.
    logger.info("Load snapshot, mem %s", snapshot.mem)
    microvm, _ = vm_builder.build_from_snapshot(snapshot,
                                                resume=True,
                                                diff_snapshots=diff_snapshots)
    # Attempt to connect to resumed microvm.
    ssh_connection = net_tools.SSHConnection(microvm.ssh_config)

    # Verify the new microVM has the right scratch drive.
    guest_drive_size = _get_guest_drive_size(ssh_connection)
    assert guest_drive_size == str(scratchdisk1.size())

    microvm.kill()
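
# Hedged sketch of how a helper like _get_guest_drive_size might read the
# scratch drive size from inside the guest. The device name (/dev/vdb), the
# use of `blockdev --getsize64`, and reading stdout via .read() are
# assumptions, not taken from this file.
def _sketch_get_guest_drive_size(ssh_connection, guest_dev="/dev/vdb"):
    # blockdev prints the device size in bytes on stdout.
    exit_code, stdout, _ = ssh_connection.execute_command(
        "blockdev --getsize64 {}".format(guest_dev)
    )
    assert exit_code == 0
    return stdout.read().strip()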
def test_restore_old_version(bin_cloner_path):
    """
    Restore current snapshot with previous versions of Firecracker.

    @type: functional
    """
    # Microvm: 2vCPU 256MB RAM, balloon, 4 disks and 4 net devices.
    logger = logging.getLogger("old_snapshot_version_many_devices")
    builder = MicrovmBuilder(bin_cloner_path)

    artifacts = ArtifactCollection(_test_images_s3_bucket())
    # Fetch all firecracker binaries.
    # Create a snapshot with current build and restore with each FC binary
    # artifact.
    firecracker_artifacts = artifacts.firecrackers(
        # A current snapshot (i.e. a machine snapshotted with the current
        # build) is incompatible with any past release due to notification
        # suppression.
        min_version="1.2.0",
        max_version=get_firecracker_version_from_toml(),
    )
    for firecracker in firecracker_artifacts:
        firecracker.download()
        jailer = firecracker.jailer()
        jailer.download()

        logger.info("Creating snapshot with local build")

        # Old version from artifact.
        target_version = firecracker.base_name()[1:]

        # Create a snapshot with current FC version targeting the old version.
        snapshot = create_snapshot_helper(
            builder,
            logger,
            target_version=target_version,
            drives=scratch_drives,
            ifaces=net_ifaces,
            balloon=True,
            diff_snapshots=True,
        )

        logger.info("Restoring snapshot with Firecracker: %s",
                    firecracker.local_path())
        logger.info("Using Jailer: %s", jailer.local_path())

        # Resume microvm using FC/Jailer binary artifacts.
        vm, _ = builder.build_from_snapshot(
            snapshot,
            resume=True,
            diff_snapshots=False,
            fc_binary=firecracker.local_path(),
            jailer_binary=jailer.local_path(),
        )
        validate_all_devices(logger, vm, net_ifaces, scratch_drives, True)
        logger.debug("========== Firecracker restore snapshot log ==========")
        logger.debug(vm.log_data)
def test_malicious_handler(bin_cloner_path, test_microvm_with_api,
                           uffd_handler_paths):
    """
    Test malicious uffd handler scenario.

    The page fault handler panics when receiving a page fault,
    so no events are handled and snapshot memory regions cannot be
    loaded into memory. In this case, Firecracker is designed to freeze,
    instead of silently switching to having the kernel handle page
    faults, so that it becomes obvious that something went wrong.

    @type: negative
    """
    logger = logging.getLogger("uffd_malicious_handler")

    logger.info("Create snapshot")
    snapshot = create_snapshot(bin_cloner_path)

    logger.info("Load snapshot, mem %s", snapshot.mem)
    vm_builder = MicrovmBuilder(bin_cloner_path)
    vm = test_microvm_with_api
    vm.spawn()

    # Spawn page fault handler process.
    _pf_handler = spawn_pf_handler(vm, uffd_handler_paths["malicious_handler"],
                                   snapshot.mem)

    # We expect Firecracker to freeze while resuming from a snapshot
    # due to the malicious handler's unavailability.
    try:
        vm_builder.build_from_snapshot(snapshot,
                                       vm=vm,
                                       resume=True,
                                       uffd_path=SOCKET_PATH,
                                       timeout=30)
        assert False
    except (
            socket.timeout,
            urllib3.exceptions.ReadTimeoutError,
            requests.exceptions.ReadTimeout,
    ) as _err:
        assert True, _err
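
# For reference, a hedged sketch of the API call that build_from_snapshot is
# assumed to issue when uffd_path is given: guest memory is served by the
# external handler over SOCKET_PATH instead of being mapped from a file.
# Field names follow the public /snapshot/load schema; the helper name and
# signature are hypothetical.
def _sketch_load_snapshot_with_uffd(api_session, api_url, vmstate_path,
                                    uffd_socket):
    payload = {
        "snapshot_path": vmstate_path,
        "mem_backend": {
            "backend_type": "Uffd",
            "backend_path": uffd_socket,
        },
        "resume_vm": True,
    }
    # With a panicking handler the faulted pages are never served, so this
    # PUT is expected to block until the client-side timeout fires.
    return api_session.put("{}/snapshot/load".format(api_url), json=payload)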
def test_restore_old_snapshot(bin_cloner_path):
    """
    Restore from snapshots obtained with previous versions of Firecracker.

    @type: functional
    """
    # Microvm: 2vCPU 256MB RAM, balloon, 4 disks and 4 net devices.
    logger = logging.getLogger("old_snapshot_many_devices")
    builder = MicrovmBuilder(bin_cloner_path)

    artifacts = ArtifactCollection(_test_images_s3_bucket())
    # Fetch all Firecracker binaries.
    # With each binary, create a snapshot and try to restore it with the
    # current version.
    firecracker_artifacts = artifacts.firecrackers(
        max_version=get_firecracker_version_from_toml())

    for firecracker in firecracker_artifacts:
        firecracker.download()
        jailer = firecracker.jailer()
        jailer.download()

        logger.info("Creating snapshot with Firecracker: %s",
                    firecracker.local_path())
        logger.info("Using Jailer: %s", jailer.local_path())

        target_version = firecracker.base_name()[1:]

        # v0.23 does not support creating diff snapshots.
        # v0.23 does not support balloon.
        diff_snapshots = "0.23" not in target_version

        # Create a snapshot.
        snapshot = create_snapshot_helper(
            builder,
            logger,
            drives=scratch_drives,
            ifaces=net_ifaces,
            fc_binary=firecracker.local_path(),
            jailer_binary=jailer.local_path(),
            diff_snapshots=diff_snapshots,
            balloon=diff_snapshots,
        )

        # Resume microvm using current build of FC/Jailer.
        microvm, _ = builder.build_from_snapshot(snapshot,
                                                 resume=True,
                                                 diff_snapshots=False)
        validate_all_devices(logger, microvm, net_ifaces, scratch_drives,
                             diff_snapshots)
        logger.debug("========== Firecracker restore snapshot log ==========")
        logger.debug(microvm.log_data)
def test_negative_postload_api(bin_cloner_path):
    """Test APIs fail after loading from snapshot."""
    logger = logging.getLogger("snapshot_api_fail")

    vm_builder = MicrovmBuilder(bin_cloner_path)
    vm_instance = VMNano.spawn(bin_cloner_path, diff_snapshots=True)
    basevm = vm_instance.vm
    root_disk = vm_instance.disks[0]
    ssh_key = vm_instance.ssh_key

    basevm.start()
    ssh_connection = net_tools.SSHConnection(basevm.ssh_config)

    # Verify that the guest can run commands.
    exit_code, _, _ = ssh_connection.execute_command("sync")
    assert exit_code == 0

    logger.info("Create snapshot")
    # Create a snapshot builder from a microvm.
    snapshot_builder = SnapshotBuilder(basevm)

    # Create base snapshot.
    snapshot = snapshot_builder.create([root_disk.local_path()],
                                       ssh_key,
                                       SnapshotType.DIFF)

    basevm.kill()

    logger.info("Load snapshot, mem %s", snapshot.mem)
    # Do not resume, just load, so we can still call APIs that work.
    microvm, _ = vm_builder.build_from_snapshot(snapshot,
                                                False,
                                                True)
    fail_msg = "The requested operation is not supported after starting " \
        "the microVM"

    try:
        microvm.start()
    except AssertionError as error:
        assert fail_msg in str(error)
    else:
        assert False, "Negative test failed"

    try:
        microvm.basic_config()
    except AssertionError as error:
        assert fail_msg in str(error)
    else:
        assert False, "Negative test failed"

    microvm.kill()
def test_serial_after_snapshot(bin_cloner_path):
    """
    Serial I/O after restoring from a snapshot.

    @type: functional
    """
    vm_builder = MicrovmBuilder(bin_cloner_path)
    vm_instance = vm_builder.build_vm_nano(
        diff_snapshots=False,
        daemonize=False,
    )
    microvm = vm_instance.vm
    root_disk = vm_instance.disks[0]
    ssh_key = vm_instance.ssh_key

    microvm.start()
    serial = Serial(microvm)
    serial.open()

    # Image used for tests on aarch64 has autologin.
    if platform.machine() == "x86_64":
        serial.rx(token="login: ")
        serial.tx("root")
        serial.rx("Password: ")
        serial.tx("root")
    # Make sure that at the time we snapshot the vm, the user is logged in.
    serial.rx("#")

    snapshot_builder = SnapshotBuilder(microvm)
    disks = [root_disk.local_path()]
    # Create full snapshot.
    snapshot = snapshot_builder.create(disks, ssh_key, SnapshotType.FULL)
    # Kill base microVM.
    microvm.kill()

    # Load microVM clone from snapshot.
    test_microvm, _ = vm_builder.build_from_snapshot(snapshot,
                                                     resume=True,
                                                     diff_snapshots=False,
                                                     daemonize=False)
    serial = Serial(test_microvm)
    serial.open()
    # We need to send a newline to signal the serial to flush
    # the login content.
    serial.tx("")
    serial.rx("#")
    serial.tx("pwd")
    res = serial.rx("#")
    assert "/root" in res
def test_restore_old_version_all_devices(bin_cloner_path):
    """Test scenario: restore snapshot in previous versions of Firecracker."""
    # Microvm: 2vCPU 256MB RAM, balloon, 4 disks and 4 net devices.
    logger = logging.getLogger("snapshot_many_devices")

    artifacts = ArtifactCollection(_test_images_s3_bucket())
    # Fetch all firecracker binaries.
    # Create a snapshot with current build and restore with each FC binary
    # artifact.
    firecracker_artifacts = artifacts.firecrackers()
    for firecracker in firecracker_artifacts:
        firecracker.download()
        jailer = firecracker.jailer()
        jailer.download()

        logger.info("Creating snapshot with local build")

        # Old version from artifact.
        target_version = firecracker.base_name()[1:]
        # v0.23 does not have a balloon device.
        balloon = "0.23" not in target_version

        # Create a snapshot with current FC version targeting the old version.
        snapshot = create_snapshot_helper(bin_cloner_path,
                                          logger,
                                          target_version=target_version,
                                          drives=scratch_drives,
                                          ifaces=net_ifaces,
                                          balloon=balloon,
                                          diff_snapshots=True)

        logger.info("Restoring snapshot with Firecracker: %s",
                    firecracker.local_path())
        logger.info("Using Jailer: %s", jailer.local_path())

        # Resume microvm using FC/Jailer binary artifacts.
        builder = MicrovmBuilder(bin_cloner_path,
                                 firecracker.local_path(),
                                 jailer.local_path())
        microvm, _ = builder.build_from_snapshot(snapshot,
                                                 resume=True,
                                                 enable_diff_snapshots=False)
        validate_all_devices(logger, microvm, net_ifaces, scratch_drives,
                             balloon)
        logger.debug("========== Firecracker restore snapshot log ==========")
        logger.debug(microvm.log_data)
def test_valid_handler(bin_cloner_path, test_microvm_with_api,
                       uffd_handler_paths):
    """
    Test valid uffd handler scenario.

    @type: functional
    """
    logger = logging.getLogger("uffd_valid_handler")

    logger.info("Create snapshot")
    snapshot = create_snapshot(bin_cloner_path)

    logger.info("Load snapshot, mem %s", snapshot.mem)
    vm_builder = MicrovmBuilder(bin_cloner_path)
    vm = test_microvm_with_api
    vm.spawn()

    # Spawn page fault handler process.
    _pf_handler = spawn_pf_handler(vm, uffd_handler_paths['valid_handler'],
                                   snapshot.mem)

    vm, _ = vm_builder.build_from_snapshot(snapshot,
                                           vm=vm,
                                           resume=True,
                                           uffd_path=SOCKET_PATH)

    # Inflate balloon.
    response = vm.balloon.patch(amount_mib=200)
    assert vm.api_session.is_status_no_content(response.status_code)

    # Deflate balloon.
    response = vm.balloon.patch(amount_mib=0)
    assert vm.api_session.is_status_no_content(response.status_code)

    # Verify that the guest can run commands.
    ssh_connection = net_tools.SSHConnection(vm.ssh_config)
    exit_code, _, _ = ssh_connection.execute_command("sync")
    assert exit_code == 0
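
# Hedged sketch of what spawn_pf_handler is assumed to do: launch the
# userspace page-fault handler with the UFFD socket path and the snapshot
# memory file as arguments, handing back the process handle. Jailing, chroot
# copies and privilege handling are omitted; the argument order is an
# assumption.
def _sketch_spawn_pf_handler(handler_binary, socket_path, mem_file):
    import subprocess

    return subprocess.Popen(
        [handler_binary, socket_path, mem_file],
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )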
def test_mmds_snapshot(bin_cloner_path):
    """
    Exercise MMDS behavior with snapshots.

    Ensures that MMDS V2 behavior is not affected by taking a snapshot
    and that MMDS V2 is not available after snapshot load.

    @type: functional
    """
    vm_builder = MicrovmBuilder(bin_cloner_path)
    net_iface = NetIfaceConfig()
    vm_instance = vm_builder.build_vm_nano(
        net_ifaces=[net_iface],
        diff_snapshots=True
    )
    test_microvm = vm_instance.vm
    root_disk = vm_instance.disks[0]
    ssh_key = vm_instance.ssh_key

    ipv4_address = '169.254.169.250'
    # Configure MMDS version with custom IPv4 address.
    _configure_mmds(
        test_microvm,
        version='V2',
        iface_id=DEFAULT_DEV_NAME,
        ipv4_address=ipv4_address
    )

    data_store = {
        'latest': {
            'meta-data': {
                'ami-id': 'ami-12345678'
            }
        }
    }
    _populate_data_store(test_microvm, data_store)

    test_microvm.start()

    snapshot_builder = SnapshotBuilder(test_microvm)
    disks = [root_disk.local_path()]

    ssh_connection = net_tools.SSHConnection(test_microvm.ssh_config)
    cmd = 'ip route add {} dev eth0'.format(ipv4_address)
    _, stdout, stderr = ssh_connection.execute_command(cmd)
    _assert_out(stdout, stderr, '')

    # Generate token.
    token = generate_mmds_session_token(
        ssh_connection,
        ipv4_address=ipv4_address,
        token_ttl=60
    )

    pre = 'curl -m 2 -s'
    pre += ' -X GET'
    pre += ' -H  "X-metadata-token: {}"'.format(token)
    pre += ' http://{}/'.format(ipv4_address)

    # Fetch metadata.
    cmd = pre + 'latest/meta-data/'
    _, stdout, stderr = ssh_connection.execute_command(cmd)
    _assert_out(stdout, stderr, "ami-id")

    # Create diff snapshot.
    snapshot = snapshot_builder.create(disks,
                                       ssh_key,
                                       SnapshotType.DIFF)

    # Resume microVM and ensure session token is still valid on the base.
    response = test_microvm.vm.patch(state='Resumed')
    assert test_microvm.api_session.is_status_no_content(response.status_code)

    _, stdout, stderr = ssh_connection.execute_command(
        pre + 'latest/meta-data/'
    )
    _assert_out(stdout, stderr, "ami-id")

    # Kill base microVM.
    test_microvm.kill()

    # Load microVM clone from snapshot.
    test_microvm, _ = vm_builder.build_from_snapshot(snapshot,
                                                     resume=True,
                                                     diff_snapshots=True)
    _populate_data_store(test_microvm, data_store)
    ssh_connection = net_tools.SSHConnection(test_microvm.ssh_config)

    # MMDS V2 is not available after snapshot load.
    # Test that `PUT` requests are not allowed.
    cmd = 'curl -m 2 -s'
    cmd += ' -X PUT'
    cmd += ' -H  "X-metadata-token-ttl-seconds: 1"'
    cmd += ' http://{}/latest/api/token'.format(ipv4_address)
    _, stdout, stderr = ssh_connection.execute_command(cmd)
    expected = "Not allowed HTTP method."
    _assert_out(stdout, stderr, expected)

    # Fetch metadata using V1 requests and ensure IPv4 configuration
    # is persistent between snapshots.
    cmd = 'curl -s http://{}/latest/meta-data/ami-id/'.format(ipv4_address)
    _, stdout, stderr = ssh_connection.execute_command(cmd)
    _assert_out(stdout, stderr, 'ami-12345678')
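
# Hedged sketch of what generate_mmds_session_token presumably does on the
# guest side: MMDS V2 issues session tokens via a PUT to /latest/api/token
# with the requested TTL in the X-metadata-token-ttl-seconds header. The
# exact helper signature used in this file is an assumption.
def _sketch_generate_mmds_session_token(ssh_connection, ipv4_address,
                                        token_ttl):
    cmd = "curl -m 2 -s -X PUT"
    cmd += ' -H "X-metadata-token-ttl-seconds: {}"'.format(token_ttl)
    cmd += " http://{}/latest/api/token".format(ipv4_address)
    _, stdout, _ = ssh_connection.execute_command(cmd)
    token = stdout.read().strip()
    assert token, "MMDS did not return a session token"
    return token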
def test_vsock_transport_reset(bin_cloner_path, bin_vsock_path,
                               test_fc_session_root_path):
    """
    Vsock transport reset test.

    Steps:
    1. Start the echo server on the guest.
    2. Start host workers that ping-pong data between guest and host,
       without closing any of them.
    3. Pause VM -> Create snapshot -> Resume VM.
    4. Check that worker sockets no longer work by setting a timeout
       so the sockets won't block, then do a recv operation.
    5. If the recv operation times out, the connection was closed.
       Otherwise, the connection was not closed and the test fails.
    6. Close VM -> Load VM from snapshot -> check that the vsock
       device still works.

    @type: functional
    """
    vm_builder = MicrovmBuilder(bin_cloner_path)
    vm_instance = vm_builder.build_vm_nano()
    test_vm = vm_instance.vm
    root_disk = vm_instance.disks[0]
    ssh_key = vm_instance.ssh_key

    test_vm.vsock.put(vsock_id="vsock0",
                      guest_cid=3,
                      uds_path="/{}".format(VSOCK_UDS_PATH))

    test_vm.start()

    snapshot_builder = SnapshotBuilder(test_vm)
    disks = [root_disk.local_path()]

    # Generate the random data blob file.
    blob_path, blob_hash = make_blob(test_fc_session_root_path)
    vm_blob_path = "/tmp/vsock/test.blob"

    conn = SSHConnection(test_vm.ssh_config)
    # Set up a tmpfs drive on the guest, so we can copy the blob there.
    # Guest-initiated connections (echo workers) will use this blob.
    _copy_vsock_data_to_guest(conn, blob_path, vm_blob_path, bin_vsock_path)

    # Start guest echo server.
    path = os.path.join(test_vm.jailer.chroot_path(), VSOCK_UDS_PATH)
    conn = SSHConnection(test_vm.ssh_config)
    cmd = "vsock_helper echosrv -d {}".format(ECHO_SERVER_PORT)
    ecode, _, _ = conn.execute_command(cmd)
    assert ecode == 0

    # Start host workers that connect to the guest server.
    workers = []
    for _ in range(TEST_WORKER_COUNT):
        worker = HostEchoWorker(path, blob_path)
        workers.append(worker)
        worker.start()

    for wrk in workers:
        wrk.join()

    # Create snapshot.
    snapshot = snapshot_builder.create(disks, ssh_key, SnapshotType.FULL)
    response = test_vm.vm.patch(state="Resumed")
    assert test_vm.api_session.is_status_no_content(response.status_code)

    # Check that sockets are no longer working on workers.
    for worker in workers:
        # Whatever we send to the server, it should return the same
        # value.
        buf = bytearray("TEST\n".encode("utf-8"))
        worker.sock.send(buf)
        try:
            # Arbitrary timeout; we set this so the socket won't block, as
            # it shouldn't receive anything.
            worker.sock.settimeout(0.25)
            response = worker.sock.recv(32)
            # If we reach here, it means the connection did not close.
            assert False, "Connection not closed: {}".format(
                response.decode("utf-8"))
        except SocketTimeout as exc:
            assert True, exc

    # Terminate VM.
    test_vm.kill()

    # Load snapshot.
    test_vm, _ = vm_builder.build_from_snapshot(snapshot,
                                                resume=True,
                                                diff_snapshots=False)

    # Check that vsock device still works.
    # Test guest-initiated connections.
    path = os.path.join(test_vm.path,
                        make_host_port_path(VSOCK_UDS_PATH, ECHO_SERVER_PORT))
    check_guest_connections(test_vm, path, vm_blob_path, blob_hash)

    # Test host-initiated connections.
    path = os.path.join(test_vm.jailer.chroot_path(), VSOCK_UDS_PATH)
    check_host_connections(test_vm, path, blob_path, blob_hash)
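
# Hedged sketch of the host-side connect handshake that HostEchoWorker is
# assumed to perform: Firecracker's hybrid vsock exposes a Unix socket on the
# host, and a client reaches a guest port by writing "CONNECT <port>\n" and
# waiting for an "OK <assigned_port>\n" acknowledgement before exchanging
# data. The helper name is hypothetical.
def _sketch_vsock_host_connect(uds_path, guest_port):
    sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    sock.connect(uds_path)
    sock.sendall("CONNECT {}\n".format(guest_port).encode("utf-8"))
    # The vsock muxer acknowledges with the ephemeral host-side port.
    ack = sock.recv(32).decode("utf-8")
    assert ack.startswith("OK "), "Unexpected vsock handshake reply: " + ack
    return sock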
def test_negative_snapshot_permissions(bin_cloner_path):
    """
    Test missing permission error scenarios.

    @type: negative
    """
    logger = logging.getLogger("snapshot_negative")
    vm_builder = MicrovmBuilder(bin_cloner_path)

    # Use a predefined vm instance.
    vm_instance = vm_builder.build_vm_nano()
    basevm = vm_instance.vm
    root_disk = vm_instance.disks[0]
    ssh_key = vm_instance.ssh_key

    basevm.start()

    logger.info("Create snapshot")
    # Create a snapshot builder from a microvm.
    snapshot_builder = SnapshotBuilder(basevm)

    disks = [root_disk.local_path()]

    # Remove write permissions.
    os.chmod(basevm.jailer.chroot_path(), 0o444)

    try:
        _ = snapshot_builder.create(disks, ssh_key, SnapshotType.FULL)
    except AssertionError as error:
        # Check if proper error is returned.
        assert "Permission denied" in str(error)
    else:
        assert False, "Negative test failed"

    # Restore proper permissions.
    os.chmod(basevm.jailer.chroot_path(), 0o744)

    # Create base snapshot.
    snapshot = snapshot_builder.create(disks, ssh_key, SnapshotType.FULL)

    logger.info("Load snapshot, mem %s", snapshot.mem)

    basevm.kill()

    # Remove permissions for mem file.
    os.chmod(snapshot.mem, 0o000)

    try:
        _, _ = vm_builder.build_from_snapshot(snapshot,
                                              resume=True,
                                              diff_snapshots=True)
    except AssertionError as error:
        # Check if proper error is returned.
        assert "Cannot open the memory file: Permission denied" in str(error)
    else:
        assert False, "Negative test failed"

    # Remove permissions for state file.
    os.chmod(snapshot.vmstate, 0o000)

    try:
        _, _ = vm_builder.build_from_snapshot(snapshot,
                                              resume=True,
                                              diff_snapshots=True)
    except AssertionError as error:
        # Check if proper error is returned.
        assert ("Cannot perform open on the snapshot backing file:"
                " Permission denied" in str(error))
    else:
        assert False, "Negative test failed"

    # Restore permissions for state file.
    os.chmod(snapshot.vmstate, 0o744)
    os.chmod(snapshot.mem, 0o744)

    # Remove permissions for block file.
    os.chmod(snapshot.disks[0], 0o000)

    try:
        _, _ = vm_builder.build_from_snapshot(snapshot,
                                              resume=True,
                                              diff_snapshots=True)
    except AssertionError as error:
        # Check if proper error is returned.
        assert "Block(BackingFile(Os { code: 13, kind: PermissionDenied" in str(
            error)
    else:
        assert False, "Negative test failed"
def test_older_snapshot_resume_latency(bin_cloner_path):
    """Test scenario: Older snapshot load performance measurement."""
    logger = logging.getLogger("old_snapshot_load")

    artifacts = ArtifactCollection(_test_images_s3_bucket())
    # Fetch all Firecracker binaries.
    # With each binary, create a snapshot and try to restore it with the
    # current version.
    firecracker_artifacts = artifacts.firecrackers()
    for firecracker in firecracker_artifacts:
        firecracker.download()
        jailer = firecracker.jailer()
        jailer.download()
        fc_version = firecracker.base_name()[1:]
        logger.info("Firecracker version: %s", fc_version)
        logger.info("Source Firecracker: %s", firecracker.local_path())
        logger.info("Source Jailer: %s", jailer.local_path())

        for i in range(SAMPLE_COUNT):
            # Create a fresh microvm with the binary artifacts.
            vm_instance = VMMicro.spawn(bin_cloner_path, True,
                                        firecracker.local_path(),
                                        jailer.local_path())
            # Attempt to connect to the fresh microvm.
            ssh_connection = net_tools.SSHConnection(vm_instance.vm.ssh_config)

            exit_code, _, _ = ssh_connection.execute_command("sync")
            assert exit_code == 0

            # Create a snapshot builder from a microvm.
            snapshot_builder = SnapshotBuilder(vm_instance.vm)

            # The snapshot builder expects disks as paths, not artifacts.
            disks = []
            for disk in vm_instance.disks:
                disks.append(disk.local_path())

            snapshot = snapshot_builder.create(disks, vm_instance.ssh_key,
                                               SnapshotType.FULL)

            vm_instance.vm.kill()
            builder = MicrovmBuilder(bin_cloner_path)
            microvm, metrics_fifo = builder.build_from_snapshot(
                snapshot, True, False)
            # Attempt to connect to resumed microvm.
            ssh_connection = net_tools.SSHConnection(microvm.ssh_config)
            # Check that the guest still runs commands.
            exit_code, _, _ = ssh_connection.execute_command("dmesg")
            assert exit_code == 0

            value = 0
            # Parse all metric data points in search of load_snapshot time.
            metrics = microvm.get_all_metrics(metrics_fifo)
            for data_point in metrics:
                metrics = json.loads(data_point)
                cur_value = metrics['latencies_us']['load_snapshot']
                if cur_value > 0:
                    value = cur_value / USEC_IN_MSEC
                    break

            baseline = LOAD_LATENCY_BASELINES[fc_version]
            logger.info("Latency %s/%s: %s ms", i + 1, SAMPLE_COUNT, value)
            assert baseline > value, "LoadSnapshot latency degraded."
            microvm.kill()
def test_snap_restore_from_artifacts(
    bin_cloner_path, bin_vsock_path, test_fc_session_root_path, cpu_template
):
    """
    Restore from snapshots obtained with all supported guest kernel versions.

    The snapshot artifacts have been generated through the
    `create_snapshot_artifacts` devtool command. The base microVM snapshotted
    has been built from the config file at
    ~/firecracker/tools/create_snapshot_artifact/complex_vm_config.json.

    @type: functional
    """
    logger = logging.getLogger("cross_kernel_snapshot_restore")
    builder = MicrovmBuilder(bin_cloner_path)

    snapshot_root_name = "snapshot_artifacts"
    snapshot_root_dir = os.path.join(FC_WORKSPACE_DIR, snapshot_root_name)
    pathlib.Path(Artifact.LOCAL_ARTIFACT_DIR).mkdir(parents=True, exist_ok=True)

    # Iterate through all subdirectories based on CPU template
    # in the snapshot root dir.
    subdir_filter = r".*_" + re.escape(cpu_template) + r"_guest_snapshot"
    snap_subdirs = [
        d for d in os.listdir(snapshot_root_dir) if re.match(subdir_filter, d)
    ]
    for subdir_name in snap_subdirs:
        snapshot_dir = os.path.join(snapshot_root_dir, subdir_name)
        assert os.path.isdir(snapshot_dir)

        logger.info("Working with snapshot artifacts in %s.", snapshot_dir)
        mem, vmstate, disk, ssh_key = _get_snapshot_files_paths(snapshot_dir)

        logger.info("Creating snapshot from artifacts...")
        snapshot = Snapshot(mem, vmstate, [disk], net_ifaces, ssh_key)

        logger.info("Loading microVM from snapshot...")
        vm, _ = builder.build_from_snapshot(snapshot, resume=True, diff_snapshots=False)

        # Ensure microVM is running.
        response = vm.machine_cfg.get()
        assert vm.api_session.is_status_ok(response.status_code)
        assert vm.state == "Running"

        # Test that net devices have connectivity after restore.
        for iface in snapshot.net_ifaces:
            logger.info("Testing net device %s...", iface.dev_name)
            vm.ssh_config["hostname"] = iface.guest_ip
            ssh_connection = net_tools.SSHConnection(vm.ssh_config)
            exit_code, _, _ = ssh_connection.execute_command("sync")
            assert exit_code == 0

        logger.info("Testing data store behavior...")
        _test_mmds(vm, snapshot.net_ifaces[3])

        logger.info("Testing balloon device...")
        _test_balloon(vm, ssh_connection)

        logger.info("Testing vsock device...")
        check_vsock_device(
            vm, bin_vsock_path, test_fc_session_root_path, ssh_connection
        )

        # Run fio on the guest.
        _guest_run_fio_iteration(ssh_connection, 0)

        vm.kill()
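
# Hedged sketch of what a helper like _get_snapshot_files_paths could do:
# locate the memory file, vmstate file, disk image and SSH key inside one
# snapshot artifact directory. The file names used here (vm.mem, vm.vmstate,
# disk.ext4, ubuntu-18.04.id_rsa) are assumptions about the layout produced
# by the create_snapshot_artifacts devtool command, not taken from this file.
def _sketch_get_snapshot_files_paths(snapshot_dir):
    mem = os.path.join(snapshot_dir, "vm.mem")
    vmstate = os.path.join(snapshot_dir, "vm.vmstate")
    disk = os.path.join(snapshot_dir, "disk.ext4")
    ssh_key_path = os.path.join(snapshot_dir, "ubuntu-18.04.id_rsa")
    for path in (mem, vmstate, disk, ssh_key_path):
        assert os.path.exists(path), "Missing snapshot artifact: " + path
    return mem, vmstate, disk, ssh_key_path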
def test_mmds_snapshot(bin_cloner_path):
    """
    Exercise tokens' behavior with snapshots.

    Ensures that valid tokens created on a base microVM
    are not accepted on the clone VM.

    @type: functional
    """
    vm_builder = MicrovmBuilder(bin_cloner_path)
    vm_instance = vm_builder.build_vm_nano(diff_snapshots=True)
    test_microvm = vm_instance.vm
    root_disk = vm_instance.disks[0]
    ssh_key = vm_instance.ssh_key

    data_store = {
        'latest': {
            'meta-data': {
                'ami-id': 'ami-12345678'
            }
        }
    }
    _populate_data_store(test_microvm, data_store)

    test_microvm.start()

    snapshot_builder = SnapshotBuilder(test_microvm)
    disks = [root_disk.local_path()]

    ssh_connection = net_tools.SSHConnection(test_microvm.ssh_config)
    cmd = 'ip route add 169.254.169.254 dev eth0'
    _, stdout, stderr = ssh_connection.execute_command(cmd)
    _assert_out(stdout, stderr, '')

    # Generate token.
    token = _generate_mmds_session_token(
        ssh_connection,
        ipv4_address="169.254.169.254",
        token_ttl=60
    )

    pre = 'curl -m 2 -s'
    pre += ' -X GET'
    pre += ' -H  "X-metadata-token: {}"'.format(token)
    pre += ' http://169.254.169.254/'

    cmd = pre + 'latest/meta-data/'
    _, stdout, stderr = ssh_connection.execute_command(cmd)
    _assert_out(stdout, stderr, "ami-id")

    # Setting MMDS version to V2 when V2 is already in use should
    # have no effect on tokens generated so far.
    _set_mmds_version(test_microvm, version='V2')
    _, stdout, stderr = ssh_connection.execute_command(cmd)
    _assert_out(stdout, stderr, "ami-id")

    # Create diff snapshot.
    snapshot = snapshot_builder.create(disks,
                                       ssh_key,
                                       SnapshotType.DIFF)

    # Resume microVM and ensure session token is still valid.
    response = test_microvm.vm.patch(state='Resumed')
    assert test_microvm.api_session.is_status_no_content(response.status_code)

    _, stdout, stderr = ssh_connection.execute_command(
        pre + 'latest/meta-data/'
    )
    _assert_out(stdout, stderr, "ami-id")

    # Kill base microVM.
    test_microvm.kill()

    # Load microVM clone from snapshot.
    test_microvm, _ = vm_builder.build_from_snapshot(snapshot,
                                                     resume=True,
                                                     diff_snapshots=True)
    _populate_data_store(test_microvm, data_store)
    ssh_connection = net_tools.SSHConnection(test_microvm.ssh_config)

    # Ensure that token created on the baseVM is not valid inside the clone.
    cmd = 'curl -m 2 -s'
    cmd += ' -X GET'
    cmd += ' -H  "X-metadata-token: {}"'.format(token)
    cmd += ' http://169.254.169.254/latest/meta-data/'
    _, stdout, stderr = ssh_connection.execute_command(cmd)
    _assert_out(stdout, stderr, "MMDS token not valid.")

    # Generate new session token.
    token = _generate_mmds_session_token(
        ssh_connection,
        ipv4_address="169.254.169.254",
        token_ttl=60
    )

    # Ensure the newly created token is valid.
    cmd = 'curl -m 2 -s'
    cmd += ' -X GET'
    cmd += ' -H  "X-metadata-token: {}"'.format(token)
    cmd += ' http://169.254.169.254/latest/meta-data/'
    _, stdout, stderr = ssh_connection.execute_command(cmd)
    _assert_out(stdout, stderr, "ami-id")