def _test_log_config(microvm,
                     log_level='Info',
                     show_level=True,
                     show_origin=True):
    """Exercises different scenarios for testing the logging config."""
    microvm.spawn()

    microvm.basic_config()

    # Configure logging.
    log_fifo_path = os.path.join(microvm.path, 'log_fifo')
    metrics_fifo_path = os.path.join(microvm.path, 'metrics_fifo')
    log_fifo = log_tools.Fifo(log_fifo_path)
    metrics_fifo = log_tools.Fifo(metrics_fifo_path)

    response = microvm.logger.put(
        log_fifo=microvm.create_jailed_resource(log_fifo.path),
        metrics_fifo=microvm.create_jailed_resource(metrics_fifo.path),
        level=log_level,
        show_level=show_level,
        show_log_origin=show_origin)
    assert microvm.api_session.is_good_response(response.status_code)

    microvm.start()

    lines = log_fifo.sequential_fifo_reader(20)
    for line in lines:
        check_log_message(line, microvm.id, log_level, show_level, show_origin)
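Note: the check_log_message helper used above is defined elsewhere in the test framework. A minimal sketch of what such a validator could look like, assuming a "<timestamp> [<instance-id>:LEVEL:origin] <message>" line layout (the exact format and level casing are assumptions, not taken from this listing):

import re
from time import strptime


def check_log_message(log_str, instance_id, level, show_level, show_origin):
    """Sanity-check one log line against the configured logger options."""
    # Assumed layout: "<timestamp> [<id>(:LEVEL)(:file:line)] <message>".
    timestamp, tag = log_str.split(' ')[:2]
    # Validate the timestamp prefix, ignoring sub-second precision.
    strptime(timestamp[:19], '%Y-%m-%dT%H:%M:%S')
    pattern = r'\[' + instance_id
    if show_level:
        pattern += ':' + level
    if show_origin:
        pattern += r':[^:]+:[0-9]+'  # file:line of the logging call site
    pattern += r'\]'
    assert re.match(pattern, tag) is not None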
Example #2
def test_dirty_page_metrics(test_microvm_with_api):
    """Check the `dirty_pages` metric."""
    microvm = test_microvm_with_api
    microvm.spawn()
    microvm.basic_config()

    # Configure logging.
    log_fifo_path = os.path.join(microvm.path, 'log_fifo')
    metrics_fifo_path = os.path.join(microvm.path, 'metrics_fifo')
    log_fifo = log_tools.Fifo(log_fifo_path)
    metrics_fifo = log_tools.Fifo(metrics_fifo_path)

    response = microvm.logger.put(
        log_fifo=microvm.create_jailed_resource(log_fifo.path),
        metrics_fifo=microvm.create_jailed_resource(metrics_fifo.path),
        level='Error',
        show_level=False,
        show_log_origin=False,
        options=['LogDirtyPages'])
    assert microvm.api_session.is_status_no_content(response.status_code)

    microvm.start()

    sleep(0.3)
    response = microvm.actions.put(action_type='FlushMetrics')
    assert microvm.api_session.is_status_no_content(response.status_code)

    lines = metrics_fifo.sequential_reader(2)
    assert int(json.loads(lines[0])['memory']['dirty_pages']) == 0
    assert int(json.loads(lines[1])['memory']['dirty_pages']) > 0
Example #3
def test_dirty_page_metrics(test_microvm_with_api):
    """Check the `dirty_pages` metric."""
    microvm = test_microvm_with_api
    microvm.spawn()
    microvm.basic_config()

    # Configure logging.
    log_fifo_path = os.path.join(microvm.path, 'log_fifo')
    metrics_fifo_path = os.path.join(microvm.path, 'metrics_fifo')
    log_fifo = log_tools.Fifo(log_fifo_path)
    metrics_fifo = log_tools.Fifo(metrics_fifo_path)

    response = microvm.logger.put(
        log_fifo=microvm.create_jailed_resource(log_fifo.path),
        metrics_fifo=microvm.create_jailed_resource(metrics_fifo.path),
        level='Error',
        show_level=False,
        show_log_origin=False,
        options=['LogDirtyPages'])
    assert microvm.api_session.is_good_response(response.status_code)

    microvm.start()

    lines = metrics_fifo.sequential_fifo_reader(3)
    for line in lines:
        assert int(json.loads(line)['memory']['dirty_pages']) >= 0
Example #4
def test_flush_metrics(test_microvm_with_api):
    """Check the `FlushMetrics` vmm action."""
    microvm = test_microvm_with_api
    microvm.spawn()
    microvm.basic_config()

    # Configure logging.
    log_fifo_path = os.path.join(microvm.path, 'log_fifo')
    metrics_fifo_path = os.path.join(microvm.path, 'metrics_fifo')
    log_fifo = log_tools.Fifo(log_fifo_path)
    metrics_fifo = log_tools.Fifo(metrics_fifo_path)

    response = microvm.logger.put(
        log_fifo=microvm.create_jailed_resource(log_fifo.path),
        metrics_fifo=microvm.create_jailed_resource(metrics_fifo.path))
    assert microvm.api_session.is_status_no_content(response.status_code)

    microvm.start()

    # Empty fifo before triggering `FlushMetrics` so that we get accurate data.
    _ = metrics_fifo.sequential_reader(100)

    how_many_flushes = 3
    for _ in range(how_many_flushes):
        response = microvm.actions.put(action_type='FlushMetrics')
        assert microvm.api_session.is_status_no_content(response.status_code)
    lines = metrics_fifo.sequential_reader(how_many_flushes)
    assert len(lines) == how_many_flushes
Example #5
def test_startup_time(test_microvm_with_api):
    """Check the startup time for jailer and Firecracker up to socket bind."""
    microvm = test_microvm_with_api
    microvm.spawn()

    microvm.basic_config(vcpu_count=2, mem_size_mib=1024)

    # Configure logging.
    log_fifo_path = os.path.join(microvm.path, 'log_fifo')
    metrics_fifo_path = os.path.join(microvm.path, 'metrics_fifo')
    log_fifo = log_tools.Fifo(log_fifo_path)
    metrics_fifo = log_tools.Fifo(metrics_fifo_path)

    response = microvm.logger.put(
        log_fifo=microvm.create_jailed_resource(log_fifo.path),
        metrics_fifo=microvm.create_jailed_resource(metrics_fifo.path))
    assert microvm.api_session.is_status_no_content(response.status_code)

    microvm.start()
    time.sleep(0.4)

    # Since metrics are flushed at InstanceStart, reading the first line of
    # the metrics fifo will suffice.
    lines = metrics_fifo.sequential_reader(1)
    metrics = json.loads(lines[0])
    startup_time_us = metrics['api_server']['process_startup_time_us']
    cpu_startup_time_us = metrics['api_server']['process_startup_time_cpu_us']

    print('Process startup time is: {} us ({} CPU us)'.format(
        startup_time_us, cpu_startup_time_us))

    assert cpu_startup_time_us > 0
    assert cpu_startup_time_us <= MAX_STARTUP_TIME_CPU_US[platform.machine()]
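MAX_STARTUP_TIME_CPU_US is not part of this listing; the test indexes it by platform.machine(), so it is presumably a per-architecture map of CPU-time budgets. The numbers below are illustrative placeholders only, not the suite's real baselines:

# Placeholder thresholds (microseconds of CPU time); the real values live
# elsewhere in the test suite and will differ.
MAX_STARTUP_TIME_CPU_US = {
    'x86_64': 5000,
    'aarch64': 5000,
}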
Example #6
def _configure_vm(microvm, network_info=None):
    """Auxiliary function for preparing microvm before measuring boottime."""
    microvm.spawn()

    # Machine configuration specified in the SLA.
    microvm.basic_config(vcpu_count=1, mem_size_mib=128)
    if network_info:
        _tap, _, _ = microvm.ssh_network_config(network_info["config"],
                                                network_info["iface_id"])

    # Configure logging.
    log_fifo_path = os.path.join(microvm.path,
                                 'log_fifo' + microvm.id.split('-')[0])
    metrics_fifo_path = os.path.join(microvm.path,
                                     'metrics_fifo' + microvm.id.split('-')[0])
    log_fifo = log_tools.Fifo(log_fifo_path)
    metrics_fifo = log_tools.Fifo(metrics_fifo_path)

    response = microvm.logger.put(
        log_fifo=microvm.create_jailed_resource(log_fifo.path),
        metrics_fifo=microvm.create_jailed_resource(metrics_fifo.path),
        level='Info',
        show_level=False,
        show_log_origin=False)
    assert microvm.api_session.is_status_no_content(response.status_code)

    microvm.start()
    return log_fifo, (_tap if network_info else None)
Example #7
def test_api_patch_post_boot(test_microvm_with_api):
    """Test PATCH updates after the microvm boots."""
    test_microvm = test_microvm_with_api
    test_microvm.spawn()

    # Set up the microVM with 2 vCPUs, 256 MiB of RAM and a rw root file
    # system. The scratch drive and network interface are added below.
    test_microvm.basic_config()

    fs1 = drive_tools.FilesystemFile(
        os.path.join(test_microvm.fsfiles, 'scratch')
    )
    response = test_microvm.drive.put(
        drive_id='scratch',
        path_on_host=test_microvm.create_jailed_resource(fs1.path),
        is_root_device=False,
        is_read_only=False
    )
    assert test_microvm.api_session.is_status_no_content(response.status_code)

    # Configure logging.
    log_fifo_path = os.path.join(test_microvm.path, 'log_fifo')
    metrics_fifo_path = os.path.join(test_microvm.path, 'metrics_fifo')
    log_fifo = log_tools.Fifo(log_fifo_path)
    metrics_fifo = log_tools.Fifo(metrics_fifo_path)

    response = test_microvm.logger.put(
        log_fifo=test_microvm.create_jailed_resource(log_fifo.path),
        metrics_fifo=test_microvm.create_jailed_resource(metrics_fifo.path)
    )
    assert test_microvm.api_session.is_status_no_content(response.status_code)

    iface_id = '1'
    tapname = test_microvm.id[:8] + 'tap' + iface_id
    tap1 = net_tools.Tap(tapname, test_microvm.jailer.netns)
    response = test_microvm.network.put(
        iface_id=iface_id,
        host_dev_name=tap1.name,
        guest_mac='06:00:00:00:00:01'
    )
    assert test_microvm.api_session.is_status_no_content(response.status_code)

    test_microvm.start()

    # Partial updates to the boot source are not allowed.
    response = test_microvm.boot.patch(
        kernel_image_path='otherfile'
    )
    assert test_microvm.api_session.is_status_bad_request(response.status_code)
    assert "Invalid request method" in response.text

    # Partial updates to the machine configuration are not allowed.
    response = test_microvm.machine_cfg.patch(vcpu_count=4)
    assert test_microvm.api_session.is_status_bad_request(response.status_code)
    assert "Invalid request method" in response.text

    # Partial updates to the logger configuration are not allowed.
    response = test_microvm.logger.patch(level='Error')
    assert test_microvm.api_session.is_status_bad_request(response.status_code)
    assert "Invalid request method" in response.text
Example #8
def _test_log_config(microvm,
                     log_level='Info',
                     show_level=True,
                     show_origin=True,
                     options=None):
    """Exercises different scenarios for testing the logging config."""
    # Default to an empty list here instead of using a mutable default arg.
    options = options if options is not None else []
    microvm.spawn()

    microvm.basic_config()

    # Configure logging.
    log_fifo_path = os.path.join(microvm.path, 'log_fifo')
    metrics_fifo_path = os.path.join(microvm.path, 'metrics_fifo')
    log_fifo = log_tools.Fifo(log_fifo_path)
    metrics_fifo = log_tools.Fifo(metrics_fifo_path)

    response = microvm.logger.put(
        log_fifo=microvm.create_jailed_resource(log_fifo.path),
        metrics_fifo=microvm.create_jailed_resource(metrics_fifo.path),
        level=log_level,
        show_level=show_level,
        show_log_origin=show_origin,
        options=options)
    assert microvm.api_session.is_status_no_content(response.status_code)

    microvm.start()

    lines = log_fifo.sequential_reader(20)
    for idx, line in enumerate(lines):
        if idx == 0:
            assert line.startswith("Running Firecracker")
            continue
        check_log_message(line, microvm.id, log_level, show_level, show_origin)
Example #9
def test_config_start_no_api(test_microvm_with_ssh, vm_config_file):
    """Test microvm start when API server thread is disabled."""
    test_microvm = test_microvm_with_ssh

    log_fifo_path = os.path.join(test_microvm.path, 'log_fifo')
    metrics_fifo_path = os.path.join(test_microvm.path, 'metrics_fifo')
    log_fifo = log_tools.Fifo(log_fifo_path)
    metrics_fifo = log_tools.Fifo(metrics_fifo_path)
    test_microvm.create_jailed_resource(log_fifo.path, create_jail=True)
    test_microvm.create_jailed_resource(metrics_fifo.path, create_jail=True)

    _configure_vm_from_json(test_microvm, vm_config_file)
    test_microvm.no_api = True
    test_microvm.spawn()

    # Get Firecracker PID so we can check the names of threads.
    firecracker_pid = test_microvm.jailer_clone_pid

    # Get names of threads in Firecracker.
    cmd = 'ps -T --no-headers -p {} | awk \'{{print $5}}\''.format(
        firecracker_pid
    )
    process = run(cmd, stdout=PIPE, stderr=PIPE, shell=True, check=True)
    threads_out_lines = process.stdout.decode('utf-8').splitlines()

    # Check that API server thread was not created.
    assert 'fc_api' not in threads_out_lines
    # Sanity check that main thread was created.
    assert 'firecracker' in threads_out_lines

    # Check that microvm was successfully booted.
    lines = log_fifo.sequential_reader(1)
    assert lines[0].startswith('Running Firecracker')
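_configure_vm_from_json is defined outside this listing. A plausible reconstruction, based on the inline version of the same steps in Example #25 below (the helper's exact behavior is inferred, not confirmed here):

import os


def _configure_vm_from_json(test_microvm, vm_config_file):
    """Stage a JSON config inside the jail and register it with the microvm."""
    # Hardlink the guest kernel and rootfs into the jail first, since the
    # config file references them by their jailed paths.
    test_microvm.create_jailed_resource(test_microvm.kernel_file,
                                        create_jail=True)
    test_microvm.create_jailed_resource(test_microvm.rootfs_file,
                                        create_jail=True)
    # Copy the config next to the microvm's other resources, jail it, and
    # let the jailer hand it to Firecracker on spawn.
    vm_config_path = os.path.join(test_microvm.path,
                                  os.path.basename(vm_config_file))
    with open(vm_config_file) as src, open(vm_config_path, 'w') as dst:
        dst.write(src.read())
    test_microvm.create_jailed_resource(vm_config_path, create_jail=True)
    test_microvm.config_file = os.path.basename(vm_config_file)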
Example #10
def test_api_requests_logs(test_microvm_with_api):
    """Test that API requests are logged."""
    microvm = test_microvm_with_api
    microvm.spawn()
    microvm.basic_config()

    # Configure logging.
    log_fifo_path = os.path.join(microvm.path, 'log_fifo')
    metrics_fifo_path = os.path.join(microvm.path, 'metrics_fifo')
    log_fifo = log_tools.Fifo(log_fifo_path)
    metrics_fifo = log_tools.Fifo(metrics_fifo_path)

    response = microvm.logger.put(
        log_fifo=microvm.create_jailed_resource(log_fifo.path),
        metrics_fifo=microvm.create_jailed_resource(metrics_fifo.path),
        level='Info',
        show_level=True,
        show_log_origin=True,
        options=[])
    assert microvm.api_session.is_status_no_content(response.status_code)

    expected_log_strings = []

    # Check that a Put request on /machine-config is logged.
    response = microvm.machine_cfg.put(vcpu_count=4)
    assert microvm.api_session.is_status_no_content(response.status_code)
    # We are not interested in the actual body. Just check that the log
    # message also has the string "body" in it.
    expected_log_strings.append(
        "The API server received a synchronous Put request "
        "on \"/machine-config\" with body")

    # Check that a Get request on /machine-config is logged without the
    # body.
    response = microvm.machine_cfg.get()
    assert microvm.api_session.is_status_ok(response.status_code)
    expected_log_strings.append(
        "The API server received a synchronous Get request "
        "on \"/machine-config\".")

    # Check that all requests on /mmds are logged without the body.
    dummy_json = {'latest': {'meta-data': {'ami-id': 'dummy'}}}
    response = microvm.mmds.put(json=dummy_json)
    assert microvm.api_session.is_status_no_content(response.status_code)
    expected_log_strings.append(
        "The API server received a synchronous Put request on \"/mmds\".")

    response = microvm.mmds.patch(json=dummy_json)
    assert microvm.api_session.is_status_no_content(response.status_code)
    expected_log_strings.append(
        "The API server received a synchronous Patch request on \"/mmds\".")

    response = microvm.mmds.get()
    assert microvm.api_session.is_status_ok(response.status_code)
    expected_log_strings.append(
        "The API server received a synchronous Get request on \"/mmds\".")

    assert log_file_contains_strings(log_fifo, expected_log_strings)
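log_file_contains_strings is another helper omitted from the listing. A minimal sketch, assuming it simply drains the log FIFO and checks that every expected fragment occurs somewhere (the max_lines cap is an assumption):

def log_file_contains_strings(log_fifo, expected_strings, max_lines=100):
    """Return True iff every expected fragment appears in the drained log."""
    log_data = '\n'.join(log_fifo.sequential_reader(max_lines))
    return all(fragment in log_data for fragment in expected_strings)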
Example #11
def _test_startup_time(microvm):
    microvm.spawn()

    microvm.basic_config(vcpu_count=2, mem_size_mib=1024)

    # Configure metrics.
    metrics_fifo_path = os.path.join(microvm.path, 'metrics_fifo')
    metrics_fifo = log_tools.Fifo(metrics_fifo_path)

    response = microvm.metrics.put(
        metrics_path=microvm.create_jailed_resource(metrics_fifo.path))
    assert microvm.api_session.is_status_no_content(response.status_code)

    microvm.start()
    time.sleep(0.4)

    # Since metrics are flushed at InstanceStart, reading the first line of
    # the metrics fifo will suffice.
    lines = metrics_fifo.sequential_reader(1)
    metrics = json.loads(lines[0])
    startup_time_us = metrics['api_server']['process_startup_time_us']
    cpu_startup_time_us = metrics['api_server']['process_startup_time_cpu_us']

    print('Process startup time is: {} us ({} CPU us)'.format(
        startup_time_us, cpu_startup_time_us))

    max_startup_time = MAX_STARTUP_TIME_CPU_US[platform.machine()]
    assert cpu_startup_time_us > 0
    assert cpu_startup_time_us <= max_startup_time

    return f"{cpu_startup_time_us} us", f"<= {max_startup_time} us"
Example #12
def test_reboot(test_microvm_with_ssh, network_config):
    """Test reboot from guest kernel."""
    test_microvm = test_microvm_with_ssh
    test_microvm.jailer.daemonize = False
    test_microvm.spawn()

    # We don't need to monitor the memory for this test because we are
    # just rebooting and the process dies before pmap gets the RSS.
    test_microvm.memory_monitor = None

    # Set up the microVM with 4 vCPUs, 256 MiB of RAM, 0 network ifaces, and
    # a root file system with the rw permission. The network interface is
    # added after we get a unique MAC and IP.
    test_microvm.basic_config(vcpu_count=4)
    _tap, _, _ = test_microvm.ssh_network_config(network_config, '1')

    # Configure metrics system.
    metrics_fifo_path = os.path.join(test_microvm.path, 'metrics_fifo')
    metrics_fifo = log_tools.Fifo(metrics_fifo_path)
    response = test_microvm.metrics.put(
        metrics_path=test_microvm.create_jailed_resource(metrics_fifo.path)
    )
    assert test_microvm.api_session.is_status_no_content(response.status_code)

    test_microvm.start()

    # Get Firecracker PID so we can count the number of threads.
    firecracker_pid = test_microvm.jailer_clone_pid

    # Get number of threads in Firecracker
    cmd = 'ps -o nlwp {} | tail -1 | awk \'{{print $1}}\''.format(
        firecracker_pid
    )
    _, stdout, _ = utils.run_cmd(cmd)
    nr_of_threads = stdout.rstrip()
    assert int(nr_of_threads) == 6

    # Consume existing metrics
    lines = metrics_fifo.sequential_reader(100)
    assert len(lines) == 1
    # Rebooting Firecracker sends an exit event and should gracefully kill
    # the instance.
    ssh_connection = net_tools.SSHConnection(test_microvm.ssh_config)

    ssh_connection.execute_command("reboot")

    while True:
        # Pytest's timeout will kill the test even if the loop doesn't exit.
        try:
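            # Signal 0 performs error checking only: os.kill raises OSError
            # once the process no longer exists, which ends the wait loop.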
            os.kill(firecracker_pid, 0)
            time.sleep(0.01)
        except OSError:
            break

    # Consume existing metrics
    lines = metrics_fifo.sequential_reader(100)
    assert len(lines) == 1

    # Make sure that the FC process was not killed by a seccomp fault
    assert json.loads(lines[0])["seccomp"]["num_faults"] == 0
Example #13
def test_flush_metrics(test_microvm_with_api):
    """Check the `FlushMetrics` vmm action."""
    microvm = test_microvm_with_api
    microvm.spawn()
    microvm.basic_config()

    # Configure metrics system.
    metrics_fifo_path = os.path.join(microvm.path, 'metrics_fifo')
    metrics_fifo = log_tools.Fifo(metrics_fifo_path)

    response = microvm.metrics.put(
        metrics_path=microvm.create_jailed_resource(metrics_fifo.path))
    assert microvm.api_session.is_status_no_content(response.status_code)

    microvm.start()

    res = metrics_fifo.sequential_reader(1)
    metrics = json.loads(res[0])

    exp_keys = [
        'utc_timestamp_ms', 'api_server', 'block', 'get_api_requests', 'i8042',
        'logger', 'mmds', 'net', 'patch_api_requests', 'put_api_requests',
        'rtc', 'seccomp', 'vcpu', 'vmm', 'uart', 'signals', 'vsock'
    ]

    assert set(metrics.keys()) == set(exp_keys)

    microvm.flush_metrics(metrics_fifo)
Example #14
def _test_log_config(microvm,
                     log_level='Info',
                     show_level=True,
                     show_origin=True):
    """Exercises different scenarios for testing the logging config."""
    microvm.spawn(create_logger=False)

    # Configure logging.
    log_fifo_path = os.path.join(microvm.path, 'log_fifo')
    log_fifo = log_tools.Fifo(log_fifo_path)
    response = microvm.logger.put(log_path=microvm.create_jailed_resource(
        log_fifo.path),
                                  level=log_level,
                                  show_level=show_level,
                                  show_log_origin=show_origin)
    assert microvm.api_session.is_status_no_content(response.status_code)

    microvm.start_console_logger(log_fifo)

    microvm.basic_config()
    microvm.start()

    lines = microvm.log_data.splitlines()
    for idx, line in enumerate(lines):
        if idx == 0:
            assert line.startswith("Running Firecracker")
            continue
        check_log_message_format(line, microvm.id, log_level, show_level,
                                 show_origin)
Example #15
def test_sigbus_sigsegv(test_microvm_with_api, signum):
    """Test signal handling for `SIGBUS` and `SIGSEGV`."""
    test_microvm = test_microvm_with_api
    test_microvm.spawn()

    # We don't need to monitor the memory for this test.
    test_microvm.memory_events_queue = None

    test_microvm.basic_config()

    # Configure logging.
    log_fifo_path = os.path.join(test_microvm.path, 'log_fifo')
    log_fifo = log_tools.Fifo(log_fifo_path)

    response = test_microvm.logger.put(
        log_fifo=test_microvm.create_jailed_resource(log_fifo.path),
        level='Error',
        show_level=False,
        show_log_origin=False)
    assert test_microvm.api_session.is_status_no_content(response.status_code)

    test_microvm.start()
    firecracker_pid = int(test_microvm.jailer_clone_pid)

    sleep(0.5)
    os.kill(firecracker_pid, signum)

    msg = 'Shutting down VM after intercepting signal {}'.format(signum)
    lines = log_fifo.sequential_reader(5)
    msg_found = False
    for line in lines:
        if msg in line:
            msg_found = True
            break
    assert msg_found
Example #16
def test_failing_filter(test_microvm_with_api):
    """Test --seccomp-filter, denying some needed syscalls."""
    test_microvm = test_microvm_with_api

    _custom_filter_setup(
        test_microvm, """{
        "Vmm": {
            "default_action": "allow",
            "filter_action": "trap",
            "filter": []
        },
        "Api": {
            "default_action": "allow",
            "filter_action": "trap",
            "filter": []
        },
        "Vcpu": {
            "default_action": "allow",
            "filter_action": "trap",
            "filter": [
                {
                    "syscall": "ioctl"
                }
            ]
        }
    }""".encode('utf-8'))

    test_microvm.spawn()

    test_microvm.basic_config(vcpu_count=1)

    metrics_fifo_path = os.path.join(test_microvm.path, 'metrics_fifo')
    metrics_fifo = log_tools.Fifo(metrics_fifo_path)
    response = test_microvm.metrics.put(
        metrics_path=test_microvm.create_jailed_resource(metrics_fifo.path))
    assert test_microvm.api_session.is_status_no_content(response.status_code)

    # Start the VM with error checking off, because it will fail.
    test_microvm.start(check=False)

    # Give time for the process to get killed
    time.sleep(1)

    # Check the logger output
    ioctl_num = 16 if platform.machine() == "x86_64" else 29
    assert "Shutting down VM after intercepting a bad syscall ({})".format(
        str(ioctl_num)) in test_microvm.log_data

    # Check the metrics
    lines = metrics_fifo.sequential_reader(100)

    num_faults = 0
    for line in lines:
        num_faults += json.loads(line)["seccomp"]["num_faults"]

    assert num_faults == 1

    # assert that the process was killed
    assert not psutil.pid_exists(test_microvm.jailer_clone_pid)
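_custom_filter_setup is defined elsewhere in the test suite. A sketch of what it plausibly does, assuming the filter JSON is written to a temp file, hardlinked into the jail, and passed to Firecracker through a seccomp-filter jailer argument (the argument wiring is an assumption based on the test's docstring):

import os
import tempfile


def _custom_filter_setup(test_microvm, json_filter):
    """Stage a custom seccomp filter and wire it into the jailer args."""
    # Write the raw JSON filter bytes to a file the jailer can hardlink.
    json_temp = tempfile.NamedTemporaryFile(delete=False)
    json_temp.write(json_filter)
    json_temp.flush()
    test_microvm.create_jailed_resource(json_temp.name)
    # Ask the jailer to pass the jailed path via --seccomp-filter.
    test_microvm.jailer.extra_args.update(
        {'seccomp-filter': os.path.basename(json_temp.name)})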
Example #17
def test_vsock_epipe(test_microvm_with_api, network_config, bin_vsock_path,
                     test_fc_session_root_path):
    """
    Vsock negative test to validate SIGPIPE/EPIPE handling.

    @type: negative
    """
    vm = test_microvm_with_api
    vm.spawn()

    vm.basic_config()
    _tap, _, _ = vm.ssh_network_config(network_config, '1')
    vm.vsock.put(vsock_id="vsock0",
                 guest_cid=3,
                 uds_path="/{}".format(VSOCK_UDS_PATH))

    # Configure metrics to assert against `sigpipe` count.
    metrics_fifo_path = os.path.join(vm.path, 'metrics_fifo')
    metrics_fifo = log_tools.Fifo(metrics_fifo_path)
    response = vm.metrics.put(
        metrics_path=vm.create_jailed_resource(metrics_fifo.path))
    assert vm.api_session.is_status_no_content(response.status_code)

    vm.start()

    # Generate the random data blob file.
    blob_path, blob_hash = make_blob(test_fc_session_root_path)
    vm_blob_path = "/tmp/vsock/test.blob"

    # Set up a tmpfs drive on the guest, so we can copy the blob there.
    # Guest-initiated connections (echo workers) will use this blob.
    conn = SSHConnection(vm.ssh_config)
    cmd = "mkdir -p /tmp/vsock"
    cmd += " && mount -t tmpfs tmpfs -o size={} /tmp/vsock".format(BLOB_SIZE +
                                                                   1024 * 1024)
    ecode, _, _ = conn.execute_command(cmd)
    assert ecode == 0

    # Copy `vsock_helper` and the random blob to the guest.
    vsock_helper = bin_vsock_path
    conn.scp_file(vsock_helper, '/bin/vsock_helper')
    conn.scp_file(blob_path, vm_blob_path)

    path = os.path.join(vm.jailer.chroot_path(), VSOCK_UDS_PATH)
    # Negative test for host-initiated connections that are closed with
    # in-flight data.
    negative_test_host_connections(vm, path, blob_path, blob_hash)

    metrics = vm.flush_metrics(metrics_fifo)
    # Validate that at least 1 `SIGPIPE` signal was received.
    # Since we are reusing the existing echo server which triggers
    # reads/writes on the UDS backend connections, these might be closed
    # before a read() or a write() is about to be performed by the emulation.
    # The test uses 100 connections; that is enough to ensure at least one
    # is closed before write().
    #
    # If this ever fails due to 100 closes before read() we must
    # add extra tooling that will trigger only writes().
    assert metrics['signals']['sigpipe'] > 0
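make_blob (and negative_test_host_connections) come from the vsock test utilities and are not reproduced here. A sketch of a make_blob equivalent, assuming it returns the blob's path plus a digest used to verify echo integrity; BLOB_SIZE is the constant referenced above, and md5 is an assumed digest choice:

import hashlib
import os


def make_blob(dst_dir, size=BLOB_SIZE):
    """Write `size` random bytes to a file; return (path, hex digest)."""
    blob_path = os.path.join(dst_dir, 'test.blob')
    digest = hashlib.md5()
    with open(blob_path, 'wb') as blob_file:
        remaining = size
        while remaining > 0:
            chunk = os.urandom(min(remaining, 4096))
            digest.update(chunk)
            blob_file.write(chunk)
            remaining -= len(chunk)
    return blob_path, digest.hexdigest()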
Example #18
def test_flush_metrics(test_microvm_with_api):
    """
    Check the `FlushMetrics` vmm action.

    @type: functional
    """
    microvm = test_microvm_with_api
    microvm.spawn()
    microvm.basic_config()

    # Configure metrics system.
    metrics_fifo_path = os.path.join(microvm.path, "metrics_fifo")
    metrics_fifo = log_tools.Fifo(metrics_fifo_path)

    response = microvm.metrics.put(
        metrics_path=microvm.create_jailed_resource(metrics_fifo.path)
    )
    assert microvm.api_session.is_status_no_content(response.status_code)

    microvm.start()

    metrics = microvm.flush_metrics(metrics_fifo)

    exp_keys = [
        "utc_timestamp_ms",
        "api_server",
        "balloon",
        "block",
        "deprecated_api",
        "get_api_requests",
        "i8042",
        "latencies_us",
        "logger",
        "mmds",
        "net",
        "patch_api_requests",
        "put_api_requests",
        "seccomp",
        "vcpu",
        "vmm",
        "uart",
        "signals",
        "vsock",
    ]

    if platform.machine() == "aarch64":
        exp_keys.append("rtc")

    assert set(metrics.keys()) == set(exp_keys)

    utc_time = datetime.datetime.now(datetime.timezone.utc)
    utc_timestamp_ms = math.floor(utc_time.timestamp() * 1000)

    # Assert that the absolute difference is less than 1 second, to check that
    # the reported utc_timestamp_ms is actually a UTC timestamp from the Unix
    # Epoch. Regression test for:
    # https://github.com/firecracker-microvm/firecracker/issues/2639
    assert abs(utc_timestamp_ms - metrics["utc_timestamp_ms"]) < 1000
Example #19
    def build_from_snapshot(self,
                            snapshot: Snapshot,
                            resume=False,
                            # Enable incremental snapshot capability.
                            diff_snapshots=False,
                            use_ramdisk=False,
                            fc_binary=None, jailer_binary=None,
                            daemonize=True):
        """Build a microvm from a snapshot artifact."""
        vm = init_microvm(self.root_path, self.bin_cloner_path,
                          fc_binary, jailer_binary,)
        vm.jailer.daemonize = daemonize
        vm.spawn(log_level='Error', use_ramdisk=use_ramdisk)
        vm.api_session.untime()

        metrics_file_path = os.path.join(vm.path, 'metrics.log')
        metrics_fifo = log_tools.Fifo(metrics_file_path)
        response = vm.metrics.put(
            metrics_path=vm.create_jailed_resource(metrics_fifo.path)
        )
        assert vm.api_session.is_status_no_content(response.status_code)

        # Hardlink all the snapshot files into the microvm jail.
        jailed_mem = vm.copy_to_jail_ramfs(snapshot.mem) if use_ramdisk else \
            vm.create_jailed_resource(snapshot.mem)
        jailed_vmstate = vm.copy_to_jail_ramfs(snapshot.vmstate) \
            if use_ramdisk else vm.create_jailed_resource(snapshot.vmstate)

        assert len(snapshot.disks) > 0, "Snapshot requires at least one disk."
        _jailed_disks = []
        for disk in snapshot.disks:
            _jailed_disks.append(vm.copy_to_jail_ramfs(disk) if use_ramdisk
                                 else vm.create_jailed_resource(disk))

        vm.ssh_config['ssh_key_path'] = snapshot.ssh_key.local_path()

        # Create network interfaces.
        for iface in snapshot.net_ifaces:
            vm.create_tap_and_ssh_config(host_ip=iface.host_ip,
                                         guest_ip=iface.guest_ip,
                                         netmask_len=iface.netmask,
                                         tapname=iface.tap_name)
        response = vm.snapshot.load(mem_file_path=jailed_mem,
                                    snapshot_path=jailed_vmstate,
                                    diff=diff_snapshots,
                                    resume=resume)
        status_ok = vm.api_session.is_status_no_content(response.status_code)

        # Verify response status and cleanup if needed before assert.
        if not status_ok:
            # Destroy VM here before we assert.
            vm.kill()
            del vm

        assert status_ok, response.text

        # Return a resumed microvm.
        return vm, metrics_fifo
Example #20
def _test_microvm_boottime(
        microvm,
        net_config
):
    """Assert that we meet the minimum boot time.

    TODO: Should use a microVM with the `boottime` capability.
    """
    microvm.spawn()

    microvm.basic_config(
        vcpu_count=2,
        mem_size_mib=1024
    )
    if net_config:
        _tap, _, _ = microvm.ssh_network_config(net_config, '1')

    # Configure logging.
    log_fifo_path = os.path.join(microvm.path, 'log_fifo')
    metrics_fifo_path = os.path.join(microvm.path, 'metrics_fifo')
    log_fifo = log_tools.Fifo(log_fifo_path)
    metrics_fifo = log_tools.Fifo(metrics_fifo_path)

    response = microvm.logger.put(
        log_fifo=microvm.create_jailed_resource(log_fifo.path),
        metrics_fifo=microvm.create_jailed_resource(metrics_fifo.path),
        level='Warning',
        show_level=False,
        show_log_origin=False
    )
    assert microvm.api_session.is_good_response(response.status_code)

    microvm.start()
    time.sleep(0.4)
    lines = log_fifo.sequential_reader(20)

    boot_time_us = 0
    for line in lines:
        timestamps = re.findall(TIMESTAMP_LOG_REGEX, line)
        if timestamps:
            boot_time_us = int(timestamps[0])

    assert boot_time_us > 0
    assert boot_time_us < MAX_BOOT_TIME_US
    return boot_time_us
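TIMESTAMP_LOG_REGEX is defined outside this listing. One plausible definition, assuming the guest emits a boot-time marker such as "Guest-boot-time = 123456 us" that ends up in Firecracker's log (the marker text is an assumption; only the single capture group matters to the loop above):

import re

# Hypothetical pattern: capture the microsecond value from the marker line.
TIMESTAMP_LOG_REGEX = r'Guest-boot-time\s*=\s*(\d+)\s*us'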
Example #21
def _test_snapshot_create_latency(context):
    logger = context.custom['logger']
    vm_builder = context.custom['builder']
    snapshot_type = context.custom['snapshot_type']
    enable_diff_snapshots = snapshot_type == SnapshotType.DIFF

    logger.info("""Measuring snapshot create({}) latency for microvm: \"{}\",
kernel {}, disk {} """.format(snapshot_type,
                              context.microvm.name(),
                              context.kernel.name(),
                              context.disk.name()))

    # Create a rw copy artifact.
    rw_disk = context.disk.copy()
    # Get ssh key from read-only artifact.
    ssh_key = context.disk.ssh_key()

    # Measure a burst of snapshot create calls.
    for i in range(SAMPLE_COUNT):
        # Create a fresh microvm from artifacts.
        vm = vm_builder.build(kernel=context.kernel,
                              disks=[rw_disk],
                              ssh_key=ssh_key,
                              config=context.microvm,
                              enable_diff_snapshots=enable_diff_snapshots)

        # Configure metrics system.
        metrics_fifo_path = os.path.join(vm.path, 'metrics_fifo')
        metrics_fifo = log_tools.Fifo(metrics_fifo_path)

        response = vm.metrics.put(
            metrics_path=vm.create_jailed_resource(metrics_fifo.path)
        )
        assert vm.api_session.is_status_no_content(response.status_code)

        vm.start()

        # Create a snapshot builder from a microvm.
        snapshot_builder = SnapshotBuilder(vm)
        snapshot_builder.create([rw_disk],
                                ssh_key,
                                snapshot_type)
        metrics = vm.flush_metrics(metrics_fifo)

        if snapshot_type == SnapshotType.FULL:
            value = metrics['latencies_us']['full_create_snapshot']
            baseline = CREATE_LATENCY_BASELINES[context.microvm.name()]['FULL']
        else:
            value = metrics['latencies_us']['diff_create_snapshot']
            baseline = CREATE_LATENCY_BASELINES[context.microvm.name()]['DIFF']

        value = value / USEC_IN_MSEC

        assert baseline > value, "CreateSnapshot latency degraded."

        logger.info("Latency {}/3: {} ms".format(i + 1, value))
        vm.kill()
Example #22
def test_serial_block(test_microvm_with_api, network_config):
    """
    Test that writing to stdout never blocks the vCPU thread.

    @type: functional
    """
    test_microvm = test_microvm_with_api
    test_microvm.jailer.daemonize = False
    test_microvm.spawn()
    # Set up the microVM with 1 vCPU so we make sure the vCPU thread
    # responsible for the SSH connection will also run the serial.
    test_microvm.basic_config(
        vcpu_count=1,
        mem_size_mib=512,
        boot_args="console=ttyS0 reboot=k panic=1 pci=off",
    )

    _tap, _, _ = test_microvm.ssh_network_config(network_config, "1")

    # Configure the metrics.
    metrics_fifo_path = os.path.join(test_microvm.path, "metrics_fifo")
    metrics_fifo = log_tools.Fifo(metrics_fifo_path)
    response = test_microvm.metrics.put(
        metrics_path=test_microvm.create_jailed_resource(metrics_fifo.path))
    assert test_microvm.api_session.is_status_no_content(response.status_code)

    test_microvm.start()
    ssh_connection = net_tools.SSHConnection(test_microvm.ssh_config)

    # Get an initial reading of missed writes to the serial.
    fc_metrics = test_microvm.flush_metrics(metrics_fifo)
    init_count = fc_metrics["uart"]["missed_write_count"]

    screen_pid = test_microvm.screen_pid
    # Stop `screen` process which captures stdout so we stop consuming stdout.
    subprocess.check_call("kill -s STOP {}".format(screen_pid), shell=True)

    # Generate a random text file.
    exit_code, _, _ = ssh_connection.execute_command(
        "base64 /dev/urandom | head -c 100000 > file.txt")

    # Dump output to terminal
    exit_code, _, _ = ssh_connection.execute_command(
        "cat file.txt > /dev/ttyS0")
    assert exit_code == 0

    # Check that the vCPU isn't blocked.
    exit_code, _, _ = ssh_connection.execute_command("cd /")
    assert exit_code == 0

    # Check the metrics to see if the serial missed bytes.
    fc_metrics = test_microvm.flush_metrics(metrics_fifo)
    last_count = fc_metrics["uart"]["missed_write_count"]

    # Should be significantly more than before the `cat` command.
    assert last_count - init_count > 10000
Example #23
def test_config_start_no_api(test_microvm_with_ssh, vm_config_file):
    """Test microvm start when API server thread is disabled."""
    test_microvm = test_microvm_with_ssh

    log_fifo_path = os.path.join(test_microvm.path, 'log_fifo')
    metrics_fifo_path = os.path.join(test_microvm.path, 'metrics_fifo')
    log_fifo = log_tools.Fifo(log_fifo_path)
    metrics_fifo = log_tools.Fifo(metrics_fifo_path)
    test_microvm.create_jailed_resource(log_fifo.path, create_jail=True)
    test_microvm.create_jailed_resource(metrics_fifo.path, create_jail=True)

    _configure_vm_from_json(test_microvm, vm_config_file)
    test_microvm.jailer.extra_args.update({'no-api': None})

    test_microvm.spawn()

    # Get Firecracker PID so we can check the names of threads.
    firecracker_pid = test_microvm.jailer_clone_pid

    # Get names of threads in Firecracker.
    cmd = 'ps -T --no-headers -p {} | awk \'{{print $5}}\''.format(
        firecracker_pid)

    # Retry running 'ps' in case it failed to list the firecracker process.
    # The regex matches any expression that contains 'firecracker' and does
    # not contain 'fc_api'.
    retry_call(utils.search_output_from_cmd,
               fkwargs={
                   "cmd":
                   cmd,
                   "find_regex":
                   re.compile("^(?!.*fc_api)(?:.*)?firecracker", re.DOTALL)
               },
               exceptions=RuntimeError,
               tries=10,
               delay=1)

    # Check that microvm was successfully booted.
    lines = log_fifo.sequential_reader(1)
    assert lines[0].startswith('Running Firecracker')
Example #24
def test_cmd_line_logger(test_microvm_with_api):
    """Test logger configuration with fifo parameter."""
    microvm = test_microvm_with_api

    log_fifo_path = os.path.join(microvm.path, 'log_fifo')
    log_fifo = log_tools.Fifo(log_fifo_path)
    microvm.create_jailed_resource(log_fifo.path, create_jail=True)

    microvm.jailer.extra_args = {'log-path': 'log_fifo'}
    microvm.spawn()

    # Check that the logger was indeed configured.
    lines = log_fifo.sequential_reader(1)
    assert lines[0].startswith('Running Firecracker')
Example #25
def test_config_json_start(test_microvm_with_ssh, vm_config_file):
    """Start a microvm using configuration sent as command line parameter.

    Create resources needed for the configuration of the microvm, then
    start a process which receives as command line parameter, one json
    for that configuration.
    """
    test_microvm = test_microvm_with_ssh

    test_microvm.create_jailed_resource(test_microvm.kernel_file,
                                        create_jail=True)
    test_microvm.create_jailed_resource(test_microvm.rootfs_file,
                                        create_jail=True)

    log_fifo_path = os.path.join(test_microvm.path, 'log_fifo')
    metrics_fifo_path = os.path.join(test_microvm.path, 'metrics_fifo')
    log_fifo = log_tools.Fifo(log_fifo_path)
    metrics_fifo = log_tools.Fifo(metrics_fifo_path)
    test_microvm.create_jailed_resource(log_fifo.path, create_jail=True)
    test_microvm.create_jailed_resource(metrics_fifo.path, create_jail=True)

    # vm_config_file is the source file that keeps the desired vmm
    # configuration. vm_config_path is the configuration file we
    # create inside the jail, such that it can be accessed by
    # firecracker after it starts.
    vm_config_path = os.path.join(test_microvm.path,
                                  os.path.basename(vm_config_file))
    with open(vm_config_file) as f1:
        with open(vm_config_path, "w") as f2:
            for line in f1:
                f2.write(line)
    test_microvm.create_jailed_resource(vm_config_path, create_jail=True)
    test_microvm.config_file = os.path.basename(vm_config_file)
    test_microvm.spawn()

    response = test_microvm.machine_cfg.get()
    assert test_microvm.api_session.is_status_ok(response.status_code)
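As an aside, the line-by-line copy above is equivalent to a single shutil call:

import shutil

shutil.copyfile(vm_config_file, vm_config_path)  # same effect as the loop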
Example #26
    def build_from_snapshot(
            self,
            snapshot: Snapshot,
            host_ip=None,
            guest_ip=None,
            netmask_len=None,
            resume=False,
            # Enable incremental snapshot capability.
            enable_diff_snapshots=False):
        """Build a microvm from a snapshot artifact."""
        self.init_root_path()
        vm = init_microvm(self.root_path, self.bin_cloner_path)

        vm.spawn(log_level='Info')

        metrics_file_path = os.path.join(vm.path, 'metrics.log')
        metrics_fifo = log_tools.Fifo(metrics_file_path)
        response = vm.metrics.put(
            metrics_path=vm.create_jailed_resource(metrics_fifo.path))
        assert vm.api_session.is_status_no_content(response.status_code)

        # Hardlink all the snapshot files into the microvm jail.
        jailed_mem = vm.create_jailed_resource(snapshot.mem)
        jailed_vmstate = vm.create_jailed_resource(snapshot.vmstate)
        assert len(snapshot.disks) > 0, "Snapshot requires at least one disk."
        _jailed_disks = []
        for disk in snapshot.disks:
            _jailed_disks.append(vm.create_jailed_resource(disk))
        vm.ssh_config['ssh_key_path'] = snapshot.ssh_key

        if host_ip is not None:
            vm.create_tap_and_ssh_config(host_ip=host_ip,
                                         guest_ip=guest_ip,
                                         netmask_len=netmask_len,
                                         tapname="tap0")

        response = vm.snapshot_load.put(mem_file_path=jailed_mem,
                                        snapshot_path=jailed_vmstate,
                                        diff=enable_diff_snapshots)

        assert vm.api_session.is_status_no_content(response.status_code)

        if resume:
            # Resume microvm
            response = vm.vm.patch(state='Resumed')
            assert vm.api_session.is_status_no_content(response.status_code)

        # Return a resumed microvm.
        return vm, metrics_fifo
Example #27
    def build_from_snapshot(
            self,
            snapshot: Snapshot,
            resume=False,
            # Enable incremental snapshot capability.
            enable_diff_snapshots=False):
        """Build a microvm from a snapshot artifact."""
        vm = init_microvm(self.root_path, self.bin_cloner_path,
                          self._fc_binary, self._jailer_binary)
        vm.spawn(log_level='Info')

        metrics_file_path = os.path.join(vm.path, 'metrics.log')
        metrics_fifo = log_tools.Fifo(metrics_file_path)
        response = vm.metrics.put(
            metrics_path=vm.create_jailed_resource(metrics_fifo.path))
        assert vm.api_session.is_status_no_content(response.status_code)

        # Hardlink all the snapshot files into the microvm jail.
        jailed_mem = vm.create_jailed_resource(snapshot.mem)
        jailed_vmstate = vm.create_jailed_resource(snapshot.vmstate)
        assert len(snapshot.disks) > 0, "Snapshot requires at least one disk."
        _jailed_disks = []
        for disk in snapshot.disks:
            _jailed_disks.append(vm.create_jailed_resource(disk))
        vm.ssh_config['ssh_key_path'] = snapshot.ssh_key

        vm.create_tap_and_ssh_config(host_ip=DEFAULT_HOST_IP,
                                     guest_ip=DEFAULT_GUEST_IP,
                                     netmask_len=DEFAULT_NETMASK,
                                     tapname=DEFAULT_TAP_NAME)

        response = vm.snapshot_load.put(mem_file_path=jailed_mem,
                                        snapshot_path=jailed_vmstate,
                                        diff=enable_diff_snapshots)

        assert vm.api_session.is_status_no_content(response.status_code)

        if resume:
            # Resume microvm
            response = vm.vm.patch(state='Resumed')
            assert vm.api_session.is_status_no_content(response.status_code)

        # Reset the root path so the next microvm is built somewhere else.
        self.init_root_path()

        # Return a resumed microvm.
        return vm, metrics_fifo
Example #28
def test_no_flush(test_microvm_with_api, network_config):
    """
    Verify default block ignores flush.

    @type: functional
    """
    test_microvm = test_microvm_with_api
    test_microvm.spawn()

    test_microvm.basic_config(
        vcpu_count=1,
        add_root_device=False
    )

    _tap, _, _ = test_microvm.ssh_network_config(network_config, '1')

    # Add the block device
    test_microvm.add_drive(
        'rootfs',
        test_microvm.rootfs_file,
        root_device=True,
    )

    # Configure the metrics.
    metrics_fifo_path = os.path.join(test_microvm.path, 'metrics_fifo')
    metrics_fifo = log_tools.Fifo(metrics_fifo_path)
    response = test_microvm.metrics.put(
        metrics_path=test_microvm.create_jailed_resource(metrics_fifo.path)
    )
    assert test_microvm.api_session.is_status_no_content(response.status_code)

    test_microvm.start()

    # Verify all flush commands were ignored during boot.
    fc_metrics = test_microvm.flush_metrics(metrics_fifo)
    assert fc_metrics['block']['flush_count'] == 0

    # Have the guest drop the caches to generate flush requests.
    ssh_connection = net_tools.SSHConnection(test_microvm.ssh_config)
    cmd = "sync; echo 1 > /proc/sys/vm/drop_caches"
    _, _, stderr = ssh_connection.execute_command(cmd)
    assert stderr.read() == ''

    # Verify all flush commands were ignored even after
    # dropping the caches.
    fc_metrics = test_microvm.flush_metrics(metrics_fifo)
    assert fc_metrics['block']['flush_count'] == 0
Example #29
def test_flush(test_microvm_with_api, network_config):
    """
    Verify block with flush actually flushes.

    @type: functional
    """
    test_microvm = test_microvm_with_api
    test_microvm.spawn()

    test_microvm.basic_config(
        vcpu_count=1,
        add_root_device=False
    )

    _tap, _, _ = test_microvm.ssh_network_config(network_config, '1')

    # Add the block device, explicitly enabling flush.
    test_microvm.add_drive(
        'rootfs',
        test_microvm.rootfs_file,
        root_device=True,
        cache_type="Writeback",
    )

    # Configure metrics, to get later the `flush_count`.
    metrics_fifo_path = os.path.join(test_microvm.path, 'metrics_fifo')
    metrics_fifo = log_tools.Fifo(metrics_fifo_path)
    response = test_microvm.metrics.put(
        metrics_path=test_microvm.create_jailed_resource(metrics_fifo.path)
    )
    assert test_microvm.api_session.is_status_no_content(response.status_code)

    test_microvm.start()

    # Have the guest drop the caches to generate flush requests.
    ssh_connection = net_tools.SSHConnection(test_microvm.ssh_config)
    cmd = "sync; echo 1 > /proc/sys/vm/drop_caches"
    _, _, stderr = ssh_connection.execute_command(cmd)
    assert stderr.read() == ''

    # On average, dropping the caches right after boot generates
    # about 6 block flush requests.
    fc_metrics = test_microvm.flush_metrics(metrics_fifo)
    assert fc_metrics['block']['flush_count'] > 0
Example #30
def test_flush_metrics(test_microvm_with_api):
    """Check the `FlushMetrics` vmm action."""
    microvm = test_microvm_with_api
    microvm.spawn()
    microvm.basic_config()

    # Configure metrics system.
    metrics_fifo_path = os.path.join(microvm.path, 'metrics_fifo')
    metrics_fifo = log_tools.Fifo(metrics_fifo_path)

    response = microvm.metrics.put(
        metrics_path=microvm.create_jailed_resource(metrics_fifo.path))
    assert microvm.api_session.is_status_no_content(response.status_code)

    microvm.start()

    metrics = microvm.flush_metrics(metrics_fifo)

    exp_keys = [
        'utc_timestamp_ms', 'api_server', 'balloon', 'block',
        'get_api_requests', 'i8042', 'latencies_us', 'logger', 'mmds', 'net',
        'patch_api_requests', 'put_api_requests', 'seccomp', 'vcpu', 'vmm',
        'uart', 'signals', 'vsock'
    ]

    if platform.machine() == "aarch64":
        exp_keys.append("rtc")

    assert set(metrics.keys()) == set(exp_keys)

    utc_time = datetime.datetime.now(datetime.timezone.utc)
    utc_timestamp_ms = math.floor(utc_time.timestamp() * 1000)

    # Assert that the absolute difference is less than 1 second, to check that
    # the reported utc_timestamp_ms is actually a UTC timestamp from the Unix
    # Epoch. Regression test for:
    # https://github.com/firecracker-microvm/firecracker/issues/2639
    assert abs(utc_timestamp_ms - metrics['utc_timestamp_ms']) < 1000