def test_sudoers_includedir(client: IntegrationInstance):
    """Ensure we don't add additional #includedir to sudoers.

    Newer versions of /etc/sudoers will use @includedir rather than
    #includedir. Ensure we handle that properly and don't include an
    additional #includedir when one isn't warranted.

    https://github.com/canonical/cloud-init/pull/783
    """
    if ImageSpecification.from_os_image().release in [
            'xenial', 'bionic', 'focal'
    ]:
        # BUGFIX: pytest.skip() raises Skipped itself; the previous
        # `raise pytest.skip(...)` was a misleading no-op `raise`.
        pytest.skip(
            'Test requires version of sudo installed on groovy and later')
    # Rewrite legacy '#include' directives to the modern '@include' form.
    client.execute("sed -i 's/#include/@include/g' /etc/sudoers")

    sudoers = client.read_from_file('/etc/sudoers')
    if '@includedir /etc/sudoers.d' not in sudoers:
        client.execute("echo '@includedir /etc/sudoers.d' >> /etc/sudoers")
    client.instance.clean()
    client.restart()
    sudoers = client.read_from_file('/etc/sudoers')

    # After reboot, cloud-init must not have appended a duplicate or
    # legacy-style includedir directive.
    assert '#includedir' not in sudoers
    assert sudoers.count('includedir /etc/sudoers.d') == 1
Exemple #2
0
def test_boot_event_disabled_by_default(client: IntegrationInstance):
    """Reboot and verify network config is NOT reapplied.

    Boot events are disabled by default, so both network-apply attempts
    on the second boot must be denied and the manually added dummy
    bridge must survive the reboot.
    """
    first_boot_log = client.read_from_file("/var/log/cloud-init.log")
    if "network config is disabled" in first_boot_log:
        pytest.skip("network config disabled. Test doesn't apply")
    assert "Applying network configuration" in first_boot_log
    assert "dummy0" not in client.execute("ls /sys/class/net")

    _add_dummy_bridge_to_netplan(client)
    client.execute("rm /var/log/cloud-init.log")

    client.restart()
    reboot_log = client.read_from_file("/var/log/cloud-init.log")

    if "cache invalid in datasource" in reboot_log:
        # An invalid cache gets cleared, which makes a fresh "instance"
        # that legitimately applies networking, so boot events are moot.
        pytest.skip("Test only valid for existing instances")

    # Network config application is attempted twice on every boot;
    # ensure both attempts were denied for boot and boot-legacy events.
    denied_boot_events = re.findall(
        r"Event Denied: scopes=\['network'\] EventType=boot[^-]", reboot_log)
    assert len(denied_boot_events) == 2
    assert 2 == reboot_log.count(
        "Event Denied: scopes=['network'] EventType=boot-legacy")
    assert 2 == reboot_log.count(
        "No network config applied. Neither a new instance"
        " nor datasource network update allowed")

    assert "dummy0" in client.execute("ls /sys/class/net")
Exemple #3
0
def _customize_envionment(client: IntegrationInstance):
    """Pin datasource_list to [LXD, NoCloud] and cleanly reboot.

    Before reconfiguring, assert that ds-identify already detected both
    LXD and NoCloud during the systemd generator timeframe.
    """
    # Assert our platform can detect LXD during the systemd generator
    # timeframe.
    ds_identify_output = client.execute(
        "cat /run/cloud-init/ds-identify.log").stdout
    assert "check for 'LXD' returned found" in ds_identify_output

    # At some point Jammy will fail this test. We want to be informed
    # when Jammy images no longer ship NoCloud template files (LP: #1958460).
    assert "check for 'NoCloud' returned found" in ds_identify_output

    if client.settings.PLATFORM == "lxd_vm":
        # ds-identify runs at systemd generator time before /dev/lxd/sock
        # exists; assert the DMI artifact which indicates LXD is viable.
        board_name = client.execute("cat /sys/class/dmi/id/board_name")
        if not board_name.ok:
            raise AssertionError(
                "Missing expected /sys/class/dmi/id/board_name")
        if "LXD" != board_name.stdout:
            raise AssertionError(
                f"DMI board_name is not LXD: {board_name.stdout}")

    # Having multiple datasources prevents ds-identify from
    # short-circuiting detection logic with a log like:
    #     single entry in datasource_list (LXD) use that.
    # NoCloud is detected during the init-local timeframe; if there is a
    # race on VMs where /dev/lxd/sock is not set up in init-local,
    # cloud-init will fall back to NoCloud and fail this test.
    client.write_to_file(
        "/etc/cloud/cloud.cfg.d/99-detect-lxd-first.cfg",
        "datasource_list: [LXD, NoCloud]\n",
    )
    client.execute("cloud-init clean --logs")
    client.restart()
    def test_device_alias(self, create_disk, client: IntegrationInstance):
        """Verify disk_setup resolved the 'my_alias' alias to /dev/sdb."""
        log = client.read_from_file("/var/log/cloud-init.log")
        for expected_entry in (
            "updated disk_setup device entry 'my_alias' to '/dev/sdb'",
            "changed my_alias.1 => /dev/sdb1",
            "changed my_alias.2 => /dev/sdb2",
        ):
            assert expected_entry in log
        verify_clean_log(log)

        block_devices = json.loads(client.execute("lsblk --json"))
        sdb = next(
            dev for dev in block_devices["blockdevices"]
            if dev["name"] == "sdb")
        children = sdb["children"]
        assert len(children) == 2
        assert children[0]["name"] == "sdb1"
        assert children[1]["name"] == "sdb2"
        # Older lsblk reports a scalar "mountpoint"; newer versions
        # report a "mountpoints" list instead.
        if "mountpoint" in children[0]:
            assert children[0]["mountpoint"] == "/mnt1"
            assert children[1]["mountpoint"] == "/mnt2"
        else:
            assert children[0]["mountpoints"] == ["/mnt1"]
            assert children[1]["mountpoints"] == ["/mnt2"]

        mount_result = client.execute("mount -a")
        assert mount_result.return_code == 0
        assert mount_result.stdout.strip() == ""
        assert mount_result.stderr.strip() == ""
        for mount_point in ("/mnt1", "/mnt2"):
            findmnt_result = client.execute(f"findmnt -J {mount_point}")
            assert findmnt_result.return_code == 0
def test_hotplug_add_remove(client: IntegrationInstance):
    """Hot-add a NIC, verify it is configured and UP, then hot-remove it."""
    ips_before = _get_ip_addr(client)
    log = client.read_from_file("/var/log/cloud-init.log")
    assert "Exiting hotplug handler" not in log
    assert client.execute(
        "test -f /etc/udev/rules.d/10-cloud-init-hook-hotplug.rules").ok

    # Add new NIC
    added_ip = client.instance.add_network_interface()
    _wait_till_hotplug_complete(client, expected_runs=1)
    ips_after_add = _get_ip_addr(client)
    new_nic = next(ip for ip in ips_after_add if ip.ip4 == added_ip)

    assert len(ips_after_add) == len(ips_before) + 1
    assert added_ip not in [ip.ip4 for ip in ips_before]
    assert added_ip in [ip.ip4 for ip in ips_after_add]
    assert new_nic.state == "UP"

    # netplan must now render the hot-added interface.
    netplan_config = yaml.safe_load(
        client.read_from_file("/etc/netplan/50-cloud-init.yaml"))
    assert new_nic.interface in netplan_config["network"]["ethernets"]

    # Remove new NIC
    client.instance.remove_network_interface(added_ip)
    _wait_till_hotplug_complete(client, expected_runs=2)
    ips_after_remove = _get_ip_addr(client)
    assert len(ips_after_remove) == len(ips_before)
    assert added_ip not in [ip.ip4 for ip in ips_after_remove]

    # ...and the rendered config must no longer mention it.
    netplan_config = yaml.safe_load(
        client.read_from_file("/etc/netplan/50-cloud-init.yaml"))
    assert new_nic.interface not in netplan_config["network"]["ethernets"]

    assert "enabled" == client.execute(
        "cloud-init devel hotplug-hook -s net query")
Exemple #6
0
def test_oci_networking_iscsi_instance(client: IntegrationInstance, tmpdir):
    """iSCSI OCI instances take primary NIC config from /run/net-*.conf.

    With secondary NICs disabled, no vnic metadata may be fetched, and
    every interface named by a /run/net-*.conf file must end up in the
    rendered netplan config.
    """
    customize_environment(client, tmpdir, configure_secondary_nics=False)
    result_net_files = client.execute("ls /run/net-*.conf")
    assert result_net_files.ok, "No net files found under /run"

    log = client.read_from_file("/var/log/cloud-init.log")
    verify_clean_log(log)

    assert ("opc/v2/vnics/"
            not in log), "vnic data was fetched and it should not have been"

    netplan_yaml = client.read_from_file("/etc/netplan/50-cloud-init.yaml")
    netplan_cfg = yaml.safe_load(netplan_yaml)
    configured_interfaces = extract_interface_names(netplan_cfg["network"])
    assert 1 <= len(configured_interfaces
                    ), "Expected at least 1 primary network configuration."

    expected_interfaces = set(
        re.findall(r"/run/net-(.+)\.conf", result_net_files.stdout))
    for expected_interface in expected_interfaces:
        # BUGFIX: the assertion message lacked its f-prefix, so the
        # {expected_interface}/{log} placeholders never interpolated.
        assert (f"Reading from /run/net-{expected_interface}.conf"
                in log), f"Expected {expected_interface} not found in: {log}"

    not_found_interfaces = expected_interfaces.difference(
        configured_interfaces)
    assert not not_found_interfaces, (
        f"Interfaces, {not_found_interfaces}, expected to be configured in"
        f" {netplan_cfg['network']}")
    assert client.execute("ping -c 2 canonical.com").ok
def _check_iid_insensitive_across_kernel_upgrade(
    instance: IntegrationInstance,
):
    """Assert instance-id comparison is case-insensitive across kernels.

    FIPS kernels expose an uppercase DMI product_uuid while 5.4+
    linux-azure kernels lowercase it; cloud-init must treat both as the
    same instance and thus not re-run per-instance modules (config_ssh).
    """
    uuid = instance.read_from_file("/sys/class/dmi/id/product_uuid")
    assert (uuid.isupper()
            ), "Expected uppercase UUID on Ubuntu FIPS image {}".format(uuid)
    orig_kernel = instance.execute("uname -r").strip()
    assert "azure-fips" in orig_kernel
    # BUGFIX: the apt-get update result was assigned and then silently
    # discarded; fail fast with diagnostics if refreshing sources fails.
    result = instance.execute("apt-get update")
    assert result.ok, "Unable to update apt sources: {}".format(result)
    # Install a 5.4+ kernel which provides lowercase product_uuid
    result = instance.execute("apt-get install linux-azure --assume-yes")
    if not result.ok:
        pytest.fail("Unable to install linux-azure kernel: {}".format(result))
    # Remove ubuntu-azure-fips metapkg which mandates FIPS-flavour kernel
    result = instance.execute("ua disable fips --assume-yes")
    assert result.ok, "Unable to disable fips: {}".format(result)
    instance.restart()
    new_kernel = instance.execute("uname -r").strip()
    assert orig_kernel != new_kernel
    assert "azure-fips" not in new_kernel
    assert "azure" in new_kernel
    new_uuid = instance.read_from_file("/sys/class/dmi/id/product_uuid")
    assert (
        uuid.lower() == new_uuid
    ), "Expected UUID on linux-azure to be lowercase of FIPS: {}".format(uuid)
    # config_ssh must have run exactly once (first boot); a second run
    # would mean the uuid case change was treated as a new instance.
    log = instance.read_from_file("/var/log/cloud-init.log")
    RE_CONFIG_SSH_SEMAPHORE = r"Writing.*sem/config_ssh "
    ssh_runs = len(re.findall(RE_CONFIG_SSH_SEMAPHORE, log))
    assert 1 == ssh_runs, "config_ssh ran too many times {}".format(ssh_runs)
def _customize_envionment(client: IntegrationInstance):
    """Restrict datasource detection to LXD only, then reboot cleanly."""
    lxd_only_cfg = "datasource_list: [LXD]\n"
    client.write_to_file(
        "/etc/cloud/cloud.cfg.d/99-detect-lxd.cfg", lxd_only_cfg)
    client.execute("cloud-init clean --logs")
    client.restart()
Exemple #9
0
def _collect_logs(instance: IntegrationInstance, node_id: str,
                  test_failed: bool):
    """Collect logs from remote instance.

    Args:
        instance: The current IntegrationInstance to collect logs from
        node_id: The pytest representation of this test, E.g.:
            tests/integration_tests/test_example.py::TestExample.test_example
        test_failed: If test failed or not
    """
    if any([
            integration_settings.COLLECT_LOGS == 'NEVER',
            integration_settings.COLLECT_LOGS == 'ON_ERROR' and not test_failed
    ]):
        return
    instance.execute(
        'cloud-init collect-logs -u -t /var/tmp/cloud-init.tar.gz')
    node_id_path = Path(
        node_id.replace('.py',
                        '')  # Having a directory with '.py' would be weird
        .replace('::', os.path.sep)  # Turn classes/tests into paths
        .replace('[', '-')  # For parametrized names
        .replace(']', '')  # For parameterized names
    )
    log_dir = Path(integration_settings.LOCAL_LOG_PATH
                   ) / session_start_time / node_id_path
    log.info("Writing logs to %s", log_dir)
    if not log_dir.exists():
        log_dir.mkdir(parents=True)
    tarball_path = log_dir / 'cloud-init.tar.gz'
    instance.pull_file('/var/tmp/cloud-init.tar.gz', tarball_path)

    # BUGFIX: close the tarball deterministically (the handle was
    # previously leaked) before deleting the archive.
    with TarFile.open(str(tarball_path)) as tarball:
        tarball.extractall(path=str(log_dir))
    tarball_path.unlink()
Exemple #10
0
def _customize_environment(client: IntegrationInstance):
    """Drop a cloud.cfg.d snippet disabling network activation; reboot."""
    # Insert our "disable_network_activation" file here
    snippet = "disable_network_activation: true\n"
    client.write_to_file(
        "/etc/cloud/cloud.cfg.d/99-disable-network-activation.cfg", snippet)
    client.execute("cloud-init clean --logs")
    client.restart()
Exemple #11
0
def custom_client(client: IntegrationInstance,
                  tmpdir) -> Iterator[IntegrationInstance]:
    """Yield a client rebooted with a custom cloud_dir configuration."""
    client.write_to_file(
        f"/etc/cloud/cloud.cfg.d/{CUSTOM_CLOUD_DIR_FN}", CUSTOM_CLOUD_DIR)
    # Remove previous cloud_dir so only the custom location remains.
    client.execute(f"rm -rf {DEFAULT_CLOUD_DIR}")
    client.execute("cloud-init clean --logs")
    client.restart()
    yield client
def test_log_message_on_missing_version_file(client: IntegrationInstance):
    """A cached pickle without python-version logs an 'unknown' status."""
    # Start by pushing a pickle so we can see the log message
    client.push_file(TEST_PICKLE, PICKLE_PATH)
    client.execute("rm /var/lib/cloud/data/python-version")
    client.restart()
    boot_log = client.read_from_file('/var/log/cloud-init.log')
    expected_message = ('Writing python-version file. '
                        'Cache compatibility status is currently unknown.')
    assert expected_message in boot_log
Exemple #13
0
def _customize_envionment(client: IntegrationInstance):
    """Disable cloud-init network configuration, then reboot cleanly."""
    # Insert our "disable_network_config" file here
    disable_cfg = "network: {config: disabled}\n"
    client.write_to_file(
        "/etc/cloud/cloud.cfg.d/99-disable-network-config.cfg", disable_cfg)
    client.execute("cloud-init clean --logs")
    client.restart()
Exemple #14
0
def test_cache_purged_on_version_change(client: IntegrationInstance):
    """A python-version mismatch must purge the (invalid) object cache."""
    # Start by pushing the invalid pickle so we'll hit an error if the
    # cache didn't actually get purged
    client.push_file(TEST_PICKLE, PICKLE_PATH)
    client.execute("echo '1.0' > /var/lib/cloud/data/python-version")
    client.restart()
    boot_log = client.read_from_file("/var/log/cloud-init.log")
    assert "Python version change detected. Purging cache" in boot_log
    _assert_no_pickle_problems(boot_log)
def test_no_home_directory_created(client: IntegrationInstance):
    """Ensure cc_ssh_authkey_fingerprints doesn't create user directories"""
    home_listing = client.execute("ls /home")
    # Neither user may have gained a home directory...
    assert "nch" not in home_listing
    assert "system" not in home_listing

    # ...even though both users exist in passwd.
    passwd_contents = client.execute("cat /etc/passwd")
    assert re.search("^nch:", passwd_contents, re.MULTILINE)
    assert re.search("^system:", passwd_contents, re.MULTILINE)
Exemple #16
0
 def test_cloud_id_file_symlink(self, class_client: IntegrationInstance):
     """/run/cloud-init/cloud-id links to the datasource-specific file."""
     cloud_id = class_client.execute("cloud-id").stdout
     stat_output = str(
         class_client.execute("stat -c %N /run/cloud-init/cloud-id"))
     assert stat_output == (
         "'/run/cloud-init/cloud-id' -> "
         f"'/run/cloud-init/cloud-id-{cloud_id}'"
     )
Exemple #17
0
def test_wakeonlan(client: IntegrationInstance):
    """Verify wakeonlan reaches eth0's rendered network configuration."""
    if ImageSpecification.from_os_image().release == "xenial":
        # Xenial renders ENI rather than netplan.
        eni = client.execute("cat /etc/network/interfaces.d/50-cloud-init.cfg")
        assert eni.endswith(EXPECTED_ENI_END)
        return

    rendered_netplan = client.execute("cat /etc/netplan/50-cloud-init.yaml")
    eth0_cfg = yaml.safe_load(rendered_netplan)["network"]["ethernets"]["eth0"]
    assert "wakeonlan" in eth0_cfg
    assert eth0_cfg["wakeonlan"] is True
Exemple #18
0
 def collect_logs(self, custom_client: IntegrationInstance):
     """collect-logs must honour the custom cloud_dir location."""
     help_result = custom_client.execute("cloud-init collect-logs -h")
     assert help_result.ok, help_result.stderr
     squashed_help = re.sub(r"\s+", "", help_result.stdout)
     assert (f"{NEW_CLOUD_DIR}/instance/user-data.txt" in squashed_help
             ), "user-data file not correctly render in collect-logs -h"
     collect_logs_result = custom_client.execute(
         "cloud-init collect-logs --include-userdata")
     assert (collect_logs_result.ok
             ), f"collect-logs error: {collect_logs_result.stderr}"
Exemple #19
0
def test_wakeonlan(client: IntegrationInstance):
    """Check the wakeonlan setting lands in the rendered net config."""
    release = ImageSpecification.from_os_image().release
    if release == 'xenial':
        # Xenial uses ENI rendering instead of netplan.
        eni = client.execute('cat /etc/network/interfaces.d/50-cloud-init.cfg')
        assert eni.endswith(EXPECTED_ENI_END)
        return

    netplan_yaml = yaml.safe_load(
        client.execute('cat /etc/netplan/50-cloud-init.yaml'))
    eth0_cfg = netplan_yaml['network']['ethernets']['eth0']
    assert 'wakeonlan' in eth0_cfg
    assert eth0_cfg['wakeonlan'] is True
Exemple #20
0
def test_valid_userdata(client: IntegrationInstance):
    """Test `cloud-init devel schema` with valid userdata.

    PR #575
    """
    schema_result = client.execute("cloud-init devel schema --system")
    assert schema_result.ok
    assert ("Valid cloud-config: system userdata"
            == schema_result.stdout.strip())
    status_result = client.execute("cloud-init status --long")
    if not status_result.ok:
        raise AssertionError(
            f"Unexpected error from cloud-init status: {status_result}")
Exemple #21
0
def test_disk_setup_no_partprobe(create_disk, client: IntegrationInstance):
    """Ensure disk setup still works as expected without partprobe."""
    # We can't do this part in a bootcmd because the path has already
    # been found by the time we get to the bootcmd
    client.execute('rm $(which partprobe)')
    client.execute('cloud-init clean --logs')
    client.restart()

    boot_log = client.read_from_file('/var/log/cloud-init.log')
    _verify_first_disk_setup(client, boot_log)

    # No fallback to partprobe should ever have been attempted.
    assert 'partprobe' not in boot_log
Exemple #22
0
def customize_environment(
    client: IntegrationInstance,
    tmpdir,
    configure_secondary_nics: bool = False,
):
    """Push an Oracle datasource config snippet and reboot cleanly."""
    local_cfg = tmpdir.join("01_oracle_datasource.cfg")
    rendered_cfg = DS_CFG.format(
        configure_secondary_nics=configure_secondary_nics)
    with open(local_cfg, "w") as f:
        f.write(rendered_cfg)
    client.push_file(
        local_cfg, "/etc/cloud/cloud.cfg.d/01_oracle_datasource.cfg")

    client.execute("cloud-init clean --logs")
    client.restart()
Exemple #23
0
def test_invalid_userdata(client: IntegrationInstance):
    """Test `cloud-init devel schema` with invalid userdata.

    PR #575
    """
    schema_result = client.execute("cloud-init devel schema --system")
    assert not schema_result.ok
    for expected_error in ("Cloud config schema errors",
                           'needs to begin with "#cloud-config"'):
        assert expected_error in schema_result.stderr
    status_result = client.execute("cloud-init status --long")
    if not status_result.ok:
        raise AssertionError(
            f"Unexpected error from cloud-init status: {status_result}")
Exemple #24
0
 def get_keys(self, class_client: IntegrationInstance):
     """Return all keys in /etc/apt/trusted.gpg.d/ and /etc/apt/trusted.gpg
     in human readable format. Mimics the output of apt-key finger
     """
     list_cmd = ' '.join(gpg.GPG_LIST) + ' '
     keys = class_client.execute(list_cmd + cc_apt_configure.APT_LOCAL_KEYS)
     print(keys)  # NOTE: intentional debug output for test triage
     trusted_dir = cc_apt_configure.APT_TRUSTED_GPG_DIR
     for filename in class_client.execute('ls ' + trusted_dir).split():
         # A key listing may fail for some files; fall back to ''.
         keys += class_client.execute(list_cmd + trusted_dir + filename) or ''
     return keys
Exemple #25
0
def _test_network_config_applied_on_reboot(client: IntegrationInstance):
    """Verify network config is reapplied on reboot (boot event allowed)."""
    first_boot_log = client.read_from_file('/var/log/cloud-init.log')
    assert 'Applying network configuration' in first_boot_log
    assert 'dummy0' not in client.execute('ls /sys/class/net')

    _add_dummy_bridge_to_netplan(client)
    client.execute('rm /var/log/cloud-init.log')
    client.restart()

    reboot_log = client.read_from_file('/var/log/cloud-init.log')
    # Boot event must be allowed, networking reapplied, and the manually
    # added dummy bridge wiped out by the fresh configuration.
    assert 'Event Allowed: scope=network EventType=boot' in reboot_log
    assert 'Applying network configuration' in reboot_log
    assert 'dummy0' not in client.execute('ls /sys/class/net')
Exemple #26
0
def test_log_message_on_missing_version_file(client: IntegrationInstance):
    """Missing python-version beside a cache logs an 'unknown' status."""
    # Start by pushing a pickle so we can see the log message
    client.push_file(TEST_PICKLE, PICKLE_PATH)
    client.execute("rm /var/lib/cloud/data/python-version")
    client.execute("rm /var/log/cloud-init.log")
    client.restart()
    boot_log = client.read_from_file("/var/log/cloud-init.log")
    if "no cache found" in boot_log:
        # We don't expect the python version file to exist if we have no
        # pre-existing cache
        return
    assert (
        "Writing python-version file. "
        "Cache compatibility status is currently unknown." in boot_log
    )
def _collect_logs(instance: IntegrationInstance, node_id: str,
                  test_failed: bool):
    """Collect logs from remote instance.

    Args:
        instance: The current IntegrationInstance to collect logs from
        node_id: The pytest representation of this test, E.g.:
            tests/integration_tests/test_example.py::TestExample.test_example
        test_failed: If test failed or not
    """
    if any([
            integration_settings.COLLECT_LOGS == "NEVER",
            integration_settings.COLLECT_LOGS == "ON_ERROR"
            and not test_failed,
    ]):
        return
    instance.execute(
        "cloud-init collect-logs -u -t /var/tmp/cloud-init.tar.gz")
    node_id_path = Path(
        node_id.replace(".py",
                        "")  # Having a directory with '.py' would be weird
        .replace("::", os.path.sep)  # Turn classes/tests into paths
        .replace("[", "-")  # For parametrized names
        .replace("]", "")  # For parameterized names
    )
    log_dir = (Path(integration_settings.LOCAL_LOG_PATH) / session_start_time /
               node_id_path)
    log.info("Writing logs to %s", log_dir)

    if not log_dir.exists():
        log_dir.mkdir(parents=True)

    # Add a symlink to the latest log output directory
    last_symlink = Path(integration_settings.LOCAL_LOG_PATH) / "last"
    if os.path.islink(last_symlink):
        os.unlink(last_symlink)
    os.symlink(log_dir.parent, last_symlink)

    tarball_path = log_dir / "cloud-init.tar.gz"
    try:
        instance.pull_file("/var/tmp/cloud-init.tar.gz", tarball_path)
    except Exception as e:
        log.error("Failed to pull logs: %s", e)
        return

    # BUGFIX: close the tarball deterministically (the handle was
    # previously leaked) before deleting the archive.
    with TarFile.open(str(tarball_path)) as tarball:
        tarball.extractall(path=str(log_dir))
    tarball_path.unlink()
def test_chrony(client: IntegrationInstance):
    """The configured NTP server must appear in chrony's config file."""
    # Distros disagree on where chrony.conf lives; prefer /etc/chrony.conf.
    conf_path = ("/etc/chrony.conf"
                 if client.execute("test -f /etc/chrony.conf").ok
                 else "/etc/chrony/chrony.conf")
    assert "server 172.16.15.14" in client.read_from_file(conf_path)
def test_chrony(client: IntegrationInstance):
    """Verify the custom NTP server was written to chrony's config."""
    # chrony.conf location varies between distros.
    if client.execute('test -f /etc/chrony.conf').ok:
        config_path = '/etc/chrony.conf'
    else:
        config_path = '/etc/chrony/chrony.conf'
    config_text = client.read_from_file(config_path)
    assert 'server 172.16.15.14' in config_text
Exemple #30
0
def test_chrony(client: IntegrationInstance):
    """Default pool servers must be present in chrony's config file."""
    # chrony.conf location varies between distros.
    conf_file = ('/etc/chrony.conf'
                 if client.execute('test -f /etc/chrony.conf').ok
                 else '/etc/chrony/chrony.conf')
    assert '.pool.ntp.org' in client.read_from_file(conf_file)