def test_oci_networking_iscsi_instance(client: IntegrationInstance, tmpdir):
    """Verify OCI iSCSI-root networking when secondary NICs are disabled.

    Checks that the initramfs wrote /run/net-*.conf files, that secondary
    vnic metadata was NOT fetched, and that every initramfs-configured
    interface appears in the rendered netplan config with connectivity.
    """
    customize_environment(client, tmpdir, configure_secondary_nics=False)
    result_net_files = client.execute("ls /run/net-*.conf")
    assert result_net_files.ok, "No net files found under /run"

    log = client.read_from_file("/var/log/cloud-init.log")
    verify_clean_log(log)
    assert (
        "opc/v2/vnics/" not in log
    ), "vnic data was fetched and it should not have been"

    netplan_yaml = client.read_from_file("/etc/netplan/50-cloud-init.yaml")
    netplan_cfg = yaml.safe_load(netplan_yaml)
    configured_interfaces = extract_interface_names(netplan_cfg["network"])
    assert 1 <= len(
        configured_interfaces
    ), "Expected at least 1 primary network configuration."

    expected_interfaces = set(
        re.findall(r"/run/net-(.+)\.conf", result_net_files.stdout)
    )
    for expected_interface in expected_interfaces:
        # BUG FIX: the failure message previously lacked the f-prefix, so
        # "{expected_interface}" / "{log}" were emitted literally.
        assert (
            f"Reading from /run/net-{expected_interface}.conf" in log
        ), f"Expected {expected_interface} not found in: {log}"

    not_found_interfaces = expected_interfaces.difference(
        configured_interfaces
    )
    assert not not_found_interfaces, (
        f"Interfaces, {not_found_interfaces}, expected to be configured in"
        f" {netplan_cfg['network']}"
    )
    assert client.execute("ping -c 2 canonical.com").ok
def test_device_alias(self, create_disk, client: IntegrationInstance):
    """Check disk_setup resolves the 'my_alias' alias onto /dev/sdb and
    that both partitions are created, mounted, and remountable."""
    log = client.read_from_file("/var/log/cloud-init.log")
    # cloud-init must have translated the alias into the real device.
    for expected in (
        "updated disk_setup device entry 'my_alias' to '/dev/sdb'",
        "changed my_alias.1 => /dev/sdb1",
        "changed my_alias.2 => /dev/sdb2",
    ):
        assert expected in log
    verify_clean_log(log)

    blockdevices = json.loads(client.execute("lsblk --json"))["blockdevices"]
    sdb = [dev for dev in blockdevices if dev["name"] == "sdb"][0]
    children = sdb["children"]
    assert len(children) == 2
    assert children[0]["name"] == "sdb1"
    assert children[1]["name"] == "sdb2"
    # Older util-linux reports a scalar "mountpoint"; newer releases report
    # a "mountpoints" list instead.
    if "mountpoint" in children[0]:
        assert children[0]["mountpoint"] == "/mnt1"
        assert children[1]["mountpoint"] == "/mnt2"
    else:
        assert children[0]["mountpoints"] == ["/mnt1"]
        assert children[1]["mountpoints"] == ["/mnt2"]

    # fstab entries must remount silently and be visible to findmnt.
    result = client.execute("mount -a")
    assert result.return_code == 0
    assert result.stdout.strip() == ""
    assert result.stderr.strip() == ""
    for mnt in ("/mnt1", "/mnt2"):
        assert client.execute(f"findmnt -J {mnt}").return_code == 0
def test_ovs_member_interfaces_not_excluded(self, client):
    """Ensure the OVS bridge config applies once openvswitch is installed."""
    # openvswitch must be present for the provided network configuration
    # to apply (on next boot), so DHCP on the default interface to fetch it.
    client.execute("dhclient enp5s0")
    client.execute("apt update -qqy")
    client.execute("apt-get install -qqy openvswitch-switch")

    # With the package available, a clean reboot should apply the config.
    client.execute("cloud-init clean --logs")
    client.restart()
    cloudinit_output = client.read_from_file("/var/log/cloud-init.log")
    verify_clean_log(cloudinit_output)

    # The applied network config must have created the OVS bridge...
    assert "ovs-br" in client.execute("ip addr")
    # ...and the bridge must be able to reach the default gateway.
    gateway = client.execute("ip -4 route show default | awk '{ print $3 }'")
    ping_result = client.execute(f"ping -c 1 -W 1 -I ovs-br {gateway}")
    assert ping_result.ok
def _verify_first_disk_setup(self, client, log):
    """Assert sdb was split into sdb1/sdb2 mounted at /mnt1 and /mnt2."""
    verify_clean_log(log)
    devices = json.loads(client.execute("lsblk --json"))["blockdevices"]
    sdb = [dev for dev in devices if dev["name"] == "sdb"][0]
    partitions = sdb["children"]
    assert len(partitions) == 2
    assert partitions[0]["name"] == "sdb1"
    assert partitions[0]["mountpoint"] == "/mnt1"
    assert partitions[1]["name"] == "sdb2"
    assert partitions[1]["mountpoint"] == "/mnt2"
def _verify_first_disk_setup(self, client, log):
    """Confirm the first boot's disk setup: two mounted sdb partitions."""
    verify_clean_log(log)
    lsblk = json.loads(client.execute('lsblk --json'))
    sdb = [x for x in lsblk['blockdevices'] if x['name'] == 'sdb'][0]
    expected = (('sdb1', '/mnt1'), ('sdb2', '/mnt2'))
    assert len(sdb['children']) == len(expected)
    # Children are ordered by partition number, so zip checks both
    # name and mountpoint positionally.
    for child, (name, mountpoint) in zip(sdb['children'], expected):
        assert child['name'] == name
        assert child['mountpoint'] == mountpoint
def test_bridge(self, class_client):
    """Check that the given bridge is configured"""
    verify_clean_log(class_client.read_from_file("/var/log/cloud-init.log"))
    # The bridge should exist...
    assert class_client.execute("ip addr show lxdbr0")
    # ...with the expected IPv4 address in its LXD network config.
    network_config = yaml.safe_load(
        class_client.execute("lxc network show lxdbr0")
    )
    assert network_config["config"]["ipv4.address"] == "10.100.100.1/24"
def test_ubuntu_drivers_installed(session_cloud: IntegrationCloud):
    """Launch a GPU VM and verify NVIDIA drivers are installed exactly once."""
    launch_kwargs = {"instance_type": "VM.GPU2.1"}
    with session_cloud.launch(
        launch_kwargs=launch_kwargs, user_data=USER_DATA
    ) as client:
        log = client.read_from_file("/var/log/cloud-init.log")
        verify_clean_log(log)
        install_msg = (
            "Installing and activating NVIDIA drivers "
            "(nvidia/license-accepted=True, version=latest)"
        )
        assert log.count(install_msg) == 1

        result = client.execute("dpkg -l | grep nvidia")
        assert result.ok, "No nvidia packages found"
        # The versioned server modules package is the concrete evidence
        # that driver installation actually happened.
        match = re.search(
            r"ii\s+linux-modules-nvidia-\d+-server", result.stdout
        )
        assert match, (
            f"Did not find specific nvidia drivers packages in:"
            f" {result.stdout}"
        )
def test_no_problems(self, class_client: IntegrationInstance):
    """Test no errors, warnings, or tracebacks"""
    client = class_client
    # Every boot stage recorded in status.json must be error-free.
    status_json = json.loads(
        client.read_from_file("/run/cloud-init/status.json")
    )["v1"]
    for stage in ("init", "init-local", "modules-config", "modules-final"):
        assert [] == status_json[stage]["errors"]

    # The overall result must also report no errors.
    result_json = json.loads(
        client.read_from_file("/run/cloud-init/result.json")
    )["v1"]
    assert [] == result_json["errors"]

    # And the log itself must be free of warnings/tracebacks.
    verify_clean_log(client.read_from_file("/var/log/cloud-init.log"))
def test_no_problems(self, class_client: IntegrationInstance):
    """Test no errors, warnings, or tracebacks"""
    client = class_client
    stages = ('init', 'init-local', 'modules-config', 'modules-final')
    # Per-stage errors in status.json must all be empty lists.
    status = json.loads(client.read_from_file('/run/cloud-init/status.json'))
    for stage in stages:
        assert status['v1'][stage]['errors'] == []
    # The final result must be error-free as well.
    result = json.loads(client.read_from_file('/run/cloud-init/result.json'))
    assert result['v1']['errors'] == []
    # Finally, the log must contain no warnings or tracebacks.
    verify_clean_log(client.read_from_file('/var/log/cloud-init.log'))
def test_disk_setup_when_mounted(
    self, create_disk, client: IntegrationInstance
):
    """Test lp-1920939.

    We insert an extra disk into our VM, format it to have two partitions,
    modify our cloud config to mount devices before disk setup, and modify
    our userdata to setup a single partition on the disk.

    This allows cloud-init to attempt disk setup on a mounted partition.
    When blockdev is in use, it will fail with
    "blockdev: ioctl error on BLKRRPART: Device or resource busy" along
    with a warning and a traceback. When partprobe is in use, everything
    should work successfully.
    """
    log = client.read_from_file("/var/log/cloud-init.log")
    self._verify_first_disk_setup(client, log)

    # Ensure NoCloud gets detected on reboot
    client.execute("mkdir -p /var/lib/cloud/seed/nocloud-net/")
    client.execute("touch /var/lib/cloud/seed/nocloud-net/meta-data")
    client.write_to_file(
        "/etc/cloud/cloud.cfg.d/99_nocloud.cfg",
        "datasource_list: [ NoCloud ]\n",
    )
    # Update our userdata and cloud.cfg to mount then perform new disk
    # setup
    client.write_to_file(
        "/var/lib/cloud/seed/nocloud-net/user-data",
        UPDATED_PARTPROBE_USERDATA,
    )
    client.execute(
        "sed -i 's/write-files/write-files\\n - mounts/' "
        "/etc/cloud/cloud.cfg"
    )
    client.execute("cloud-init clean --logs")
    client.restart()

    # Assert new setup works as expected
    # BUG FIX: previously the stale pre-reboot log was re-verified here,
    # so errors from the boot under test went undetected. Re-read it.
    log = client.read_from_file("/var/log/cloud-init.log")
    verify_clean_log(log)

    lsblk = json.loads(client.execute("lsblk --json"))
    sdb = [x for x in lsblk["blockdevices"] if x["name"] == "sdb"][0]
    assert len(sdb["children"]) == 1
    assert sdb["children"][0]["name"] == "sdb1"
    # util-linux changed "mountpoint" (scalar) to "mountpoints" (list).
    if "mountpoint" in sdb["children"][0]:
        assert sdb["children"][0]["mountpoint"] == "/mnt3"
    else:
        assert sdb["children"][0]["mountpoints"] == ["/mnt3"]
def test_device_alias(self, create_disk, client: IntegrationInstance):
    """Verify the 'my_alias' disk_setup alias maps onto /dev/sdb."""
    log = client.read_from_file("/var/log/cloud-init.log")
    expected_log_lines = [
        "updated disk_setup device entry 'my_alias' to '/dev/sdb'",
        "changed my_alias.1 => /dev/sdb1",
        "changed my_alias.2 => /dev/sdb2",
    ]
    for line in expected_log_lines:
        assert line in log
    verify_clean_log(log)

    lsblk = json.loads(client.execute("lsblk --json"))
    sdb = [x for x in lsblk["blockdevices"] if x["name"] == "sdb"][0]
    assert 2 == len(sdb["children"])
    first, second = sdb["children"]
    assert first["name"] == "sdb1"
    assert first["mountpoint"] == "/mnt1"
    assert second["name"] == "sdb2"
    assert second["mountpoint"] == "/mnt2"
def test_device_alias(self, create_disk, client: IntegrationInstance):
    """Check that disk_setup's 'my_alias' alias resolves to /dev/sdb."""
    log = client.read_from_file('/var/log/cloud-init.log')
    assert "updated disk_setup device entry 'my_alias' to '/dev/sdb'" in log
    assert 'changed my_alias.1 => /dev/sdb1' in log
    assert 'changed my_alias.2 => /dev/sdb2' in log
    verify_clean_log(log)
    devices = json.loads(client.execute('lsblk --json'))['blockdevices']
    sdb = [dev for dev in devices if dev['name'] == 'sdb'][0]
    children = sdb['children']
    assert len(children) == 2
    # Partition order is stable, so check name/mountpoint positionally.
    expectations = (('sdb1', '/mnt1'), ('sdb2', '/mnt2'))
    for child, (name, mountpoint) in zip(children, expectations):
        assert child['name'] == name
        assert child['mountpoint'] == mountpoint
def test_datasource_rbx_no_stacktrace(client: IntegrationInstance):
    """Probing RbxCloud on a non-RbxCloud platform must not log failures."""
    # Put RbxCloud first in the datasource list and re-run cloud-init.
    client.write_to_file(
        "/etc/cloud/cloud.cfg.d/90_dpkg.cfg",
        "datasource_list: [ RbxCloud, NoCloud, LXD ]\n",
    )
    client.write_to_file("/etc/cloud/ds-identify.cfg", "policy: enabled\n")
    client.execute("cloud-init clean --logs")
    client.restart()

    log = client.read_from_file("/var/log/cloud-init.log")
    verify_clean_log(log)
    failure_markers = (
        "Failed to load metadata and userdata",
        "Getting data from <class 'cloudinit.sources.DataSourceRbxCloud."
        "DataSourceRbxCloud'> failed",
    )
    for marker in failure_markers:
        assert marker not in log
def test_oci_networking_iscsi_instance_secondary_vnics(
    client_with_secondary_vnic: IntegrationInstance, tmpdir
):
    """Verify secondary vnic metadata is fetched and rendered into netplan."""
    client = client_with_secondary_vnic
    customize_environment(client, tmpdir, configure_secondary_nics=True)

    log = client.read_from_file("/var/log/cloud-init.log")
    verify_clean_log(log)
    assert "opc/v2/vnics/" in log, f"vnics data not fetched in {log}"

    netplan_cfg = yaml.safe_load(
        client.read_from_file("/etc/netplan/50-cloud-init.yaml")
    )
    configured_interfaces = extract_interface_names(netplan_cfg["network"])
    assert 2 <= len(
        configured_interfaces
    ), "Expected at least 1 primary and 1 secondary network configurations"

    # The initramfs only configures the primary NIC, so netplan should
    # carry exactly one interface more than /run/net-*.conf shows.
    result_net_files = client.execute("ls /run/net-*.conf")
    initramfs_interfaces = set(
        re.findall(r"/run/net-(.+)\.conf", result_net_files.stdout)
    )
    assert len(initramfs_interfaces) + 1 == len(configured_interfaces)
    assert client.execute("ping -c 2 canonical.com").ok
def test_disk_setup_when_mounted(self, create_disk,
                                 client: IntegrationInstance):
    """Test lp-1920939.

    We insert an extra disk into our VM, format it to have two partitions,
    modify our cloud config to mount devices before disk setup, and modify
    our userdata to setup a single partition on the disk.

    This allows cloud-init to attempt disk setup on a mounted partition.
    When blockdev is in use, it will fail with
    "blockdev: ioctl error on BLKRRPART: Device or resource busy" along
    with a warning and a traceback. When partprobe is in use, everything
    should work successfully.
    """
    log = client.read_from_file('/var/log/cloud-init.log')
    self._verify_first_disk_setup(client, log)

    # Update our userdata and cloud.cfg to mount then perform new disk
    # setup
    client.write_to_file(
        '/var/lib/cloud/seed/nocloud-net/user-data',
        UPDATED_PARTPROBE_USERDATA,
    )
    client.execute("sed -i 's/write-files/write-files\\n - mounts/' "
                   "/etc/cloud/cloud.cfg")
    client.execute('cloud-init clean --logs')
    client.restart()

    # Assert new setup works as expected
    # BUG FIX: re-read the log after the reboot; previously the stale
    # pre-reboot copy was verified, so new-boot failures went undetected.
    log = client.read_from_file('/var/log/cloud-init.log')
    verify_clean_log(log)

    lsblk = json.loads(client.execute('lsblk --json'))
    sdb = [x for x in lsblk['blockdevices'] if x['name'] == 'sdb'][0]
    assert len(sdb['children']) == 1
    assert sdb['children'][0]['name'] == 'sdb1'
    assert sdb['children'][0]['mountpoint'] == '/mnt3'
def verify_log_and_files(self, custom_client):
    """Assert cloud-init used the relocated cloud dir, not the default."""
    log_content = custom_client.read_from_file("/var/log/cloud-init.log")
    verify_clean_log(log_content)
    # Only the new directory may appear in the log...
    assert NEW_CLOUD_DIR in log_content
    assert DEFAULT_CLOUD_DIR not in log_content
    # ...and the default directory must not exist on disk.
    assert custom_client.execute(f"test ! -d {DEFAULT_CLOUD_DIR}").ok
def test_clean_boot_of_upgraded_package(session_cloud: IntegrationCloud):
    """Upgrade cloud-init in place and verify a clean subsequent boot.

    Launches the released image, records pre-upgrade state, installs the
    new cloud-init, reboots, then asserts that no new errors appeared and
    that hostname, cloud-id, result.json, and network config survived.
    """
    source = get_validated_source(session_cloud)
    if not source.installs_new_version():
        pytest.skip(UNSUPPORTED_INSTALL_METHOD_MSG.format(source))
        return  # type checking doesn't understand that skip raises
    if (
        ImageSpecification.from_os_image().release == "bionic"
        and session_cloud.settings.PLATFORM == "lxd_vm"
    ):
        # The issues that we see on Bionic VMs don't appear anywhere
        # else, including when calling KVM directly. It likely has to
        # do with the extra lxd-agent setup happening on bionic.
        # Given that we still have Bionic covered on all other platforms,
        # the risk of skipping bionic here seems low enough.
        pytest.skip("Upgrade test doesn't run on LXD VMs and bionic")
        return

    launch_kwargs = {
        "image_id": session_cloud.released_image_id,
    }

    with session_cloud.launch(
        launch_kwargs=launch_kwargs,
        user_data=USER_DATA,
    ) as instance:
        # get pre values
        pre_hostname = instance.execute("hostname")
        pre_cloud_id = instance.execute("cloud-id")
        pre_result = instance.execute("cat /run/cloud-init/result.json")
        pre_network = instance.execute("cat /etc/netplan/50-cloud-init.yaml")
        pre_systemd_analyze = instance.execute("systemd-analyze")
        pre_systemd_blame = instance.execute("systemd-analyze blame")
        pre_cloud_analyze = instance.execute("cloud-init analyze show")
        pre_cloud_blame = instance.execute("cloud-init analyze blame")

        # Ensure no issues pre-upgrade
        log = instance.read_from_file("/var/log/cloud-init.log")
        assert not json.loads(pre_result)["v1"]["errors"]

        try:
            verify_clean_log(log)
        except AssertionError:
            LOG.warning(
                "There were errors/warnings/tracebacks pre-upgrade. "
                "Any failures may be due to pre-upgrade problem"
            )

        # Upgrade
        instance.install_new_cloud_init(source, take_snapshot=False)

        # 'cloud-init init' helps us understand if our pickling upgrade
        # paths have broken across re-constitution of a cached datasource.
        # Some platforms invalidate their datasource cache on reboot, so we
        # run it here to ensure we get a dirty run.
        assert instance.execute("cloud-init init").ok

        # Reboot
        instance.execute("hostname something-else")
        instance.restart()
        assert instance.execute("cloud-init status --wait --long").ok

        # get post values
        post_hostname = instance.execute("hostname")
        post_cloud_id = instance.execute("cloud-id")
        post_result = instance.execute("cat /run/cloud-init/result.json")
        post_network = instance.execute(
            "cat /etc/netplan/50-cloud-init.yaml"
        )
        post_systemd_analyze = instance.execute("systemd-analyze")
        post_systemd_blame = instance.execute("systemd-analyze blame")
        post_cloud_analyze = instance.execute("cloud-init analyze show")
        post_cloud_blame = instance.execute("cloud-init analyze blame")

        # Ensure no issues post-upgrade
        # BUG FIX: previously re-checked pre_result here, so post-upgrade
        # errors were never actually validated.
        assert not json.loads(post_result)["v1"]["errors"]

        log = instance.read_from_file("/var/log/cloud-init.log")
        verify_clean_log(log)

        # Ensure important things stayed the same
        assert pre_hostname == post_hostname
        assert pre_cloud_id == post_cloud_id
        try:
            assert pre_result == post_result
        except AssertionError:
            if instance.settings.PLATFORM == "azure":
                # Azure datasource names may gain/lose a suffix across the
                # upgrade; accept any DataSourceAzure* on both sides.
                pre_json = json.loads(pre_result)
                post_json = json.loads(post_result)
                assert pre_json["v1"]["datasource"].startswith(
                    "DataSourceAzure"
                )
                assert post_json["v1"]["datasource"].startswith(
                    "DataSourceAzure"
                )
            else:
                # BUG FIX: previously the mismatch was silently swallowed
                # on every non-Azure platform.
                raise
        assert pre_network == post_network

        # Calculate and log all the boot numbers
        pre_analyze_totals = [
            x
            for x in pre_cloud_analyze.splitlines()
            if x.startswith("Finished stage") or x.startswith("Total Time")
        ]
        post_analyze_totals = [
            x
            for x in post_cloud_analyze.splitlines()
            if x.startswith("Finished stage") or x.startswith("Total Time")
        ]

        # pylint: disable=logging-format-interpolation
        LOG.info(
            LOG_TEMPLATE.format(
                pre_systemd_analyze=pre_systemd_analyze,
                post_systemd_analyze=post_systemd_analyze,
                pre_systemd_blame="\n".join(
                    pre_systemd_blame.splitlines()[:10]
                ),
                post_systemd_blame="\n".join(
                    post_systemd_blame.splitlines()[:10]
                ),
                pre_analyze_totals="\n".join(pre_analyze_totals),
                post_analyze_totals="\n".join(post_analyze_totals),
                pre_cloud_blame="\n".join(
                    pre_cloud_blame.splitlines()[:10]
                ),
                post_cloud_blame="\n".join(
                    post_cloud_blame.splitlines()[:10]
                ),
            )
        )
def test_chef_license(client: IntegrationInstance):
    """A boot with the chef license config must leave a clean log."""
    verify_clean_log(client.read_from_file("/var/log/cloud-init.log"))
def test_lxd_datasource_discovery(client: IntegrationInstance):
    """Test that DataSourceLXD is detected instead of NoCloud."""
    _customize_envionment(client)
    result = client.execute("cloud-init status --wait --long")
    if not result.ok:
        raise AssertionError("cloud-init failed:\n%s", result.stderr)
    if "DataSourceLXD" not in result.stdout:
        raise AssertionError(
            "cloud-init did not discover DataSourceLXD", result.stdout
        )
    # The rendered netplan must be exactly one DHCP ethernet; the NIC name
    # differs between container and VM platforms.
    netplan_yaml = client.execute("cat /etc/netplan/50-cloud-init.yaml")
    netplan_cfg = yaml.safe_load(netplan_yaml)
    platform = client.settings.PLATFORM
    nic_dev = "eth0" if platform == "lxd_container" else "enp5s0"
    assert {
        "network": {"ethernets": {nic_dev: {"dhcp4": True}}, "version": 2}
    } == netplan_cfg
    log = client.read_from_file("/var/log/cloud-init.log")
    verify_clean_log(log)
    result = client.execute("cloud-id")
    if result.stdout != "lxd":
        raise AssertionError(
            "cloud-id didn't report lxd. Result: %s", result.stdout
        )
    # Validate config instance data represented
    data = json.loads(
        client.read_from_file("/run/cloud-init/instance-data.json")
    )
    v1 = data["v1"]
    ds_cfg = data["ds"]
    assert "lxd" == v1["platform"]
    assert "LXD socket API v. 1.0 (/dev/lxd/sock)" == v1["subplatform"]
    # Re-fetch ds config via the query CLI; only these four keys may exist.
    ds_cfg = json.loads(client.execute("cloud-init query ds").stdout)
    assert ["_doc", "_metadata_api_version", "config", "meta-data"] == sorted(
        list(ds_cfg.keys())
    )
    if (
        client.settings.PLATFORM == "lxd_vm"
        and ImageSpecification.from_os_image().release == "bionic"
    ):
        # pycloudlib injects user.vendor_data for lxd_vm on bionic
        # to start the lxd-agent.
        # https://github.com/canonical/pycloudlib/blob/main/pycloudlib/\
        # lxd/defaults.py#L13-L27
        # Underscore-delimited aliases exist for any keys containing hyphens
        # or dots.
        lxd_config_keys = ["user.meta-data", "user.vendor-data"]
    else:
        lxd_config_keys = ["user.meta-data"]
    assert "1.0" == ds_cfg["_metadata_api_version"]
    assert lxd_config_keys == list(ds_cfg["config"].keys())
    # user.meta-data carries the instance's first public SSH key.
    assert {"public-keys": v1["public_ssh_keys"][0]} == (
        yaml.safe_load(ds_cfg["config"]["user.meta-data"])
    )
    assert "#cloud-config\ninstance-id" in ds_cfg["meta-data"]
    # Assert NoCloud seed data is still present in cloud image metadata
    # This will start failing if we redact metadata templates from
    # https://cloud-images.ubuntu.com/daily/server/jammy/current/\
    # jammy-server-cloudimg-amd64-lxd.tar.xz
    nocloud_metadata = yaml.safe_load(
        client.read_from_file("/var/lib/cloud/seed/nocloud-net/meta-data")
    )
    assert client.instance.name == nocloud_metadata["instance-id"]
    assert (
        nocloud_metadata["instance-id"] == nocloud_metadata["local-hostname"]
    )
    assert v1["public_ssh_keys"][0] == nocloud_metadata["public-keys"]
def _assert_no_pickle_problems(log):
    """Fail if the cached datasource pickle failed to load or the log
    contains warnings/tracebacks."""
    assert "Failed loading pickled blob" not in log
    verify_clean_log(log)
def test_puppet_service(client: IntegrationInstance):
    """Basic test that puppet gets installed and runs."""
    log = client.read_from_file("/var/log/cloud-init.log")
    verify_clean_log(log)
    # The puppet service must be active...
    assert client.execute("systemctl is-active puppet").ok
    # ...and cloud-init must not have invoked the agent directly.
    assert "Running command ['puppet', 'agent'" not in log
def test_lp1886531(self, client):
    """A boot with the lp-1886531 config must leave a clean log."""
    verify_clean_log(client.read_from_file("/var/log/cloud-init.log"))