def test_persist_instance_data_writes_canonical_cloud_id_and_symlink(self):
    """canonical-cloud-id class attribute is set, persist to json."""
    tmp = self.tmp_dir()
    datasource = DataSourceTestSubclassNet(
        self.sys_cfg, self.distro, Paths({"run_dir": tmp})
    )
    # Expected run_dir artifacts: one cloud-id-<id> file per canonical
    # id, plus a stable "cloud-id" symlink pointing at the current one.
    cloud_id_link = os.path.join(tmp, "cloud-id")
    cloud_id_file = os.path.join(tmp, "cloud-id-my-cloud")
    cloud_id2_file = os.path.join(tmp, "cloud-id-my-cloud2")
    # Sanity: none of the artifacts exist before get_data runs.
    for filename in (cloud_id_file, cloud_id_link, cloud_id2_file):
        self.assertFalse(
            os.path.exists(filename), "Unexpected link found (unknown)"
        )
    with mock.patch(
        "cloudinit.sources.canonical_cloud_id", return_value="my-cloud"
    ):
        datasource.get_data()
    self.assertEqual("my-cloud\n", util.load_file(cloud_id_link))
    # A symlink with the generic /run/cloud-init/cloud-id link is present
    self.assertTrue(util.is_link(cloud_id_link))
    # When cloud-id changes, symlink and content change
    with mock.patch(
        "cloudinit.sources.canonical_cloud_id", return_value="my-cloud2"
    ):
        datasource.persist_instance_data()
    self.assertEqual("my-cloud2\n", util.load_file(cloud_id2_file))
    # Previous cloud-id-<cloud-type> file removed
    self.assertFalse(os.path.exists(cloud_id_file))
    # Generic link persisted which contains canonical-cloud-id as content
    self.assertTrue(util.is_link(cloud_id_link))
    self.assertEqual("my-cloud2\n", util.load_file(cloud_id_link))
def test_convert_output_kind_artifacts(
    self, output_kind, outfile_content, debug, capsys, tmpdir
):
    """Assert proper output-kind artifacts are written."""
    network_data = tmpdir.join("network_data")
    network_data.write(SAMPLE_NET_V1)
    # sysconfig renderers are selected for RHEL-family distros only.
    distro = "centos" if output_kind == "sysconfig" else "ubuntu"
    args = [
        f"--directory={tmpdir.strpath}",
        f"--network-data={network_data.strpath}",
        f"--distro={distro}",
        "--kind=yaml",
        f"--output-kind={output_kind}",
    ]
    if debug:
        args.append("--debug")
    params = self._replace_path_args(args, tmpdir)
    with mock.patch("sys.argv", ["net-convert"] + params):
        args = net_convert.get_parser().parse_args()
    with mock.patch("cloudinit.util.chownbyname") as chown:
        net_convert.handle_args("somename", args)
    # Every expected artifact exists with exactly the expected content.
    for path in outfile_content:
        outfile = tmpdir.join(path)
        assert outfile_content[path] == outfile.read()
    if output_kind == "networkd":
        # NOTE(review): `outfile` is whichever artifact the loop saw
        # last -- assumes exactly one networkd output file; confirm.
        assert [
            mock.call(outfile.strpath, "systemd-network", "systemd-network")
        ] == chown.call_args_list
def test_handle_args_error_on_invalid_vaname_paths(
    self, inst_data, varname, expected_error, caplog, tmpdir
):
    """Error when varname is not a valid instance-data variable path."""
    instance_data = tmpdir.join("instance-data")
    instance_data.write(inst_data)
    args = self.args(
        debug=False,
        dump_all=False,
        format=None,
        instance_data=instance_data.strpath,
        list_keys=False,
        user_data=None,
        vendor_data=None,
        varname=varname,
    )
    paths, _, _, _ = self._setup_paths(tmpdir)
    with mock.patch("cloudinit.cmd.query.read_cfg_paths") as m_paths:
        m_paths.return_value = paths
        # Neutralize the CLI log handler so the error lands in caplog.
        with mock.patch(
            "cloudinit.cmd.query.addLogHandlerCLI", return_value=""
        ):
            with mock.patch("cloudinit.cmd.query.load_userdata") as m_lud:
                m_lud.return_value = "ud"
                # Exit code 1 signals the invalid varname lookup.
                assert 1 == query.handle_args("anyname", args)
    assert expected_error in caplog.text
def test_handle_modern_zfsroot(
    self, mount_info, zpool_info, parse_mount, is_container
):
    """ZFS root resize goes through `zpool online -e` on the pool disk."""
    devpth = "zroot/ROOT/default"
    disk = "da0p3"
    fs_type = "zfs"
    mount_point = "/"
    mount_info.return_value = (devpth, fs_type, mount_point)
    zpool_info.return_value = disk
    parse_mount.return_value = (devpth, fs_type, mount_point)
    cfg = {"resize_rootfs": True}

    def fake_stat(devpath):
        # The bare pool disk name is not stat-able; the /dev/<disk>
        # path that handle falls back to is.
        if devpath == disk:
            raise OSError("not here")
        FakeStat = namedtuple(
            "FakeStat", ["st_mode", "st_size", "st_mtime"]
        )  # minimal stat
        return FakeStat(25008, 0, 1)  # fake char block device

    with mock.patch("cloudinit.config.cc_resizefs.do_resize") as dresize:
        with mock.patch("cloudinit.config.cc_resizefs.os.stat") as m_stat:
            m_stat.side_effect = fake_stat
            handle("cc_resizefs", cfg, _cloud=None, log=LOG, args=[])

    self.assertEqual(
        ("zpool", "online", "-e", "zroot", "/dev/" + disk),
        dresize.call_args[0][0],
    )
def test_handle_args_root_processes_user_data(
    self, ud_src, ud_expected, vd_src, vd_expected, capsys, tmpdir
):
    """Support reading multiple user-data file content types"""
    paths, run_dir, user_data, vendor_data = self._setup_paths(
        tmpdir, ud_val=ud_src, vd_val=vd_src
    )
    sensitive_file = run_dir.join(INSTANCE_JSON_SENSITIVE_FILE)
    sensitive_file.write('{"my-var": "it worked"}')
    args = self.args(
        debug=False,
        dump_all=True,
        format=None,
        instance_data=None,
        list_keys=False,
        user_data=user_data.strpath,
        vendor_data=vendor_data.strpath,
        varname=None,
    )
    with mock.patch("cloudinit.cmd.query.read_cfg_paths") as m_paths:
        m_paths.return_value = paths
        with mock.patch("os.getuid") as m_getuid:
            # Run as root so the sensitive instance-data is readable.
            m_getuid.return_value = 0
            assert 0 == query.handle_args("anyname", args)
    out, _err = capsys.readouterr()
    cmd_output = json.loads(out)
    assert "it worked" == cmd_output["my-var"]
    # Binary payloads surface base64-encoded with a "ci-b64:" prefix;
    # the parametrized sentinel is expanded here from the source value.
    if ud_expected == "ci-b64:":
        ud_expected = "ci-b64:{}".format(b64e(ud_src))
    if vd_expected == "ci-b64:":
        vd_expected = "ci-b64:{}".format(b64e(vd_src))
    assert ud_expected == cmd_output["userdata"]
    assert vd_expected == cmd_output["vendordata"]
def test_handle_args_root_fallsback_to_instance_data(self, caplog, tmpdir):
    """When no instance_data argument, root falls back to redacted json."""
    # No explicit instance-data path: the command must locate one itself.
    query_args = self.args(
        debug=False,
        dump_all=True,
        format=None,
        instance_data=None,
        list_keys=False,
        user_data=None,
        vendor_data=None,
        varname=None,
    )
    paths, run_dir, _, _ = self._setup_paths(tmpdir)
    with mock.patch("cloudinit.cmd.query.read_cfg_paths") as m_paths, \
            mock.patch("os.getuid") as m_getuid:
        m_paths.return_value = paths
        m_getuid.return_value = 0
        assert 1 == query.handle_args("anyname", query_args)
    redacted_json = run_dir.join(INSTANCE_JSON_FILE)
    root_json = run_dir.join(INSTANCE_JSON_SENSITIVE_FILE)
    # The fallback is announced in the log before the command fails.
    expected_msg = "Missing root-readable %s. Using redacted %s instead." % (
        root_json.strpath,
        redacted_json.strpath,
    )
    assert expected_msg in caplog.text
def test_handle_modern_zfsroot(self, mount_info, zpool_info, parse_mount,
                               is_container):
    """ZFS root resize goes through `zpool online -e` on the pool disk."""
    devpth = 'zroot/ROOT/default'
    disk = 'da0p3'
    fs_type = 'zfs'
    mount_point = '/'
    mount_info.return_value = (devpth, fs_type, mount_point)
    zpool_info.return_value = disk
    parse_mount.return_value = (devpth, fs_type, mount_point)
    cfg = {'resize_rootfs': True}

    def fake_stat(devpath):
        # The bare pool disk name is not stat-able; the /dev/<disk>
        # path that handle falls back to is.
        if devpath == disk:
            raise OSError("not here")
        FakeStat = namedtuple(
            'FakeStat', ['st_mode', 'st_size', 'st_mtime'])  # minimal stat
        return FakeStat(25008, 0, 1)  # fake char block device

    with mock.patch('cloudinit.config.cc_resizefs.do_resize') as dresize:
        with mock.patch('cloudinit.config.cc_resizefs.os.stat') as m_stat:
            m_stat.side_effect = fake_stat
            handle('cc_resizefs', cfg, _cloud=None, log=LOG, args=[])

    self.assertEqual(('zpool', 'online', '-e', 'zroot', '/dev/' + disk),
                     dresize.call_args[0][0])
def test_get_data_cust_script_disabled(self):
    """If custom script is disabled by VMware tools configuration,
    raise a RuntimeError.
    """
    paths = Paths({'cloud_dir': self.tdir})
    ds = self.datasource(
        sys_cfg={'disable_vmware_customization': False}, distro={},
        paths=paths)
    # Prepare the conf file
    conf_file = self.tmp_path('test-cust', self.tdir)
    conf_content = dedent("""\
        [CUSTOM-SCRIPT]
        SCRIPT-NAME = test-script
        [MISC]
        MARKER-ID = 12345346
        """)
    util.write_file(conf_file, conf_content)
    # Prepare the custom script
    customscript = self.tmp_path('test-script', self.tdir)
    util.write_file(customscript, "This is the post cust script")
    # An 'invalid' tools-config value keeps custom scripts disabled,
    # so get_data must raise instead of running the script.
    with mock.patch(MPATH + 'get_tools_config', return_value='invalid'):
        with mock.patch(MPATH + 'set_customization_status',
                        return_value=('msg', b'')):
            with self.assertRaises(RuntimeError) as context:
                wrap_and_call(
                    'cloudinit.sources.DataSourceOVF',
                    {'dmi.read_dmi_data': 'vmware',
                     'util.del_dir': True,
                     'search_file': self.tdir,
                     'wait_for_imc_cfg_file': conf_file,
                     'get_nics_to_enable': ''},
                    ds.get_data)
    self.assertIn('Custom script is disabled by VM Administrator',
                  str(context.exception))
def test_handle_args_user_vendor_data_defaults_to_instance_link(
    self, capsys, tmpdir
):
    """Without user/vendor-data args, defaults load from instance link."""
    paths, run_dir, _, _ = self._setup_paths(tmpdir)
    sensitive_file = run_dir.join(INSTANCE_JSON_SENSITIVE_FILE)
    sensitive_file.write('{"my-var": "it worked"}')
    # Seed user-data.txt / vendor-data.txt under the instance symlink;
    # these are the fallbacks when no CLI paths are given.
    ud_path = os.path.join(paths.instance_link, "user-data.txt")
    write_file(ud_path, "instance_link_ud")
    vd_path = os.path.join(paths.instance_link, "vendor-data.txt")
    write_file(vd_path, "instance_link_vd")
    args = self.args(
        debug=False,
        dump_all=True,
        format=None,
        instance_data=None,
        list_keys=False,
        user_data=None,
        vendor_data=None,
        varname=None,
    )
    with mock.patch("cloudinit.cmd.query.read_cfg_paths") as m_paths:
        m_paths.return_value = paths
        with mock.patch("os.getuid", return_value=0):
            assert 0 == query.handle_args("anyname", args)
    expected = (
        '{\n "my-var": "it worked",\n '
        '"userdata": "instance_link_ud",\n '
        '"vendordata": "instance_link_vd"\n}\n'
    )
    out, _ = capsys.readouterr()
    assert expected == out
def test_handle_args_root_uses_instance_sensitive_data(
    self, capsys, tmpdir
):
    """When no instance_data argument, root uses sensitive json."""
    paths, run_dir, user_data, vendor_data = self._setup_paths(
        tmpdir, ud_val="ud", vd_val="vd"
    )
    sensitive_file = run_dir.join(INSTANCE_JSON_SENSITIVE_FILE)
    sensitive_file.write('{"my-var": "it worked"}')
    args = self.args(
        debug=False,
        dump_all=True,
        format=None,
        instance_data=None,
        list_keys=False,
        user_data=user_data.strpath,
        vendor_data=vendor_data.strpath,
        varname=None,
    )
    with mock.patch("cloudinit.cmd.query.read_cfg_paths") as m_paths:
        m_paths.return_value = paths
        with mock.patch("os.getuid") as m_getuid:
            # uid 0 selects the root-only sensitive instance-data file.
            m_getuid.return_value = 0
            assert 0 == query.handle_args("anyname", args)
    expected = (
        '{\n "my-var": "it worked",\n '
        '"userdata": "ud",\n "vendordata": "vd"\n}\n'
    )
    out, _err = capsys.readouterr()
    assert expected == out
def setUp(self):
    """Patch cc_disk_setup module helpers for the duration of each test."""
    super(TestIsDiskUsed, self).setUp()
    self.patches = ExitStack()
    prefix = "cloudinit.config.cc_disk_setup"
    self.enumerate_disk = self.patches.enter_context(
        mock.patch(prefix + ".enumerate_disk")
    )
    self.check_fs = self.patches.enter_context(
        mock.patch(prefix + ".check_fs")
    )
def _test_get_data_with_network_config(self, guestinfo, iso):
    # Shared helper: deliver network-config either via the guestinfo
    # transport or the ISO transport, selected by the boolean flags,
    # and assert the datasource parses it into v2 network config.
    network_config = dedent(
        """\
        network:
          version: 2
          ethernets:
            nics:
              nameservers:
                addresses:
                - 127.0.0.53
                search:
                - vmware.com
              match:
                name: eth*
              gateway4: 10.10.10.253
              dhcp4: false
              addresses:
              - 10.10.10.1/24
        """
    )
    network_config_b64 = base64.b64encode(network_config.encode()).decode()
    props = {
        "network-config": network_config_b64,
        "password": "******",
        "instance-id": "inst-001",
    }
    env = fill_properties(props)
    paths = Paths({"cloud_dir": self.tdir, "run_dir": self.tdir})
    ds = self.datasource(sys_cfg={}, distro={}, paths=paths)
    # The transport not selected by the flag reports NOT_FOUND so the
    # datasource falls through to the other one.
    with mock.patch(
        MPATH + "transport_vmware_guestinfo",
        return_value=env if guestinfo else NOT_FOUND,
    ):
        with mock.patch(
            MPATH + "transport_iso9660",
            return_value=env if iso else NOT_FOUND,
        ):
            self.assertTrue(ds.get_data())
            self.assertEqual("inst-001", ds.metadata["instance-id"])
            self.assertEqual(
                {
                    "version": 2,
                    "ethernets": {
                        "nics": {
                            "nameservers": {
                                "addresses": ["127.0.0.53"],
                                "search": ["vmware.com"],
                            },
                            "match": {"name": "eth*"},
                            "gateway4": "10.10.10.253",
                            "dhcp4": False,
                            "addresses": ["10.10.10.1/24"],
                        }
                    },
                },
                ds.network_config,
            )
def test_cloud_id_missing_instance_data_json(self):
    """Exit error when the provided instance-data.json does not exist."""
    # self.instance_data is never written, so the file is absent.
    argv = ['cloud-id', '--instance-data', self.instance_data]
    with mock.patch('sys.argv', argv), \
            mock.patch('sys.stderr', new_callable=StringIO) as m_stderr:
        with self.assertRaises(SystemExit) as context_manager:
            cloud_id.main()
    self.assertEqual(1, context_manager.exception.code)
    self.assertIn(
        "ERROR: File not found '%s'" % self.instance_data,
        m_stderr.getvalue(),
    )
def test_subplatform_config_drive_when_starts_with_dev(self):
    """subplatform reports config-drive when source starts with /dev/."""
    cfg_ds = ds.DataSourceConfigDrive(
        settings.CFG_BUILTIN, None, helpers.Paths({})
    )
    # Stub out device discovery, mounting and first-boot handling so
    # only the subplatform bookkeeping is exercised.
    with mock.patch(M_PATH + "find_candidate_devs") as m_find_devs, \
            mock.patch(M_PATH + "util.mount_cb"), \
            mock.patch(M_PATH + "on_first_boot"):
        m_find_devs.return_value = ["/dev/anything"]
        self.assertEqual(True, cfg_ds.get_data())
    self.assertEqual("config-disk (/dev/anything)", cfg_ds.subplatform)
def test_get_data_force_run_post_script_is_yes(self):
    """If DEFAULT-RUN-POST-CUST-SCRIPT is yes, custom script could run if
    enable-custom-scripts is not defined in VM Tools configuration
    """
    paths = Paths({"cloud_dir": self.tdir})
    ds = self.datasource(
        sys_cfg={"disable_vmware_customization": False},
        distro={},
        paths=paths,
    )
    # Prepare the conf file
    conf_file = self.tmp_path("test-cust", self.tdir)
    # set DEFAULT-RUN-POST-CUST-SCRIPT = yes so that enable-custom-scripts
    # default value is TRUE
    conf_content = dedent(
        """\
        [CUSTOM-SCRIPT]
        SCRIPT-NAME = test-script
        [MISC]
        MARKER-ID = 12345346
        DEFAULT-RUN-POST-CUST-SCRIPT = yes
        """
    )
    util.write_file(conf_file, conf_content)

    # Mock get_tools_config(section, key, defaultVal) to return
    # defaultVal
    def my_get_tools_config(*args, **kwargs):
        return args[2]

    with mock.patch(
        MPATH + "get_tools_config", side_effect=my_get_tools_config
    ):
        with mock.patch(
            MPATH + "set_customization_status", return_value=("msg", b"")
        ):
            # The script file itself is never created, so the attempted
            # run fails with CustomScriptNotFound -- proof it was run.
            with self.assertRaises(CustomScriptNotFound) as context:
                wrap_and_call(
                    "cloudinit.sources.DataSourceOVF",
                    {
                        "dmi.read_dmi_data": "vmware",
                        "util.del_dir": True,
                        "search_file": self.tdir,
                        "wait_for_imc_cfg_file": conf_file,
                        "get_nics_to_enable": "",
                    },
                    ds.get_data,
                )
    # Verify custom script still runs although it is
    # disabled by VMware Tools
    customscript = self.tmp_path("test-script", self.tdir)
    self.assertIn(
        "Script %s not found!!" % customscript, str(context.exception)
    )
def test_cloud_id_long_name_from_instance_data(self):
    """Report long cloud-id format from cloud_name and region."""
    util.write_file(
        self.instance_data,
        '{"v1": {"cloud_name": "mycloud", "region": "somereg"}}',
    )
    argv = ['cloud-id', '--instance-data', self.instance_data, '--long']
    with mock.patch('sys.argv', argv), \
            mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
        with self.assertRaises(SystemExit) as context_manager:
            cloud_id.main()
    # --long output is "<cloud-id>\t<region>".
    self.assertEqual(0, context_manager.exception.code)
    self.assertEqual("mycloud\tsomereg\n", m_stdout.getvalue())
def test_cloud_id_non_json_instance_data(self):
    """Exit error when the provided instance-data.json is not json."""
    argv = ['cloud-id', '--instance-data', self.instance_data]
    # An unterminated JSON object makes the file invalid.
    util.write_file(self.instance_data, '{')
    with mock.patch('sys.argv', argv), \
            mock.patch('sys.stderr', new_callable=StringIO) as m_stderr:
        with self.assertRaises(SystemExit) as context_manager:
            cloud_id.main()
    self.assertEqual(1, context_manager.exception.code)
    self.assertIn(
        "ERROR: File '%s' is not valid json." % self.instance_data,
        m_stderr.getvalue(),
    )
def test_cloud_id_lookup_from_instance_data_region(self):
    """Report discovered canonical cloud_id when region lookup matches."""
    util.write_file(
        self.instance_data,
        '{"v1": {"cloud_name": "aws", "region": "cn-north-1",'
        ' "platform": "ec2"}}',
    )
    argv = ['cloud-id', '--instance-data', self.instance_data, '--long']
    with mock.patch('sys.argv', argv), \
            mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
        with self.assertRaises(SystemExit) as context_manager:
            cloud_id.main()
    # aws in region cn-north-1 canonicalizes to aws-china.
    self.assertEqual(0, context_manager.exception.code)
    self.assertEqual("aws-china\tcn-north-1\n", m_stdout.getvalue())
def test_cloud_id_from_cloud_name_in_instance_data(self):
    """Report canonical cloud-id from cloud_name in instance-data."""
    util.write_file(
        self.instance_data,
        '{"v1": {"cloud_name": "mycloud", "region": "somereg"}}',
    )
    argv = ["cloud-id", "--instance-data", self.instance_data]
    with mock.patch("sys.argv", argv), \
            mock.patch("sys.stdout", new_callable=StringIO) as m_stdout:
        with self.assertRaises(SystemExit) as context_manager:
            cloud_id.main()
    # Success exits 0 and prints only the cloud id.
    self.assertEqual(0, context_manager.exception.code)
    self.assertEqual("mycloud\n", m_stdout.getvalue())
def _test_get_data_with_network_config(self, guestinfo, iso):
    # Shared helper: deliver network-config via guestinfo or the ISO
    # transport (selected by the flags) and assert it is parsed into
    # v2 network config.
    network_config = dedent("""\
        network:
          version: 2
          ethernets:
            nics:
              nameservers:
                addresses:
                - 127.0.0.53
                search:
                - vmware.com
              match:
                name: eth*
              gateway4: 10.10.10.253
              dhcp4: false
              addresses:
              - 10.10.10.1/24
        """)
    network_config_b64 = base64.b64encode(network_config.encode()).decode()
    props = {
        "network-config": network_config_b64,
        "password": "******",
        "instance-id": "inst-001"
    }
    env = fill_properties(props)
    paths = Paths({'cloud_dir': self.tdir, 'run_dir': self.tdir})
    ds = self.datasource(sys_cfg={}, distro={}, paths=paths)
    # The unselected transport reports NOT_FOUND so the datasource
    # falls through to the other one.
    with mock.patch(MPATH + 'transport_vmware_guestinfo',
                    return_value=env if guestinfo else NOT_FOUND):
        with mock.patch(MPATH + 'transport_iso9660',
                        return_value=env if iso else NOT_FOUND):
            self.assertTrue(ds.get_data())
            self.assertEqual('inst-001', ds.metadata['instance-id'])
            self.assertEqual(
                {
                    'version': 2,
                    'ethernets': {
                        'nics': {
                            'nameservers': {
                                'addresses': ['127.0.0.53'],
                                'search': ['vmware.com']
                            },
                            'match': {
                                'name': 'eth*'
                            },
                            'gateway4': '10.10.10.253',
                            'dhcp4': False,
                            'addresses': ['10.10.10.1/24']
                        }
                    }
                },
                ds.network_config)
def test_get_hostname_without_metadata_prefers_etc_hosts(self):
    """Datasource.gethostname prefers /etc/hosts to util.get_hostname."""
    run_dir = self.tmp_dir()
    datasource = DataSourceTestSubclassNet(
        self.sys_cfg, self.distro, Paths({'run_dir': run_dir})
    )
    self.assertEqual({}, datasource.metadata)
    with mock.patch(
        'cloudinit.sources.util.get_hostname'
    ) as m_gethost, mock.patch(
        'cloudinit.sources.util.get_fqdn_from_hosts'
    ) as m_fqdn:
        m_gethost.return_value = 'systemhostname.domain.com'
        m_fqdn.return_value = 'fqdnhostname.domain.com'
        # The /etc/hosts lookup wins over the system hostname.
        self.assertEqual('fqdnhostname', datasource.get_hostname())
        self.assertEqual(
            'fqdnhostname.domain.com', datasource.get_hostname(fqdn=True)
        )
def test_get_hostname_without_metadata_returns_none(self):
    """Datasource.gethostname returns None when metadata_only and no MD."""
    run_dir = self.tmp_dir()
    datasource = DataSourceTestSubclassNet(
        self.sys_cfg, self.distro, Paths({"run_dir": run_dir})
    )
    self.assertEqual({}, datasource.metadata)
    with mock.patch(
        "cloudinit.sources.util.get_hostname"
    ) as m_gethost, mock.patch(
        "cloudinit.sources.util.get_fqdn_from_hosts"
    ) as m_fqdn:
        self.assertIsNone(datasource.get_hostname(metadata_only=True))
        self.assertIsNone(
            datasource.get_hostname(fqdn=True, metadata_only=True)
        )
    # Neither system fallback may be consulted in metadata-only mode.
    self.assertEqual([], m_gethost.call_args_list)
    self.assertEqual([], m_fqdn.call_args_list)
def test_get_hostname_without_metadata_uses_system_hostname(self):
    """Datasource.gethostname runs util.get_hostname when no metadata."""
    tmp = self.tmp_dir()
    datasource = DataSourceTestSubclassNet(
        self.sys_cfg, self.distro, Paths({'run_dir': tmp})
    )
    self.assertEqual({}, datasource.metadata)
    mock_fqdn = 'cloudinit.sources.util.get_fqdn_from_hosts'
    with mock.patch('cloudinit.sources.util.get_hostname') as m_gethost:
        with mock.patch(mock_fqdn) as m_fqdn:
            m_gethost.return_value = 'systemhostname.domain.com'
            m_fqdn.return_value = None  # No matching fqdn in /etc/hosts
            # With no /etc/hosts match, the system hostname is used.
            self.assertEqual('systemhostname', datasource.get_hostname())
            self.assertEqual(
                'systemhostname.domain.com',
                datasource.get_hostname(fqdn=True)
            )
def test_install_chef_from_omnibus_runs_chef_url_content(self):
    """install_chef_from_omnibus calls subp_blob_in_tempfile."""
    script_body = b'#!/bin/bash\necho "Hi Mom"'
    httpretty.register_uri(
        httpretty.GET, cc_chef.OMNIBUS_URL, body=script_body, status=200
    )
    subp_result = (None, None)  # stdout, stderr but capture=False
    with mock.patch(
        "cloudinit.config.cc_chef.subp_blob_in_tempfile",
        return_value=subp_result,
    ) as m_subp_blob:
        cc_chef.install_chef_from_omnibus()
    # admittedly whitebox, but assuming subp_blob_in_tempfile works
    # this should be fine.
    self.assertEqual(
        [
            mock.call(
                blob=script_body,
                args=[],
                basename="chef-omnibus-install",
                capture=False,
            )
        ],
        m_subp_blob.call_args_list,
    )
def test_handle_args_list_keys_sorts_nested_keys_when_varname(
    self, capsys, tmpdir
):
    """Sort all nested keys of varname object when --list-keys provided."""
    instance_data = tmpdir.join("instance-data")
    instance_data.write(
        '{"v1": {"v1_1": "val1.1", "v1_2": "val1.2"}, "v2":'
        + ' {"v2_2": "val2.2"}, "top": "gun"}'
    )
    # Only the keys under varname "v1", sorted, one per line.
    expected = "v1_1\nv1_2\n"
    args = self.args(
        debug=False,
        dump_all=False,
        format=None,
        instance_data=instance_data.strpath,
        list_keys=True,
        user_data="ud",
        vendor_data="vd",
        varname="v1",
    )
    with mock.patch("os.getuid") as m_getuid:
        # Non-root: the provided instance-data file is used directly.
        m_getuid.return_value = 100
        assert 0 == query.handle_args("anyname", args)
    out, _err = capsys.readouterr()
    assert expected == out
def test_run_command_logs_commands_and_exit_codes_to_stderr(self):
    """All exit codes are logged to stderr."""
    outfile = self.tmp_path("output.log", dir=self.tmp)

    cmd1 = 'echo "HI" >> %s' % outfile
    cmd2 = "bogus command"
    cmd3 = 'echo "MOM" >> %s' % outfile
    commands = [cmd1, cmd2, cmd3]

    mock_path = "cloudinit.config.cc_snap.sys.stderr"
    with mock.patch(mock_path, new_callable=StringIO) as m_stderr:
        # cmd2 fails, which must surface as a RuntimeError after all
        # commands have been attempted.
        with self.assertRaises(RuntimeError) as context_manager:
            run_commands(commands=commands)

    # Shell "not found" wording varies across shells/locales, so match
    # with a tolerant regex rather than an exact string.
    self.assertIsNotNone(
        re.search(
            r"bogus: (command )?not found", str(context_manager.exception)
        ),
        msg="Expected bogus command not found",
    )
    expected_stderr_log = "\n".join(
        [
            "Begin run command: {cmd}".format(cmd=cmd1),
            "End run command: exit(0)",
            "Begin run command: {cmd}".format(cmd=cmd2),
            "ERROR: End run command: exit(127)",
            "Begin run command: {cmd}".format(cmd=cmd3),
            "End run command: exit(0)\n",
        ]
    )
    self.assertEqual(expected_stderr_log, m_stderr.getvalue())
def test_jinja_template_handle_errors_on_unreadable_instance_data(self):
    """If instance-data is unreadable, raise an error from handle_part."""
    script_handler = ShellScriptPartHandler(self.paths)
    instance_json = os.path.join(self.run_dir, INSTANCE_DATA_FILE)
    util.write_file(instance_json, util.json_dumps({}))
    h = JinjaTemplatePartHandler(self.paths, sub_handlers=[script_handler])
    # Simulate a permission error when the handler loads instance-data.
    with mock.patch(self.mpath + "load_file") as m_load:
        with self.assertRaises(RuntimeError) as context_manager:
            m_load.side_effect = OSError(errno.EACCES, "Not allowed")
            h.handle_part(
                data="data",
                ctype="!" + handlers.CONTENT_START,
                filename="part01",
                payload="## template: jinja  \n#!/bin/bash\necho himom",
                frequency="freq",
                headers="headers",
            )
    script_file = os.path.join(script_handler.script_dir, "part01")
    self.assertEqual(
        "Cannot render jinja template vars. No read permission on "
        "'{}/{}'. Try sudo".format(self.run_dir, INSTANCE_DATA_FILE),
        str(context_manager.exception),
    )
    # The failed part must not leave a partial script behind.
    self.assertFalse(
        os.path.exists(script_file),
        "Unexpected file created %s" % script_file,
    )
def test_maybe_get_writable_device_path_warns_missing_cmdline_root(self):
    """When the root device isn't in the cmdline, log a warning."""
    info = "does not matter"

    def fake_mount_info(path, log):
        # maybe_get_writable_device_path must query the rootfs mount.
        self.assertEqual("/", path)
        self.assertEqual(LOG, log)
        return ("/dev/root", "ext4", "/")

    exists_mock_path = "cloudinit.config.cc_resizefs.os.path.exists"
    with mock.patch(exists_mock_path) as m_exists:
        # /dev/root does not exist and the cmdline has no root= entry.
        m_exists.return_value = False
        devpath = wrap_and_call(
            "cloudinit.config.cc_resizefs.util",
            {
                "is_container": {"return_value": False},
                "get_mount_info": {"side_effect": fake_mount_info},
                "get_cmdline": {"return_value": "BOOT_IMAGE=/vmlinuz.efi"},
            },
            maybe_get_writable_device_path,
            "/dev/root",
            info,
            LOG,
        )
    self.assertIsNone(devpath)
    logs = self.logs.getvalue()
    self.assertIn("WARNING: Unable to find device '/dev/root'", logs)
def test_ntp_user_provided_config_template_only(self, m_select, m_install,
                                                m_schema):
    """Test custom template for default client"""
    custom = r'\n#MyCustomTemplate'
    user_template = NTP_TEMPLATE + custom
    client = 'chrony'
    cfg = {
        'pools': ['mypool.org'],
        'ntp_client': client,
        'config': {
            'template': user_template,
        }
    }
    # The user template must survive the merge with the client defaults.
    expected_merged_cfg = {
        'check_exe': 'chronyd',
        'confpath': '{tmpdir}/client.conf'.format(tmpdir=self.new_root),
        'template_name': 'client.conf',
        'template': user_template,
        'service_name': 'chrony',
        'packages': ['chrony']
    }
    for distro in cc_ntp.distros:
        mycloud = self._get_cloud(distro)
        ntpconfig = self._mock_ntp_client_config(client=client,
                                                 distro=distro)
        confpath = ntpconfig['confpath']
        m_select.return_value = ntpconfig
        mock_path = 'cloudinit.config.cc_ntp.temp_utils._TMPDIR'
        with mock.patch(mock_path, self.new_root):
            cc_ntp.handle('notimportant', {'ntp': cfg}, mycloud, None, None)
        # Rendered config ends with the user's custom template suffix.
        self.assertEqual(
            "servers []\npools ['mypool.org']\n%s" % custom,
            util.load_file(confpath))
        m_schema.assert_called_with(expected_merged_cfg)
def test_handle_warns_on_undiscoverable_root_path_in_commandline(self):
    """handle noops when the root path is not found on the commandline."""
    cfg = {"resize_rootfs": True}
    exists_mock_path = "cloudinit.config.cc_resizefs.os.path.exists"

    def fake_mount_info(path, log):
        # handle must query the rootfs mount.
        self.assertEqual("/", path)
        self.assertEqual(LOG, log)
        return ("/dev/root", "ext4", "/")

    with mock.patch(exists_mock_path) as m_exists:
        # /dev/root missing and no root= on the kernel cmdline: handle
        # must warn and do nothing rather than attempt a resize.
        m_exists.return_value = False
        wrap_and_call(
            "cloudinit.config.cc_resizefs.util",
            {
                "is_container": {"return_value": False},
                "get_mount_info": {"side_effect": fake_mount_info},
                "get_cmdline": {"return_value": "BOOT_IMAGE=/vmlinuz.efi"},
            },
            handle,
            "cc_resizefs",
            cfg,
            _cloud=None,
            log=LOG,
            args=[],
        )
    logs = self.logs.getvalue()
    self.assertIn("WARNING: Unable to find device '/dev/root'", logs)