def test_metadata_network_config(self, m_is_lxd):
    """A seed-dir 'network-config' file must surface as network_config."""
    # network-config needs to get into network_config
    netconf = {
        "version": 1,
        "config": [
            {
                "type": "physical",
                "name": "interface0",
                "subnets": [{"type": "dhcp"}],
            }
        ],
    }
    populate_dir(
        os.path.join(self.paths.seed_dir, "nocloud"),
        {
            "user-data": b"ud",
            "meta-data": "instance-id: IID\n",
            "network-config": yaml.dump(netconf) + "\n",
        },
    )
    sys_cfg = {"datasource": {"NoCloud": {"fs_label": None}}}
    dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths)
    ret = dsrc.get_data()
    self.assertTrue(ret)
    # The parsed network config must round-trip unchanged.
    self.assertEqual(netconf, dsrc.network_config)
def test_metadata_network_interfaces(self, m_is_lxd):
    """ENI-style 'network-interfaces' in meta-data feeds network_config."""
    gateway = "103.225.10.1"
    md = {
        "instance-id": "i-abcd",
        "local-hostname": "hostname1",
        "network-interfaces": textwrap.dedent(
            """\
            auto eth0
            iface eth0 inet static
                hwaddr 00:16:3e:70:e1:04
                address 103.225.10.12
                netmask 255.255.255.0
                gateway """
            + gateway
            + """
                dns-servers 8.8.8.8"""
        ),
    }
    populate_dir(
        os.path.join(self.paths.seed_dir, "nocloud"),
        {"user-data": b"ud", "meta-data": yaml.dump(md) + "\n"},
    )
    sys_cfg = {"datasource": {"NoCloud": {"fs_label": None}}}
    dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths)
    ret = dsrc.get_data()
    self.assertTrue(ret)
    # very simple check just for the strings above
    self.assertIn(gateway, str(dsrc.network_config))
def test_dev_os_remap(self):
    """device_name_to_device remaps os-style names back to /dev paths.

    find_devs_with is mocked to hand back an "sdX"-style name while
    os.path.exists first reports that name absent, then the remapped
    name present — the datasource must return the expected /dev/vdX.
    """
    populate_dir(self.tmp, CFG_DRIVE_FILES_V2)
    cfg_ds = ds.DataSourceConfigDrive(settings.CFG_BUILTIN,
                                      None,
                                      helpers.Paths({}))
    found = ds.read_config_drive(self.tmp)
    cfg_ds.metadata = found['metadata']
    name_tests = {
        'ami': '/dev/vda1',
        'root': '/dev/vda1',
        'ephemeral0': '/dev/vda2',
        'swap': '/dev/vda3',
    }
    for name, dev_name in name_tests.items():
        with ExitStack() as mocks:
            # Turn '/dev/vda1' into 'sda1' to simulate the os-level name.
            provided_name = dev_name[len('/dev/'):]
            provided_name = "s" + provided_name[1:]
            find_mock = mocks.enter_context(
                mock.patch.object(util, 'find_devs_with',
                                  return_value=[provided_name]))
            # We want os.path.exists() to return False on its first call,
            # and True on its second call. We use a handy generator as
            # the mock side effect for this. The mocked function returns
            # what the side effect returns.

            def exists_side_effect():
                yield False
                yield True

            exists_mock = mocks.enter_context(
                mock.patch.object(os.path, 'exists',
                                  side_effect=exists_side_effect()))
            self.assertEqual(dev_name, cfg_ds.device_name_to_device(name))
            find_mock.assert_called_once_with(mock.ANY)
            self.assertEqual(exists_mock.call_count, 2)
def test_nocloud_get_devices_freebsd(self, m_is_lxd, fake_blkid):
    """On FreeBSD, _get_devices uses find_devs_with_freebsd, not blkid."""
    populate_dir(os.path.join(self.paths.seed_dir, "nocloud"), {
        'user-data': b"ud",
        'meta-data': "instance-id: IID\n"
    })
    sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}}
    self.mocks.enter_context(
        mock.patch.object(util, 'is_FreeBSD', return_value=True))

    # Fake the FreeBSD device lookup for both supported filesystem types.
    def _mfind_devs_with_freebsd(criteria=None, oformat='device',
                                 tag=None, no_cache=False, path=None):
        if not criteria:
            return ["/dev/msdosfs/foo", "/dev/iso9660/foo"]
        if criteria.startswith("LABEL="):
            return ["/dev/msdosfs/foo", "/dev/iso9660/foo"]
        elif criteria == "TYPE=vfat":
            return ["/dev/msdosfs/foo"]
        elif criteria == "TYPE=iso9660":
            return ["/dev/iso9660/foo"]
        return []

    self.mocks.enter_context(
        mock.patch.object(util, 'find_devs_with_freebsd',
                          side_effect=_mfind_devs_with_freebsd))
    dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths)
    ret = dsrc._get_devices('foo')
    self.assertEqual(['/dev/msdosfs/foo', '/dev/iso9660/foo'], ret)
    # blkid must never be consulted on FreeBSD.
    fake_blkid.assert_not_called()
def test_existing_ovf_diff(self):
    # waagent/SharedConfig must be removed if ovfenv is found elsewhere
    # 'get_data' should remove SharedConfig.xml in /var/lib/waagent
    # if ovf-env.xml differs.
    # NOTE(review): base64.b64encode() is passed str here, which only
    # works on Python 2 — confirm this file still targets py2.
    cached_ovfenv = construct_valid_ovf_env(
        {'userdata': base64.b64encode("FOO_USERDATA")})
    new_ovfenv = construct_valid_ovf_env(
        {'userdata': base64.b64encode("NEW_USERDATA")})
    populate_dir(self.waagent_d,
                 {'ovf-env.xml': cached_ovfenv,
                  'SharedConfig.xml': "mysharedconfigxml",
                  'otherfile': 'otherfilecontent'})
    dsrc = self._get_ds({'ovfcontent': new_ovfenv})
    ret = dsrc.get_data()
    self.assertTrue(ret)
    self.assertEqual(dsrc.userdata_raw, "NEW_USERDATA")
    # Stale SharedConfig.xml removed; unrelated files and the fresh
    # ovf-env.xml must survive.
    self.assertTrue(os.path.exists(
        os.path.join(self.waagent_d, 'otherfile')))
    self.assertFalse(
        os.path.exists(os.path.join(self.waagent_d, 'SharedConfig.xml')))
    self.assertTrue(
        os.path.exists(os.path.join(self.waagent_d, 'ovf-env.xml')))
    self.assertEqual(new_ovfenv,
                     load_file(os.path.join(self.waagent_d, 'ovf-env.xml')))
def test_metadata_network_config(self, m_is_lxd):
    """A seed-dir 'network-config' file must surface as network_config."""
    # network-config needs to get into network_config
    netconf = {
        'version': 1,
        'config': [{
            'type': 'physical',
            'name': 'interface0',
            'subnets': [{
                'type': 'dhcp'
            }]
        }]
    }
    populate_dir(
        os.path.join(self.paths.seed_dir, "nocloud"),
        {
            'user-data': b"ud",
            'meta-data': "instance-id: IID\n",
            'network-config': yaml.dump(netconf) + "\n"
        })
    sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}}
    dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths)
    ret = dsrc.get_data()
    self.assertTrue(ret)
    # The parsed network config must round-trip unchanged.
    self.assertEqual(netconf, dsrc.network_config)
def test_dev_ec2_remap(self):
    """device_name_to_device resolves ec2-metadata device names.

    Known names map to their /dev paths; unknown names (and None)
    must resolve to None.
    """
    populate_dir(self.tmp, CFG_DRIVE_FILES_V2)
    cfg_ds = ds.DataSourceConfigDrive(settings.CFG_BUILTIN,
                                      None,
                                      helpers.Paths({}))
    found = ds.read_config_drive(self.tmp)
    ec2_md = found['ec2-metadata']
    os_md = found['metadata']
    cfg_ds.ec2_metadata = ec2_md
    cfg_ds.metadata = os_md
    name_tests = {
        'ami': '/dev/vda1',
        'root': '/dev/vda1',
        'ephemeral0': '/dev/vda2',
        'swap': '/dev/vda3',
        None: None,
        'bob': None,
        'root2k': None,
    }
    for name, dev_name in name_tests.items():
        # We want os.path.exists() to return False on its first call,
        # and True on its second call. We use a handy generator as
        # the mock side effect for this. The mocked function returns
        # what the side effect returns.
        def exists_side_effect():
            yield False
            yield True

        with mock.patch.object(os.path, 'exists',
                               side_effect=exists_side_effect()):
            self.assertEqual(dev_name, cfg_ds.device_name_to_device(name))
def test_dev_os_map(self):
    """device_name_to_device returns the device find_devs_with reports
    when that device path already exists."""
    populate_dir(self.tmp, CFG_DRIVE_FILES_V2)
    cfg_ds = ds.DataSourceConfigDrive(settings.CFG_BUILTIN,
                                      None,
                                      helpers.Paths({}))
    found = ds.read_config_drive(self.tmp)
    os_md = found['metadata']
    cfg_ds.metadata = os_md
    name_tests = {
        'ami': '/dev/vda1',
        'root': '/dev/vda1',
        'ephemeral0': '/dev/vda2',
        'swap': '/dev/vda3',
    }
    for name, dev_name in name_tests.items():
        with ExitStack() as mocks:
            find_mock = mocks.enter_context(
                mock.patch.object(util, 'find_devs_with',
                                  return_value=[dev_name]))
            # Device exists on first check, so no remapping is needed.
            exists_mock = mocks.enter_context(
                mock.patch.object(os.path, 'exists',
                                  return_value=True))
            self.assertEqual(dev_name, cfg_ds.device_name_to_device(name))
            find_mock.assert_called_once_with(mock.ANY)
            exists_mock.assert_called_once_with(mock.ANY)
def test_status_wrapper_init_local_writes_fresh_status_info(self, tmpdir):
    """When running in init-local mode, status_wrapper writes status.json.

    Old status and results artifacts are also removed.
    """
    data_d = tmpdir.join("data")
    link_d = tmpdir.join("link")
    status_link = link_d.join("status.json")
    # Write old artifacts which will be removed or updated.
    for _dir in data_d, link_d:
        test_helpers.populate_dir(str(_dir), {
            "status.json": "old",
            "result.json": "old"
        })
    FakeArgs = namedtuple("FakeArgs", ["action", "local", "mode"])

    def myaction(name, args):
        # Return an error to watch status capture them
        return "SomeDatasource", ["an error"]

    myargs = FakeArgs(("ignored_name", myaction), True, "bogusmode")
    cli.status_wrapper("init", myargs, data_d, link_d)
    # The simulated error must be captured under init-local in status.json.
    status_v1 = load_json(load_file(status_link))["v1"]
    assert ["an error"] == status_v1["init-local"]["errors"]
    assert "SomeDatasource" == status_v1["datasource"]
    # Stale result.json artifacts must have been removed.
    assert False is os.path.exists(
        data_d.join("result.json")), "unexpected result.json found"
    assert False is os.path.exists(
        link_d.join("result.json")), "unexpected result.json link found"
def test_existing_ovf_diff(self):
    # waagent/SharedConfig must be removed if ovfenv is found elsewhere
    # 'get_data' should remove SharedConfig.xml in /var/lib/waagent
    # if ovf-env.xml differs.
    # NOTE(review): base64.b64encode() is passed str here, which only
    # works on Python 2 — confirm this file still targets py2.
    cached_ovfenv = construct_valid_ovf_env(
        {'userdata': base64.b64encode("FOO_USERDATA")})
    new_ovfenv = construct_valid_ovf_env(
        {'userdata': base64.b64encode("NEW_USERDATA")})
    populate_dir(
        self.waagent_d, {
            'ovf-env.xml': cached_ovfenv,
            'SharedConfig.xml': "mysharedconfigxml",
            'otherfile': 'otherfilecontent'
        })
    dsrc = self._get_ds({'ovfcontent': new_ovfenv})
    ret = dsrc.get_data()
    self.assertTrue(ret)
    self.assertEqual(dsrc.userdata_raw, "NEW_USERDATA")
    # Stale SharedConfig.xml removed; unrelated files and the fresh
    # ovf-env.xml must survive.
    self.assertTrue(
        os.path.exists(os.path.join(self.waagent_d, 'otherfile')))
    self.assertFalse(
        os.path.exists(os.path.join(self.waagent_d, 'SharedConfig.xml')))
    self.assertTrue(
        os.path.exists(os.path.join(self.waagent_d, 'ovf-env.xml')))
    self.assertEqual(
        new_ovfenv,
        load_file(os.path.join(self.waagent_d, 'ovf-env.xml')))
def test_status_wrapper_init_local_writes_fresh_status_info(self):
    """When running in init-local mode, status_wrapper writes status.json.

    Old status and results artifacts are also removed.
    """
    tmpd = self.tmp_dir()
    data_d = self.tmp_path('data', tmpd)
    link_d = self.tmp_path('link', tmpd)
    status_link = self.tmp_path('status.json', link_d)
    # Write old artifacts which will be removed or updated.
    for _dir in data_d, link_d:
        test_helpers.populate_dir(
            _dir, {'status.json': 'old', 'result.json': 'old'})
    FakeArgs = namedtuple('FakeArgs', ['action', 'local', 'mode'])

    def myaction(name, args):
        # Return an error to watch status capture them
        return 'SomeDatasource', ['an error']

    myargs = FakeArgs(('ignored_name', myaction), True, 'bogusmode')
    cli.status_wrapper('init', myargs, data_d, link_d)
    # The simulated error must be captured under init-local in status.json.
    status_v1 = load_json(load_file(status_link))['v1']
    self.assertEqual(['an error'], status_v1['init-local']['errors'])
    self.assertEqual('SomeDatasource', status_v1['datasource'])
    # Stale result.json artifacts must have been removed.
    self.assertFalse(
        os.path.exists(self.tmp_path('result.json', data_d)),
        'unexpected result.json found')
    self.assertFalse(
        os.path.exists(self.tmp_path('result.json', link_d)),
        'unexpected result.json link found')
def test_no_required_and_optional(self):
    """With required=None, present optional files are returned as-is."""
    expected = {'f1': 'f1c', 'f2': 'f2c'}
    populate_dir(self.tmp, expected)

    result = util.pathprefix2dict(
        self.tmp, required=None, optional=['f1', 'f2'])
    self.assertEqual(expected, result)
def test_handler_full_setup(self):
    """Test that the handler ends up calling the renderers.

    Verifies both the zypp.conf update and the written repo files.
    """
    cfg = self._get_base_config_repos()
    cfg['zypper']['config'] = {
        'download.deltarpm': 'False',
    }
    root_d = self.tmp_dir()
    os.makedirs('%s/etc/zypp/repos.d' % root_d)
    helpers.populate_dir(root_d, {self.zypp_conf: '# Zypp config\n'})
    self.reRoot(root_d)
    cc_zypper_add_repo.handle('zypper_add_repo', cfg, None, LOG, [])
    cfg_out = os.path.join(root_d, self.zypp_conf)
    contents = util.load_file(cfg_out)
    expected = [
        '# Zypp config',
        '# Added via cloud.cfg',
        'download.deltarpm=False',
    ]
    for item in contents.split('\n'):
        # Every non-empty written line must be one we expect.
        if item:
            self.assertIn(item, expected)
    repos = glob.glob('%s/etc/zypp/repos.d/*.repo' % root_d)
    expected_repos = ['testing-foo.repo', 'testing-bar.repo']
    # BUG FIX: the original used `assert '<message>'` inside an `if`,
    # which is a bare assert on a non-empty string and can never fail.
    # Assert the actual condition, keeping the message.
    self.assertEqual(
        2, len(repos),
        'Number of repos written is "%d" expected 2' % len(repos))
    for repo in repos:
        repo_name = os.path.basename(repo)
        self.assertIn(
            repo_name, expected_repos,
            'Found repo with name "%s"; unexpected' % repo_name)
def test_seed_dir_invalid(self):
    """Verify that invalid seed_dir raises MAASSeedDirMalformed."""
    valid = {
        'instance-id': 'i-instanceid',
        'local-hostname': 'test-hostname',
        'user-data': '',
    }
    my_based = os.path.join(self.tmp, "valid_extra")

    # Each case drops one mandatory key and expects a failure.
    for suffix, missing in (("-01", 'local-hostname'),
                            ("-02", 'instance-id')):
        seed_d = my_based + suffix
        incomplete = copy(valid)
        del incomplete[missing]
        populate_dir(seed_d, incomplete)
        self.assertRaises(DataSourceMAAS.MAASSeedDirMalformed,
                          DataSourceMAAS.read_maas_seed_dir, seed_d)
def test_template_live(self, m_platform, m_sysuuid):
    """Template live environment should be identified."""
    tmpdir = self.tmp_dir()
    m_platform.return_value = (
        ibm.Platforms.TEMPLATE_LIVE_METADATA,
        tmpdir,
    )
    m_sysuuid.return_value = self.sysuuid
    # Lay down the files a template-live metadata disk would carry.
    test_helpers.populate_dir(
        tmpdir,
        {
            "openstack/latest/meta_data.json": json.dumps(
                self.template_md
            ),
            "openstack/latest/user_data": self.userdata,
            "openstack/content/interfaces": self.content_interfaces,
            "meta.js": self.meta_js,
        },
    )
    ret = ibm.read_md()
    # read_md must report platform, source, userdata, metadata and uuid.
    self.assertEqual(ibm.Platforms.TEMPLATE_LIVE_METADATA, ret["platform"])
    self.assertEqual(tmpdir, ret["source"])
    self.assertEqual(self.userdata, ret["userdata"])
    self.assertEqual(
        self._get_expected_metadata(self.template_md), ret["metadata"]
    )
    self.assertEqual(self.sysuuid, ret["system-uuid"])
def test_no_required_and_optional(self):
    """With required=None, present optional files are returned as-is."""
    expected = {"f1": b"f1c", "f2": b"f2c"}
    populate_dir(self.tmp, expected)

    result = util.pathprefix2dict(
        self.tmp, required=None, optional=["f1", "f2"]
    )
    self.assertEqual(expected, result)
def test_config_and_log_no_reference(self):
    """If the config and log existed, but no reference, assume not."""
    root = self.tmp_dir()
    files = {self.prov_cfg: "key=value", self.inst_log: "log data\n"}
    test_helpers.populate_dir(root, files)

    self.assertFalse(self._call_with_root(rootd=root))
    self.assertIn("no reference file", self.logs.getvalue())
def create_system_files(self):
    """Seed a fake root with the product-uuid file and chroot into it."""
    root = self.tmp_dir()
    files = {DataSourceVMware.PRODUCT_UUID_FILE_PATH: PRODUCT_UUID}
    populate_dir(root, files)
    self.assertTrue(self.reRoot(root))
def test_seed_dir_broken_context(self):
    """An invalid context.sh must raise BrokenContextDiskDir."""
    populate_dir(self.seed_dir, {"context.sh": INVALID_CONTEXT})
    with self.assertRaises(ds.BrokenContextDiskDir):
        ds.read_context_disk_dir(self.seed_dir, mock.Mock())
def test_multiple_files(self):
    """Multiple leases files on azure with one found return that value."""
    self.maxDiff = None
    populate_dir(
        self.lease_d, {"1": self.azure_lease, "9": self.lxd_lease}
    )
    expected = {"1": self.azure_parsed, "9": self.lxd_parsed}
    self.assertEqual(expected, networkd_load_leases(self.lease_d))
def test_run_hook_down_deletes(self):
    """down should delete the created json file."""
    nic = "eth1"
    seeded = {nic + ".json": "{'abcd'}", "myfile.txt": "text"}
    populate_dir(self.tmp, seeded)

    dhc.run_hook(nic, "down", data_d=self.tmp, env={"old_host_name": "x1"})

    remaining = set(dir2dict(self.tmp + os.path.sep))
    self.assertEqual({"myfile.txt"}, remaining)
def test_get_data_broken_contextdisk(self):
    """get_data must propagate BrokenContextDiskDir for a bad context.sh."""
    # Manually monkey-patch find_devs_with; restored in finally.
    orig_find_devs_with = util.find_devs_with
    try:
        # don't try to look up CDs
        util.find_devs_with = lambda n: []
        populate_dir(self.seed_dir, {'context.sh': INVALID_CONTEXT})
        dsrc = self.ds(sys_cfg=self.sys_cfg, distro=None, paths=self.paths)
        self.assertRaises(ds.BrokenContextDiskDir, dsrc.get_data)
    finally:
        util.find_devs_with = orig_find_devs_with
def test_get_data_broken_contextdisk(self):
    """get_data must propagate BrokenContextDiskDir for a bad context.sh."""
    # Manually monkey-patch find_devs_with; restored in finally.
    orig_find_devs_with = util.find_devs_with
    try:
        # don't try to look up CDs
        util.find_devs_with = lambda n: []  # type: ignore
        populate_dir(self.seed_dir, {"context.sh": INVALID_CONTEXT})
        dsrc = self.ds(sys_cfg=self.sys_cfg, distro=None, paths=self.paths)
        self.assertRaises(ds.BrokenContextDiskDir, dsrc.get_data)
    finally:
        util.find_devs_with = orig_find_devs_with
def test_no_config_section_no_new_data(self):
    """When there is no config section no new data should be written to
    zypp.conf"""
    cfg = self._get_base_config_repos()
    root_d = self.tmp_dir()
    helpers.populate_dir(root_d, {self.zypp_conf: '# No data'})
    self.reRoot(root_d)

    cc_zypper_add_repo._write_zypp_config(cfg.get('config', {}))

    contents = util.load_file(os.path.join(root_d, self.zypp_conf))
    self.assertEqual('# No data', contents)
def test_seed_dir_bad_json_metadata(self):
    """Verify that bad json in metadata raises BrokenConfigDriveDir."""
    data = copy(CFG_DRIVE_FILES_V2)
    garbage = "non-json garbage {}"
    for version in ("2012-08-10", "2015-10-15", "latest"):
        data["openstack/%s/meta_data.json" % version] = garbage
    populate_dir(self.tmp, data)
    self.assertRaises(
        openstack.BrokenMetadata, ds.read_config_drive, self.tmp)
def test_empty_config_value_no_new_data(self):
    """When the config section is not empty but there are no values
    no new data should be written to zypp.conf"""
    cfg = self._get_base_config_repos()
    cfg['zypper']['config'] = {'download.deltarpm': None}
    root_d = self.tmp_dir()
    helpers.populate_dir(root_d, {self.zypp_conf: '# No data'})
    self.reRoot(root_d)

    cc_zypper_add_repo._write_zypp_config(cfg.get('config', {}))

    contents = util.load_file(os.path.join(root_d, self.zypp_conf))
    self.assertEqual('# No data', contents)
def _get_ds(self, data):
    """Construct a DataSourceAzureNet with azure internals stubbed out.

    ``data`` provides the test inputs ('ovfcontent', 'dsdevs', 'sys_cfg')
    and doubles as an out-parameter: the fakes below record what the
    datasource did (files written, agent command, waited-for files, ...)
    into it for later assertions.
    """

    def dsdevs():
        return data.get('dsdevs', [])

    def _invoke_agent(cmd):
        data['agent_invoked'] = cmd

    def _write_files(datadir, files, dirmode):
        # Capture the written files instead of touching the filesystem.
        data['files'] = {}
        data['datadir'] = datadir
        data['datadir_mode'] = dirmode
        for (fname, content) in files.items():
            data['files'][fname] = content

    def _wait_for_files(flist, _maxwait=None, _naplen=None):
        data['waited'] = flist
        return []

    def _pubkeys_from_crt_files(flist):
        data['pubkey_files'] = flist
        return ["pubkey_from: %s" % f for f in flist]

    def _iid_from_shared_config(path):
        data['iid_from_shared_cfg'] = path
        return 'i-my-azure-id'

    def _apply_hostname_bounce(**kwargs):
        data['apply_hostname_bounce'] = kwargs

    # Seed the azure dir with ovf-env.xml when content is provided.
    if data.get('ovfcontent') is not None:
        populate_dir(os.path.join(self.paths.seed_dir, "azure"),
                     {'ovf-env.xml': data['ovfcontent']})

    mod = DataSourceAzure
    if data.get('dsdevs'):
        self.apply_patches([(mod, 'list_possible_azure_ds_devs', dsdevs)])
    self.apply_patches([(mod, 'invoke_agent', _invoke_agent),
                        (mod, 'write_files', _write_files),
                        (mod, 'wait_for_files', _wait_for_files),
                        (mod, 'pubkeys_from_crt_files',
                         _pubkeys_from_crt_files),
                        (mod, 'iid_from_shared_config',
                         _iid_from_shared_config),
                        (mod, 'apply_hostname_bounce',
                         _apply_hostname_bounce),
                        ])
    dsrc = mod.DataSourceAzureNet(
        data.get('sys_cfg', {}), distro=None, paths=self.paths)
    return dsrc
def test_empty_config_section_no_new_data(self):
    """When the config section is empty no new data should be written
    to zypp.conf"""
    cfg = self._get_base_config_repos()
    cfg["zypper"]["config"] = None
    root_d = self.tmp_dir()
    helpers.populate_dir(root_d, {self.zypp_conf: "# No data"})
    self.reRoot(root_d)

    cc_zypper_add_repo._write_zypp_config(cfg.get("config", {}))

    contents = util.load_file(os.path.join(root_d, self.zypp_conf))
    self.assertEqual("# No data", contents)
def test_nocloud_no_vendordata(self):
    """A seed without vendor-data leaves dsrc.vendordata falsy."""
    seed_files = {'user-data': "ud", 'meta-data': "instance-id: IID\n"}
    populate_dir(os.path.join(self.paths.seed_dir, "nocloud"), seed_files)

    sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}}

    ds = DataSourceNoCloud.DataSourceNoCloud
    dsrc = ds(sys_cfg=sys_cfg, distro=None, paths=self.paths)
    ret = dsrc.get_data()
    self.assertEqual("ud", dsrc.userdata_raw)
    self.assertFalse(dsrc.vendordata)
    self.assertTrue(ret)
def test_nocloud_no_vendordata(self, m_is_lxd):
    """A seed without vendor-data leaves dsrc.vendordata falsy."""
    seed_files = {"user-data": b"ud", "meta-data": "instance-id: IID\n"}
    populate_dir(os.path.join(self.paths.seed_dir, "nocloud"), seed_files)

    sys_cfg = {"datasource": {"NoCloud": {"fs_label": None}}}

    dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths)
    ret = dsrc.get_data()
    self.assertEqual(b"ud", dsrc.userdata_raw)
    self.assertFalse(dsrc.vendordata)
    self.assertTrue(ret)
def cfg_ds_from_dir(base_d, files=None):
    """Build a DataSourceConfigDrive seeded from files under base_d.

    Raises RuntimeError when the datasource fails to extract data from
    the populated seed directory.
    """
    run_d = os.path.join(base_d, "run")
    os.mkdir(run_d)
    cfg_ds = ds.DataSourceConfigDrive(
        settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': run_d}))
    cfg_ds.seed_dir = os.path.join(base_d, "seed")
    if files:
        populate_dir(cfg_ds.seed_dir, files)
    cfg_ds.known_macs = KNOWN_MACS.copy()
    if not cfg_ds.get_data():
        raise RuntimeError("Data source did not extract itself from"
                           " seed directory %s" % cfg_ds.seed_dir)
    return cfg_ds
def test_nocloud_seed_dir_non_lxd_platform_is_nocloud(self, m_is_lxd):
    """Non-lxd environments will list nocloud as the platform."""
    m_is_lxd.return_value = False
    md = {"instance-id": "IID", "dsmode": "local"}
    seed_dir = os.path.join(self.paths.seed_dir, "nocloud")
    populate_dir(
        seed_dir, {"user-data": "", "meta-data": yaml.safe_dump(md)}
    )
    sys_cfg = {"datasource": {"NoCloud": {"fs_label": None}}}
    dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths)
    self.assertTrue(dsrc.get_data())
    # platform reflects non-lxd; subplatform records the seed location.
    self.assertEqual(dsrc.platform_type, "nocloud")
    self.assertEqual(dsrc.subplatform, "seed-dir (%s)" % seed_dir)
def test_nocloud_no_vendordata(self):
    """A seed without vendor-data leaves dsrc.vendordata falsy."""
    seed_files = {'user-data': "ud", 'meta-data': "instance-id: IID\n"}
    populate_dir(os.path.join(self.paths.seed_dir, "nocloud"), seed_files)

    sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}}

    ds = DataSourceNoCloud.DataSourceNoCloud
    dsrc = ds(sys_cfg=sys_cfg, distro=None, paths=self.paths)
    ret = dsrc.get_data()
    self.assertEqual("ud", dsrc.userdata_raw)
    self.assertFalse(dsrc.vendordata)
    self.assertTrue(ret)
def test_nocloud_seed_dir(self):
    """Seed-dir user-data and meta-data populate the datasource."""
    md = {'instance-id': 'IID', 'dsmode': 'local'}
    ud = "USER_DATA_HERE"
    seed_files = {'user-data': ud, 'meta-data': yaml.safe_dump(md)}
    populate_dir(os.path.join(self.paths.seed_dir, "nocloud"), seed_files)

    sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}}

    ds = DataSourceNoCloud.DataSourceNoCloud
    dsrc = ds(sys_cfg=sys_cfg, distro=None, paths=self.paths)
    ret = dsrc.get_data()
    self.assertEqual(ud, dsrc.userdata_raw)
    self.assertEqual(md, dsrc.metadata)
    self.assertTrue(ret)
def test_seed_dir_valid_extra(self):
    """Verify extra files do not affect seed_dir validity."""
    data = {'instance-id': 'i-valid-extra',
            'local-hostname': 'valid-extra-hostname',
            'user-data': 'valid-extra-userdata', 'foo': 'bar'}
    seed_d = os.path.join(self.tmp, "valid_extra")
    populate_dir(seed_d, data)

    userdata, metadata = DataSourceMAAS.read_maas_seed_dir(seed_d)
    self.assertEqual(data['user-data'], userdata)
    for key in ('instance-id', 'local-hostname'):
        self.assertEqual(data[key], metadata[key])
    # additional files should not just appear as keys in metadata atm
    self.assertNotIn('foo', metadata)
def test_existing_ovf_same(self):
    # waagent/SharedConfig left alone if found ovf-env.xml same as cached
    # NOTE(review): base64.b64encode() is passed str here, which only
    # works on Python 2 — confirm this file still targets py2.
    odata = {'UserData': base64.b64encode("SOMEUSERDATA")}
    data = {'ovfcontent': construct_valid_ovf_env(data=odata)}
    populate_dir(self.waagent_d,
                 {'ovf-env.xml': data['ovfcontent'],
                  'otherfile': 'otherfile-content',
                  'SharedConfig.xml': 'mysharedconfig'})
    dsrc = self._get_ds(data)
    ret = dsrc.get_data()
    self.assertTrue(ret)
    # All pre-existing waagent files must survive untouched.
    self.assertTrue(os.path.exists(
        os.path.join(self.waagent_d, 'ovf-env.xml')))
    self.assertTrue(os.path.exists(
        os.path.join(self.waagent_d, 'otherfile')))
    self.assertTrue(os.path.exists(
        os.path.join(self.waagent_d, 'SharedConfig.xml')))
def test_seed_dir_valid(self):
    """Verify a valid seeddir is read as such."""
    data = {'instance-id': 'i-valid01',
            'local-hostname': 'valid01-hostname',
            'user-data': 'valid01-userdata',
            'public-keys': 'ssh-rsa AAAAB3Nz...aC1yc2E= keyname'}
    seed_d = os.path.join(self.tmp, "valid")
    populate_dir(seed_d, data)

    userdata, metadata = DataSourceMAAS.read_maas_seed_dir(seed_d)
    self.assertEqual(data['user-data'], userdata)
    for key in ('instance-id', 'local-hostname'):
        self.assertEqual(data[key], metadata[key])
    # verify that 'userdata' is not returned as part of the metadata
    self.assertNotIn('user-data', metadata)
def test_seed_dir_invalid(self):
    """Verify that invalid seed_dir raises MAASSeedDirMalformed."""
    valid = {
        "instance-id": "i-instanceid",
        "local-hostname": "test-hostname",
        "user-data": "",
    }
    my_based = os.path.join(self.tmp, "valid_extra")

    # Each case drops one mandatory key and expects a failure.
    for suffix, missing in (("-01", "local-hostname"),
                            ("-02", "instance-id")):
        seed_d = my_based + suffix
        incomplete = copy(valid)
        del incomplete[missing]
        populate_dir(seed_d, incomplete)
        self.assertRaises(DataSourceMAAS.MAASSeedDirMalformed,
                          DataSourceMAAS.read_maas_seed_dir, seed_d)
def test_seed_dir_valid(self):
    """Verify a valid seeddir is read as such."""
    data = {
        "instance-id": "i-valid01",
        "local-hostname": "valid01-hostname",
        "user-data": "valid01-userdata",
        "public-keys": "ssh-rsa AAAAB3Nz...aC1yc2E= keyname",
    }
    seed_d = os.path.join(self.tmp, "valid")
    populate_dir(seed_d, data)

    userdata, metadata = DataSourceMAAS.read_maas_seed_dir(seed_d)
    self.assertEqual(data["user-data"], userdata)
    for key in ("instance-id", "local-hostname"):
        self.assertEqual(data[key], metadata[key])
    # verify that 'userdata' is not returned as part of the metadata
    self.assertNotIn("user-data", metadata)
def test_seed_dir_valid_extra(self):
    """Verify extra files do not affect seed_dir validity."""
    data = {
        "instance-id": "i-valid-extra",
        "local-hostname": "valid-extra-hostname",
        "user-data": "valid-extra-userdata",
        "foo": "bar",
    }
    seed_d = os.path.join(self.tmp, "valid_extra")
    populate_dir(seed_d, data)

    userdata, metadata = DataSourceMAAS.read_maas_seed_dir(seed_d)
    self.assertEqual(data["user-data"], userdata)
    for key in ("instance-id", "local-hostname"):
        self.assertEqual(data[key], metadata[key])
    # additional files should not just appear as keys in metadata atm
    self.assertNotIn("foo", metadata)
def populate_context_dir(path, variables):
    """Write a context.sh into ``path`` from a dict of context variables.

    Keys are upper-cased; values are single-quoted with embedded single
    quotes escaped ('\'') so the file can be sourced by a shell.
    """
    data = "# Context variables generated by OpenNebula\n"
    # BUG FIX: dict.iteritems() is Python 2 only and raises
    # AttributeError on Python 3; items() works on both.
    for (k, v) in variables.items():
        data += ("%s='%s'\n" % (k.upper(), v.replace(r"'", r"'\''")))
    populate_dir(path, {'context.sh': data})
def test_seed_dir_empty1_context(self):
    """An empty context.sh yields no userdata and empty metadata."""
    populate_dir(self.seed_dir, {'context.sh': ''})

    results = ds.read_context_disk_dir(self.seed_dir)
    self.assertIsNone(results['userdata'])
    self.assertEqual({}, results['metadata'])
def test_seed_dir_broken_context(self):
    """An invalid context.sh must raise BrokenContextDiskDir."""
    populate_dir(self.seed_dir, {'context.sh': INVALID_CONTEXT})
    with self.assertRaises(ds.BrokenContextDiskDir):
        ds.read_context_disk_dir(self.seed_dir)
def test_required_missing(self):
    """A required file that is absent raises ValueError."""
    populate_dir(self.tmp, {'f1': 'f1content'})
    self.assertRaises(ValueError, util.pathprefix2dict, self.tmp,
                      required=['f1', 'f2'])
def test_required_only(self):
    """All required files present are returned as a dict."""
    expected = {'f1': 'f1content', 'f2': 'f2content'}
    populate_dir(self.tmp, expected)

    self.assertEqual(
        expected, util.pathprefix2dict(self.tmp, required=['f1', 'f2']))