def handle(name, cfg, cloud, log, _args):
    """Install, configure and (re)start a salt minion per cloud-config."""
    # Bail out early when no 'salt_minion' section was provided.
    if 'salt_minion' not in cfg:
        log.debug(("Skipping module named %s,"
                   " no 'salt_minion' key in configuration"), name)
        return

    mcfg = cfg['salt_minion']
    consts = SaltConstants(cfg=mcfg)

    # Pull in the salt package before touching any configuration.
    cloud.distro.install_packages(consts.pkg_name)

    # The configuration directory must exist before files land in it.
    util.ensure_dir(consts.conf_dir)

    if 'conf' in mcfg:
        # Serialize the user-provided minion configuration into the
        # 'minion' file inside the salt configuration directory.
        util.write_file(os.path.join(consts.conf_dir, 'minion'),
                        util.yaml_dumps(mcfg.get('conf')))

    if 'grains' in mcfg:
        # Likewise render any custom grains into <conf_dir>/grains.
        util.write_file(os.path.join(consts.conf_dir, 'grains'),
                        util.yaml_dumps(mcfg.get('grains')))

    # Install the minion key pair when both halves were supplied.
    if 'public_key' in mcfg and 'private_key' in mcfg:
        default_pki = os.path.join(consts.conf_dir, "pki/minion")
        if not os.path.isdir(default_pki):
            default_pki = os.path.join(consts.conf_dir, "pki")
        pki_dir = mcfg.get('pki_dir', default_pki)
        # Key material is sensitive: keep everything owner-only.
        with util.umask(0o77):
            util.ensure_dir(pki_dir)
            util.write_file(os.path.join(pki_dir, 'minion.pub'),
                            mcfg['public_key'])
            util.write_file(os.path.join(pki_dir, 'minion.pem'),
                            mcfg['private_key'])

    # FreeBSD needs the service enabled in rc before it can be started.
    if cloud.distro.osfamily == 'freebsd':
        cloud.distro.updatercconf('salt_minion_enable', 'YES')

    # 'service ... restart' starts the daemon if stopped and restarts it
    # (picking up new configuration) if it was already running.
    util.subp(['service', consts.srv_name, 'restart'], capture=False)
def dump(self):
    """Serialize the tracked version, config and network state as YAML."""
    return util.yaml_dumps({
        'version': self._version,
        'config': self._config,
        'network_state': self._network_state,
    })
def handle(name, cfg, cloud, log, _args):
    """Install and configure a salt minion from the 'salt_minion' key."""
    # If there isn't a salt key in the configuration don't do anything
    if 'salt_minion' not in cfg:
        log.debug(("Skipping module named %s,"
                   " no 'salt_minion' key in configuration"), name)
        return

    salt_cfg = cfg['salt_minion']

    # Start by installing the salt package ...
    cloud.distro.install_packages(('salt-minion', ))

    # Ensure we can configure files at the right dir
    config_dir = salt_cfg.get("config_dir", '/etc/salt')
    util.ensure_dir(config_dir)

    # ... and then update the salt configuration
    if 'conf' in salt_cfg:
        # Add all sections from the conf object to /etc/salt/minion
        minion_config = os.path.join(config_dir, 'minion')
        minion_data = util.yaml_dumps(salt_cfg.get('conf'))
        util.write_file(minion_config, minion_data)

    # ... copy the key pair if specified
    if 'public_key' in salt_cfg and 'private_key' in salt_cfg:
        pki_dir = salt_cfg.get('pki_dir', '/etc/salt/pki')
        # BUGFIX: '077' is a Python-2-only octal literal and a
        # SyntaxError under Python 3; use the portable 0o77 form.
        with util.umask(0o77):
            util.ensure_dir(pki_dir)
            pub_name = os.path.join(pki_dir, 'minion.pub')
            pem_name = os.path.join(pki_dir, 'minion.pem')
            util.write_file(pub_name, salt_cfg['public_key'])
            util.write_file(pem_name, salt_cfg['private_key'])
def setUp(self):
    """Replicate a simple ubuntu root and seed a cloud.cfg for the tests."""
    super(TestSimpleRun, self).setUp()
    self.new_root = self.tmp_dir()
    self.replicateTestRoot('simple_ubuntu', self.new_root)

    # Seed cloud.cfg file for our tests
    self.cfg = {
        'datasource_list': ['None'],
        'runcmd': ['ls /etc'],  # test ALL_DISTROS
        'spacewalk': {},  # test non-ubuntu distros module definition
        'system_info': {'paths': {'run_dir': self.new_root}},
        'write_files': [
            {
                'path': '/etc/blah.ini',
                'content': 'blah',
                'permissions': 0o755,
            },
        ],
        'cloud_init_modules': ['write-files', 'spacewalk', 'runcmd'],
    }
    etc_cloud = os.path.join(self.new_root, 'etc', 'cloud')
    util.ensure_dir(etc_cloud)
    util.write_file(os.path.join(etc_cloud, 'cloud.cfg'),
                    util.yaml_dumps(self.cfg))
    self.patchOS(self.new_root)
    self.patchUtils(self.new_root)
def test_none_ds(self):
    """End-to-end run of the 'None' datasource through cloud_init_modules."""
    new_root = self.makeDir()
    self.replicateTestRoot("simple_ubuntu", new_root)
    cfg = {"datasource_list": ["None"],
           "cloud_init_modules": ["write-files"]}
    ud = self.readResource("user_data.1.txt")
    cloud_cfg = util.yaml_dumps(cfg)
    util.ensure_dir(os.path.join(new_root, "etc", "cloud"))
    util.write_file(os.path.join(new_root, "etc", "cloud", "cloud.cfg"),
                    cloud_cfg)
    self._patchIn(new_root)

    # Now start verifying whats created
    initer = stages.Init()
    initer.read_cfg()
    initer.initialize()
    initer.fetch()
    initer.datasource.userdata_raw = ud
    _iid = initer.instancify()
    initer.update()
    initer.cloudify().run("consume_data", initer.consume_data,
                          args=[PER_INSTANCE], freq=PER_INSTANCE)
    mirrors = initer.distro.get_option("package_mirrors")
    # FIX: assertEquals is a deprecated unittest alias; use assertEqual.
    self.assertEqual(1, len(mirrors))
    mirror = mirrors[0]
    self.assertEqual(mirror["arches"], ["i386", "amd64", "blah"])
    mods = stages.Modules(initer)
    (which_ran, failures) = mods.run_section("cloud_init_modules")
    self.assertTrue(len(failures) == 0)
    self.assertTrue(os.path.exists("/etc/blah.ini"))
    self.assertIn("write-files", which_ran)
    contents = util.load_file("/etc/blah.ini")
    self.assertEqual(contents, "blah")
def setUp(self):
    """Prepare an isolated root tree, cloud dir and cloud.cfg for TestMain."""
    super(TestMain, self).setUp()
    self.new_root = self.tmp_dir()
    self.cloud_dir = self.tmp_path('var/lib/cloud/', dir=self.new_root)
    os.makedirs(self.cloud_dir)
    self.replicateTestRoot('simple_ubuntu', self.new_root)
    self.cfg = {
        'datasource_list': ['None'],
        'runcmd': ['ls /etc'],  # test ALL_DISTROS
        'system_info': {'paths': {'cloud_dir': self.cloud_dir,
                                  'run_dir': self.new_root}},
        'write_files': [
            {
                'path': '/etc/blah.ini',
                'content': 'blah',
                'permissions': 0o755,
            },
        ],
        'cloud_init_modules': ['write-files', 'runcmd'],
    }
    ensure_dir(os.path.join(self.new_root, 'etc', 'cloud'))
    self.cloud_cfg_file = os.path.join(
        self.new_root, 'etc', 'cloud', 'cloud.cfg')
    write_file(self.cloud_cfg_file, yaml_dumps(self.cfg))
    self.patchOS(self.new_root)
    self.patchUtils(self.new_root)
    self.stderr = StringIO()
    self.patchStdoutAndStderr(stderr=self.stderr)
def test_cloud_config_archive(self):
    """An archive with an undecodable part still yields merged config."""
    non_decodable = b'\x11\xc9\xb4gTH\xee\x12'
    data = [{'content': '#cloud-config\npassword: gocubs\n'},
            {'content': '#cloud-config\nlocale: chicago\n'},
            {'content': non_decodable}]
    message = b'#cloud-config-archive\n' + util.yaml_dumps(data).encode()

    ci = stages.Init()
    ci.datasource = FakeDataSource(message)

    captured = {}

    def capture_write(filename, content, mode=0o0644, omode="wb"):
        captured[filename] = content

    # consuming the user-data provided should write 'cloud_config' file
    # which will have our yaml in it.
    with mock.patch('cloudinit.util.write_file') as mockobj:
        mockobj.side_effect = capture_write
        ci.fetch()
        ci.consume_data()

    cfg = util.load_yaml(captured[ci.paths.get_ipath("cloud_config")])
    self.assertEqual(cfg.get('password'), 'gocubs')
    self.assertEqual(cfg.get('locale'), 'chicago')
def test_none_ds_run_with_no_config_modules(self):
    """run_section will report no modules run when none are configured."""
    # Rewrite cloud.cfg so that cloud_init_modules is explicitly empty.
    cfg = copy.deepcopy(self.cfg)
    cfg['cloud_init_modules'] = None
    util.ensure_dir(os.path.join(self.new_root, 'etc', 'cloud'))
    util.write_file(
        os.path.join(self.new_root, 'etc', 'cloud', 'cloud.cfg'),
        util.yaml_dumps(cfg))

    initer = stages.Init()
    initer.read_cfg()
    initer.initialize()
    initer.fetch()
    initer.instancify()
    initer.update()
    initer.cloudify().run('consume_data', initer.consume_data,
                          args=[PER_INSTANCE], freq=PER_INSTANCE)

    mods = stages.Modules(initer)
    which_ran, failures = mods.run_section('cloud_init_modules')
    self.assertTrue(len(failures) == 0)
    self.assertEqual([], which_ran)
def setUp(self):
    """Seed a replicated ubuntu root with a cloud.cfg for these tests."""
    super(TestSimpleRun, self).setUp()
    self.new_root = self.tmp_dir()
    self.replicateTestRoot('simple_ubuntu', self.new_root)

    # Seed cloud.cfg file for our tests
    self.cfg = {
        'datasource_list': ['None'],
        'runcmd': ['ls /etc'],  # test ALL_DISTROS
        'spacewalk': {},  # test non-ubuntu distros module definition
        'system_info': {
            'paths': {
                'run_dir': self.new_root
            }
        },
        'write_files': [
            {
                'path': '/etc/blah.ini',
                'content': 'blah',
                'permissions': 0o755,
            },
        ],
        'cloud_init_modules': ['write-files', 'spacewalk', 'runcmd'],
    }
    cloud_etc = os.path.join(self.new_root, 'etc', 'cloud')
    util.ensure_dir(cloud_etc)
    util.write_file(os.path.join(cloud_etc, 'cloud.cfg'),
                    util.yaml_dumps(self.cfg))
    self.patchOS(self.new_root)
    self.patchUtils(self.new_root)
def handle_part(self, _data, ctype, filename, payload, frequency):
    """Merge a Taupage AMI configuration part into the on-disk config.

    The merged result is staged in TMP_TAUPAGE_CONFIG, diffed against
    the current file for visibility, then moved into place.
    """
    if ctype == TAUPAGE_AMI_CONFIG_MIME_TYPE:
        LOG.info(
            "Got Taupage AMI configuration; merging with {config}".format(
                config=TAUPAGE_CONFIG))

        LOG.debug("Parsing given input...")
        config_new = util.load_yaml(payload)

        LOG.debug("Loading existing configuration...")
        config_yaml = util.read_file_or_url(TAUPAGE_CONFIG)
        config_old = util.load_yaml(config_yaml)

        LOG.debug("Merging configurations...")
        # BUGFIX: dict_items objects cannot be concatenated with '+' on
        # Python 3; copy the old mapping and update it instead.  New
        # keys still win, matching the previous merge semantics.
        config_merged = dict(config_old)
        config_merged.update(config_new)

        LOG.debug("Storing merged configuration...")
        config_yaml = util.yaml_dumps(config_merged)
        util.write_file(TMP_TAUPAGE_CONFIG, config_yaml, 0o444)

        LOG.debug("Comparing current configuration with the old one...")
        subprocess.call(
            ['diff', '-u0', TAUPAGE_CONFIG, TMP_TAUPAGE_CONFIG])

        LOG.debug("Moving the new configuration into place...")
        shutil.move(TMP_TAUPAGE_CONFIG, TAUPAGE_CONFIG)
def test_none_ds_run_with_no_config_modules(self):
    """run_section will report no modules run when none are configured."""
    # Represent an empty module list in /etc/cloud/cloud.cfg.
    local_cfg = copy.deepcopy(self.cfg)
    local_cfg['cloud_init_modules'] = None
    cloud_dir = os.path.join(self.new_root, 'etc', 'cloud')
    util.ensure_dir(cloud_dir)
    util.write_file(os.path.join(cloud_dir, 'cloud.cfg'),
                    util.yaml_dumps(local_cfg))

    initer = stages.Init()
    initer.read_cfg()
    initer.initialize()
    initer.fetch()
    initer.instancify()
    initer.update()
    initer.cloudify().run('consume_data', initer.consume_data,
                          args=[PER_INSTANCE], freq=PER_INSTANCE)

    mods = stages.Modules(initer)
    (which_ran, failures) = mods.run_section('cloud_init_modules')
    self.assertTrue(len(failures) == 0)
    self.assertEqual([], which_ran)
def setUp(self):
    """Create an isolated root, cloud dir and cloud.cfg for TestMain."""
    super(TestMain, self).setUp()
    self.new_root = self.tmp_dir()
    self.cloud_dir = self.tmp_path('var/lib/cloud/', dir=self.new_root)
    os.makedirs(self.cloud_dir)
    self.replicateTestRoot('simple_ubuntu', self.new_root)
    self.cfg = {
        'datasource_list': ['None'],
        'runcmd': ['ls /etc'],  # test ALL_DISTROS
        'system_info': {
            'paths': {
                'cloud_dir': self.cloud_dir,
                'run_dir': self.new_root
            }
        },
        'write_files': [
            {
                'path': '/etc/blah.ini',
                'content': 'blah',
                'permissions': 0o755,
            },
        ],
        'cloud_init_modules': ['write-files', 'runcmd'],
    }
    serialized = yaml_dumps(self.cfg)
    ensure_dir(os.path.join(self.new_root, 'etc', 'cloud'))
    self.cloud_cfg_file = os.path.join(self.new_root, 'etc', 'cloud',
                                       'cloud.cfg')
    write_file(self.cloud_cfg_file, serialized)
    self.patchOS(self.new_root)
    self.patchUtils(self.new_root)
    self.stderr = StringIO()
    self.patchStdoutAndStderr(stderr=self.stderr)
def test_cloud_config_archive(self):
    """cloud-config-archive parts merge into a single cloud_config file."""
    non_decodable = b'\x11\xc9\xb4gTH\xee\x12'
    data = [{'content': '#cloud-config\npassword: gocubs\n'},
            {'content': '#cloud-config\nlocale: chicago\n'},
            {'content': non_decodable}]
    message = b'#cloud-config-archive\n' + util.yaml_dumps(data).encode()

    self.reRoot()
    ci = stages.Init()
    ci.datasource = FakeDataSource(message)

    written = {}

    def record_write(filename, content, mode=0o0644, omode="wb"):
        written[filename] = content

    # consuming the user-data provided should write 'cloud_config' file
    # which will have our yaml in it.
    with mock.patch('cloudinit.util.write_file') as mockobj:
        mockobj.side_effect = record_write
        ci.fetch()
        ci.consume_data()

    cfg = util.load_yaml(written[ci.paths.get_ipath("cloud_config")])
    self.assertEqual(cfg.get('password'), 'gocubs')
    self.assertEqual(cfg.get('locale'), 'chicago')
def test_none_ds_forces_run_via_unverified_modules(self):
    """run_section forced skipped modules by using unverified_modules."""
    # Rewrite cloud.cfg with an unverified_modules override so the
    # normally-skipped spacewalk module is forced to run.
    cfg = copy.deepcopy(self.cfg)
    cfg['unverified_modules'] = ['spacewalk']  # Would have skipped
    util.ensure_dir(os.path.join(self.new_root, 'etc', 'cloud'))
    util.write_file(
        os.path.join(self.new_root, 'etc', 'cloud', 'cloud.cfg'),
        util.yaml_dumps(cfg))

    initer = stages.Init()
    initer.read_cfg()
    initer.initialize()
    initer.fetch()
    initer.instancify()
    initer.update()
    initer.cloudify().run('consume_data', initer.consume_data,
                          args=[PER_INSTANCE], freq=PER_INSTANCE)

    mods = stages.Modules(initer)
    which_ran, failures = mods.run_section('cloud_init_modules')
    self.assertTrue(len(failures) == 0)
    self.assertIn('spacewalk', which_ran)
    self.assertIn("running unverified_modules: 'spacewalk'",
                  self.logs.getvalue())
def test_none_ds_forces_run_via_unverified_modules(self):
    """run_section forced skipped modules by using unverified_modules."""
    # Seed cloud.cfg with an unverified_modules override.
    override_cfg = copy.deepcopy(self.cfg)
    override_cfg['unverified_modules'] = ['spacewalk']  # Would have skipped
    cloud_cfg = util.yaml_dumps(override_cfg)
    cloud_etc = os.path.join(self.new_root, 'etc', 'cloud')
    util.ensure_dir(cloud_etc)
    util.write_file(os.path.join(cloud_etc, 'cloud.cfg'), cloud_cfg)

    initer = stages.Init()
    initer.read_cfg()
    initer.initialize()
    initer.fetch()
    initer.instancify()
    initer.update()
    initer.cloudify().run('consume_data', initer.consume_data,
                          args=[PER_INSTANCE], freq=PER_INSTANCE)

    mods = stages.Modules(initer)
    (which_ran, failures) = mods.run_section('cloud_init_modules')
    self.assertTrue(len(failures) == 0)
    self.assertIn('spacewalk', which_ran)
    self.assertIn(
        "running unverified_modules: 'spacewalk'",
        self.logs.getvalue())
def handle(name, cfg, cloud, log, _args):
    """Install and configure a salt minion (conf, grains, keys) from cloud-config."""
    # Without a 'salt_minion' section there is nothing to do.
    if 'salt_minion' not in cfg:
        log.debug(("Skipping module named %s,"
                   " no 'salt_minion' key in configuration"), name)
        return

    salt_cfg = cfg['salt_minion']

    # Install the salt package before writing any configuration.
    cloud.distro.install_packages(('salt-minion', ))

    # The configuration directory must exist first.
    config_dir = salt_cfg.get("config_dir", '/etc/salt')
    util.ensure_dir(config_dir)

    if 'conf' in salt_cfg:
        # Serialize user-provided minion settings to /etc/salt/minion.
        util.write_file(os.path.join(config_dir, 'minion'),
                        util.yaml_dumps(salt_cfg.get('conf')))

    if 'grains' in salt_cfg:
        # Custom grains land in /etc/salt/grains.
        util.write_file(os.path.join(config_dir, 'grains'),
                        util.yaml_dumps(salt_cfg.get('grains')))

    # Install the key pair when both halves were provided.
    if 'public_key' in salt_cfg and 'private_key' in salt_cfg:
        # Newer salt releases keep minion keys under pki/minion.
        if os.path.isdir("/etc/salt/pki/minion"):
            pki_dir_default = "/etc/salt/pki/minion"
        else:
            pki_dir_default = "/etc/salt/pki"
        pki_dir = salt_cfg.get('pki_dir', pki_dir_default)
        # Keys are secrets: nothing group/world readable may appear.
        with util.umask(0o77):
            util.ensure_dir(pki_dir)
            util.write_file(os.path.join(pki_dir, 'minion.pub'),
                            salt_cfg['public_key'])
            util.write_file(os.path.join(pki_dir, 'minion.pem'),
                            salt_cfg['private_key'])

    # 'service' starts the daemon if stopped; restart picks up new config.
    util.subp(['service', 'salt-minion', 'restart'], capture=False)
def _render_section(name, section):
    """Render one netplan section as an indented YAML fragment.

    Empty sections render as nothing at all.
    """
    if not section:
        return []
    dumped = util.yaml_dumps({name: section},
                             explicit_start=False,
                             explicit_end=False)
    return [util.indent(dumped, ' ' * 4)]
def handle(name, cfg, cloud, log, args):
    """Dump a debug summary of config, metadata and instance info.

    Output goes to cfg['debug']['output'] when set, otherwise to the
    console via multi_log.  When invoked with cmdline args, the first
    arg names the output file and verbosity is forced on.
    """
    verbose = util.get_cfg_by_path(cfg, ('debug', 'verbose'), default=True)
    if args:
        # if args are provided (from cmdline) then explicitly set verbose
        out_file = args[0]
        verbose = True
    else:
        out_file = util.get_cfg_by_path(cfg, ('debug', 'output'))

    if not verbose:
        log.debug(("Skipping module named %s,"
                   " verbose printing disabled"), name)
        return

    # Clean out some keys that we just don't care about showing...
    dump_cfg = copy.deepcopy(cfg)
    for k in ['log_cfgs']:
        dump_cfg.pop(k, None)
    all_keys = list(dump_cfg.keys())
    for k in all_keys:
        if k.startswith("_"):
            dump_cfg.pop(k, None)

    # Now dump it...
    to_print = StringIO()
    to_print.write(_make_header("Config"))
    to_print.write(util.yaml_dumps(dump_cfg))
    to_print.write("\n")
    to_print.write(_make_header("MetaData"))
    to_print.write(util.yaml_dumps(cloud.datasource.metadata))
    to_print.write("\n")
    to_print.write(_make_header("Misc"))
    to_print.write("Datasource: %s\n" %
                   (type_utils.obj_name(cloud.datasource)))
    to_print.write("Distro: %s\n" % (type_utils.obj_name(cloud.distro)))
    to_print.write("Hostname: %s\n" % (cloud.get_hostname(True)))
    to_print.write("Instance ID: %s\n" % (cloud.get_instance_id()))
    to_print.write("Locale: %s\n" % (cloud.get_locale()))
    to_print.write("Launch IDX: %s\n" % (cloud.launch_index))
    contents = to_print.getvalue()

    # Prefix every line so the output is identifiable in shared logs.
    content_to_file = []
    for line in contents.splitlines():
        line = "ci-info: %s\n" % (line)
        content_to_file.append(line)

    if out_file:
        # BUGFIX: '0644' is a Python-2-only octal literal and a
        # SyntaxError under Python 3; write the mode as 0o644.
        util.write_file(out_file, "".join(content_to_file), 0o644, "w")
    else:
        util.multi_log("".join(content_to_file), console=True, stderr=False)
def _render_section(name, section):
    """Render one netplan section as indented, alias-free YAML.

    An empty section contributes nothing to the output.
    """
    if not section:
        return []
    dumped = util.yaml_dumps({name: section}, explicit_start=False,
                             explicit_end=False, noalias=True)
    return [util.indent(dumped, ' ' * 4)]
def test_main_init_run_net_calls_set_hostname_when_metadata_present(self):
    """When local-hostname metadata is present, call cc_set_hostname."""
    self.cfg['datasource'] = {
        'None': {'metadata': {'local-hostname': 'md-hostname'}}}
    write_file(self.cloud_cfg_file, yaml_dumps(self.cfg))
    cmd_args = myargs(
        debug=False, files=None, force=False, local=False, reporter=None,
        subcommand='init')

    def fake_set_hostname(name, cfg, cloud, log, args):
        # The handler must be invoked with the fully merged system config.
        self.assertEqual('set-hostname', name)
        updated_cfg = copy.deepcopy(self.cfg)
        updated_cfg.update(
            {'def_log_file': '/var/log/cloud-init.log',
             'log_cfgs': [],
             'syslog_fix_perms': [
                 'syslog:adm', 'root:adm', 'root:wheel', 'root:root'
             ],
             'vendor_data': {'enabled': True, 'prefix': []}})
        updated_cfg.pop('system_info')
        self.assertEqual(updated_cfg, cfg)
        self.assertEqual(main.LOG, log)
        self.assertIsNone(args)

    (_item1, item2) = wrap_and_call(
        'cloudinit.cmd.main',
        {'util.close_stdin': True,
         'netinfo.debug_info': 'my net debug info',
         'cc_set_hostname.handle': {'side_effect': fake_set_hostname},
         'util.fixup_output': ('outfmt', 'errfmt')},
        main.main_init, 'init', cmd_args)
    self.assertEqual([], item2)

    # Instancify is called
    instance_id_path = 'var/lib/cloud/data/instance-id'
    self.assertEqual(
        'iid-datasource-none\n',
        os.path.join(load_file(
            os.path.join(self.new_root, instance_id_path))))
    # modules are run (including write_files)
    self.assertEqual(
        'blah', load_file(os.path.join(self.new_root, 'etc/blah.ini')))
    expected_logs = [
        'network config is disabled by fallback',  # apply_network_config
        'my net debug info',  # netinfo.debug_info
        'no previous run detected'
    ]
    for log in expected_logs:
        self.assertIn(log, self.stderr.getvalue())
def test_main_init_run_net_calls_set_hostname_when_metadata_present(self):
    """When local-hostname metadata is present, call cc_set_hostname."""
    self.cfg['datasource'] = {
        'None': {'metadata': {'local-hostname': 'md-hostname'}}}
    cloud_cfg = yaml_dumps(self.cfg)
    write_file(self.cloud_cfg_file, cloud_cfg)
    cmdargs = myargs(
        debug=False, files=None, force=False, local=False, reporter=None,
        subcommand='init')

    def check_hostname_handler(name, cfg, cloud, log, args):
        # Verify the handler receives the merged configuration.
        self.assertEqual('set-hostname', name)
        expected = copy.deepcopy(self.cfg)
        expected.update(
            {'def_log_file': '/var/log/cloud-init.log',
             'log_cfgs': [],
             'syslog_fix_perms': ['syslog:adm', 'root:adm', 'root:wheel'],
             'vendor_data': {'enabled': True, 'prefix': []}})
        expected.pop('system_info')
        self.assertEqual(expected, cfg)
        self.assertEqual(main.LOG, log)
        self.assertIsNone(args)

    (item1, item2) = wrap_and_call(
        'cloudinit.cmd.main',
        {'util.close_stdin': True,
         'netinfo.debug_info': 'my net debug info',
         'cc_set_hostname.handle': {'side_effect': check_hostname_handler},
         'util.fixup_output': ('outfmt', 'errfmt')},
        main.main_init, 'init', cmdargs)
    self.assertEqual([], item2)

    # Instancify is called
    instance_id_path = 'var/lib/cloud/data/instance-id'
    self.assertEqual(
        'iid-datasource-none\n',
        os.path.join(load_file(
            os.path.join(self.new_root, instance_id_path))))
    # modules are run (including write_files)
    self.assertEqual(
        'blah', load_file(os.path.join(self.new_root, 'etc/blah.ini')))
    expected_logs = [
        'network config is disabled by fallback',  # apply_network_config
        'my net debug info',  # netinfo.debug_info
        'no previous run detected'
    ]
    for log in expected_logs:
        self.assertIn(log, self.stderr.getvalue())
def test_none_ds(self):
    """Full init plus module run against the 'None' datasource."""
    new_root = tempfile.mkdtemp()
    self.addCleanup(shutil.rmtree, new_root)
    self.replicateTestRoot('simple_ubuntu', new_root)
    cfg = {
        'datasource_list': ['None'],
        'write_files': [
            {
                'path': '/etc/blah.ini',
                'content': 'blah',
                'permissions': 0o755,
            },
        ],
        'cloud_init_modules': ['write-files'],
    }
    util.ensure_dir(os.path.join(new_root, 'etc', 'cloud'))
    util.write_file(os.path.join(new_root, 'etc', 'cloud', 'cloud.cfg'),
                    util.yaml_dumps(cfg))
    self._patchIn(new_root)

    # Now start verifying whats created
    initer = stages.Init()
    initer.read_cfg()
    initer.initialize()
    self.assertTrue(os.path.exists("/var/lib/cloud"))
    for d in ['scripts', 'seed', 'instances', 'handlers', 'sem', 'data']:
        self.assertTrue(os.path.isdir(os.path.join("/var/lib/cloud", d)))

    initer.fetch()
    iid = initer.instancify()
    self.assertEqual(iid, 'iid-datasource-none')
    initer.update()
    self.assertTrue(os.path.islink("var/lib/cloud/instance"))

    initer.cloudify().run('consume_data', initer.consume_data,
                          args=[PER_INSTANCE], freq=PER_INSTANCE)

    mods = stages.Modules(initer)
    which_ran, failures = mods.run_section('cloud_init_modules')
    self.assertTrue(len(failures) == 0)
    self.assertTrue(os.path.exists('/etc/blah.ini'))
    self.assertIn('write-files', which_ran)
    contents = util.load_file('/etc/blah.ini')
    self.assertEqual(contents, 'blah')
def handle_part(self, _data, ctype, filename, payload, frequency):
    """Merge a Taupage AMI configuration part directly into TAUPAGE_CONFIG."""
    if ctype == TAUPAGE_AMI_CONFIG_MIME_TYPE:
        LOG.info("Got Taupage AMI configuration; merging with {config}".format(config=TAUPAGE_CONFIG))

        LOG.debug("Parsing given input...")
        config_new = util.load_yaml(payload)

        LOG.debug("Loading existing configuration...")
        config_yaml = util.read_file_or_url(TAUPAGE_CONFIG)
        config_old = util.load_yaml(config_yaml)

        LOG.debug("Merging configurations...")
        # BUGFIX: dict_items objects cannot be concatenated with '+' on
        # Python 3; copy then update (new keys win, as before).
        config_merged = dict(config_old)
        config_merged.update(config_new)

        LOG.debug("Storing merged configuration...")
        config_yaml = util.yaml_dumps(config_merged)
        util.write_file(TAUPAGE_CONFIG, config_yaml, 0o444)
def test_none_ds(self):
    """Userdata is consumed and modules run for the 'None' datasource."""
    new_root = tempfile.mkdtemp()
    self.addCleanup(shutil.rmtree, new_root)
    self.replicateTestRoot('simple_ubuntu', new_root)
    cfg = {
        'datasource_list': ['None'],
        'cloud_init_modules': ['write-files'],
        'system_info': {
            'paths': {
                'run_dir': new_root
            }
        }
    }
    ud = self.readResource('user_data.1.txt')
    util.ensure_dir(os.path.join(new_root, 'etc', 'cloud'))
    util.write_file(os.path.join(new_root, 'etc', 'cloud', 'cloud.cfg'),
                    util.yaml_dumps(cfg))
    self._patchIn(new_root)

    # Now start verifying whats created
    initer = stages.Init()
    initer.read_cfg()
    initer.initialize()
    initer.fetch()
    initer.datasource.userdata_raw = ud
    initer.instancify()
    initer.update()
    initer.cloudify().run('consume_data', initer.consume_data,
                          args=[PER_INSTANCE], freq=PER_INSTANCE)

    mirrors = initer.distro.get_option('package_mirrors')
    self.assertEqual(1, len(mirrors))
    mirror = mirrors[0]
    self.assertEqual(mirror['arches'], ['i386', 'amd64', 'blah'])

    mods = stages.Modules(initer)
    which_ran, failures = mods.run_section('cloud_init_modules')
    self.assertTrue(len(failures) == 0)
    self.assertTrue(os.path.exists('/etc/blah.ini'))
    self.assertIn('write-files', which_ran)
    contents = util.load_file('/etc/blah.ini')
    self.assertEqual(contents, 'blah')
def _write_cloud_config(self):
    """Write the merged cloud-config buffer (plus provenance comments)."""
    if not self.cloud_fn:
        return
    # Record which files contributed to the merge.
    header = []
    if self.file_names:
        header.append("# from %s files" % (len(self.file_names)))
        for fn in self.file_names:
            header.append("# %s" % (fn or "?"))
        header.append("")
    if self.cloud_buf is not None:
        # Something was actually gathered....
        lines = [CLOUD_PREFIX, ""]
        lines.extend(header)
        lines.append(util.yaml_dumps(self.cloud_buf))
    else:
        lines = []
    util.write_file(self.cloud_fn, "\n".join(lines), 0o600)
def handle(name, cfg, cloud, log, _args):
    """Install, configure and restart a salt minion from cloud-config."""
    # A missing 'salt_minion' key means this module has nothing to do.
    if 'salt_minion' not in cfg:
        log.debug(("Skipping module named %s,"
                   " no 'salt_minion' key in configuration"), name)
        return

    salt_cfg = cfg['salt_minion']

    # Start by installing the salt package ...
    cloud.distro.install_packages(('salt-minion',))

    # Ensure we can configure files at the right dir
    config_dir = salt_cfg.get("config_dir", '/etc/salt')
    util.ensure_dir(config_dir)

    # ... and then update the salt configuration
    if 'conf' in salt_cfg:
        minion_config = os.path.join(config_dir, 'minion')
        util.write_file(minion_config,
                        util.yaml_dumps(salt_cfg.get('conf')))

    # ... copy the key pair if specified
    if 'public_key' in salt_cfg and 'private_key' in salt_cfg:
        # Prefer the newer pki/minion layout when it already exists.
        if os.path.isdir("/etc/salt/pki/minion"):
            pki_dir_default = "/etc/salt/pki/minion"
        else:
            pki_dir_default = "/etc/salt/pki"
        pki_dir = salt_cfg.get('pki_dir', pki_dir_default)
        # Restrict permissions while the key material is written out.
        with util.umask(0o77):
            util.ensure_dir(pki_dir)
            util.write_file(os.path.join(pki_dir, 'minion.pub'),
                            salt_cfg['public_key'])
            util.write_file(os.path.join(pki_dir, 'minion.pem'),
                            salt_cfg['private_key'])

    # restart salt-minion. 'service' will start even if not started. if it
    # was started, it needs to be restarted for config change.
    util.subp(['service', 'salt-minion', 'restart'], capture=False)
def test_none_ds(self):
    """The 'None' datasource consumes userdata and runs write-files."""
    new_root = tempfile.mkdtemp()
    self.addCleanup(shutil.rmtree, new_root)
    self.replicateTestRoot('simple_ubuntu', new_root)
    cfg = {
        'datasource_list': ['None'],
        'cloud_init_modules': ['write-files'],
        'system_info': {'paths': {'run_dir': new_root}}
    }
    ud = helpers.readResource('user_data.1.txt')
    cloud_cfg = util.yaml_dumps(cfg)
    cloud_etc = os.path.join(new_root, 'etc', 'cloud')
    util.ensure_dir(cloud_etc)
    util.write_file(os.path.join(cloud_etc, 'cloud.cfg'), cloud_cfg)
    self._patchIn(new_root)

    # Now start verifying whats created
    initer = stages.Init()
    initer.read_cfg()
    initer.initialize()
    initer.fetch()
    initer.datasource.userdata_raw = ud
    initer.instancify()
    initer.update()
    initer.cloudify().run('consume_data', initer.consume_data,
                          args=[PER_INSTANCE], freq=PER_INSTANCE)

    mirrors = initer.distro.get_option('package_mirrors')
    self.assertEqual(1, len(mirrors))
    mirror = mirrors[0]
    self.assertEqual(mirror['arches'], ['i386', 'amd64', 'blah'])

    mods = stages.Modules(initer)
    (which_ran, failures) = mods.run_section('cloud_init_modules')
    self.assertTrue(len(failures) == 0)
    self.assertTrue(os.path.exists('/etc/blah.ini'))
    self.assertIn('write-files', which_ran)
    self.assertEqual(util.load_file('/etc/blah.ini'), 'blah')
def handle_part(self, _data, ctype, filename, payload, frequency):
    """Merge a Taupage AMI config part into TAUPAGE_CONFIG via a temp file."""
    if ctype == TAUPAGE_AMI_CONFIG_MIME_TYPE:
        LOG.info("Got Taupage AMI configuration; merging with {config}".format(config=TAUPAGE_CONFIG))

        LOG.debug("Parsing given input...")
        config_new = util.load_yaml(payload)

        LOG.debug("Loading existing configuration...")
        config_yaml = util.read_file_or_url(TAUPAGE_CONFIG)
        config_old = util.load_yaml(config_yaml)

        LOG.debug("Merging configurations...")
        # BUGFIX: dict_items objects cannot be concatenated with '+' on
        # Python 3; copy the old mapping and update it instead (new keys
        # win, preserving the original merge semantics).
        config_merged = dict(config_old)
        config_merged.update(config_new)

        LOG.debug("Storing merged configuration...")
        config_yaml = util.yaml_dumps(config_merged)
        util.write_file(TMP_TAUPAGE_CONFIG, config_yaml, 0o444)

        LOG.debug("Comparing current configuration with the old one...")
        subprocess.call(['diff', '-u0', TAUPAGE_CONFIG, TMP_TAUPAGE_CONFIG])

        LOG.debug("Moving the new configuration into place...")
        shutil.move(TMP_TAUPAGE_CONFIG, TAUPAGE_CONFIG)
def _write_cloud_config(self):
    """Write merged cloud-config plus provenance comments to cloud_fn."""
    if not self.cloud_fn:
        return
    # Capture which files we merged from...
    file_lines = []
    if self.file_names:
        file_lines.append("# from %s files" % (len(self.file_names)))
        file_lines.extend("# %s" % (fn or '?') for fn in self.file_names)
        file_lines.append("")
    if self.cloud_buf is None:
        lines = []
    else:
        # Something was actually gathered....
        lines = [CLOUD_PREFIX, '']
        lines.extend(file_lines)
        lines.append(util.yaml_dumps(self.cloud_buf))
    util.write_file(self.cloud_fn, "\n".join(lines), 0o600)
def render_snap_op(op, name, path=None, cfgfile=None, config=None):
    """Build and run the snappy command for an 'install' or 'config' op."""
    if op not in ('install', 'config'):
        raise ValueError("cannot render op '%s'" % op)

    shortname = name.partition(NAMESPACE_DELIM)[0]

    cfg_tmpf = None
    try:
        if config is not None:
            # input to 'snappy config packagename' must have nested data. odd.
            # config:
            #   packagename:
            #     config
            # Note, however, we do not touch config files on disk.
            nested_cfg = {'config': {shortname: config}}
            (fd, cfg_tmpf) = tempfile.mkstemp()
            os.write(fd, util.yaml_dumps(nested_cfg).encode())
            os.close(fd)
            cfgfile = cfg_tmpf

        cmd = [SNAPPY_CMD, op]
        if op == 'install':
            if path:
                cmd.append("--allow-unauthenticated")
                cmd.append(path)
            else:
                cmd.append(name)
            if cfgfile:
                cmd.append(cfgfile)
        elif op == 'config':
            cmd.extend([name, cfgfile])

        util.subp(cmd)
    finally:
        # Always remove the temp file we created for the nested config.
        if cfg_tmpf:
            os.unlink(cfg_tmpf)
def _render_content(self, network_state):
    """Render the parsed network state as a netplan v2 YAML document."""
    # A v2 config is already netplan syntax; emit it unmodified.
    if network_state.version == 2:
        LOG.debug('V2 to V2 passthrough')
        return util.yaml_dumps({'network': network_state.config},
                               explicit_start=False,
                               explicit_end=False)

    ethernets = {}
    wifis = {}
    bridges = {}
    bonds = {}
    vlans = {}
    content = []

    interfaces = network_state._network_state.get('interfaces', [])
    nameservers = network_state.dns_nameservers
    searchdomains = network_state.dns_searchdomains

    for config in network_state.iter_interfaces():
        ifname = config.get('name')
        # filter None (but not False) entries up front
        ifcfg = {key: value for (key, value) in config.items()
                 if value is not None}
        if_type = ifcfg.get('type')

        if if_type == 'physical':
            # required_keys = ['name', 'mac_address']
            eth = {
                'set-name': ifname,
                'match': ifcfg.get('match', None),
            }
            if eth['match'] is None:
                macaddr = ifcfg.get('mac_address', None)
                if macaddr is not None:
                    eth['match'] = {'macaddress': macaddr.lower()}
                else:
                    del eth['match']
                    del eth['set-name']
            _extract_addresses(ifcfg, eth, ifname)
            ethernets.update({ifname: eth})

        elif if_type == 'bond':
            # required_keys = ['name', 'bond_interfaces']
            bond = {}
            bond_config = {}
            # extract bond params and drop the bond_ prefix as it's
            # redundent in v2 yaml format
            v2_bond_map = NET_CONFIG_TO_V2.get('bond')
            for match in ['bond_', 'bond-']:
                bond_params = _get_params_dict_by_match(ifcfg, match)
                for (param, value) in bond_params.items():
                    newname = v2_bond_map.get(param.replace('_', '-'))
                    if newname is None:
                        continue
                    bond_config.update({newname: value})

            if len(bond_config) > 0:
                bond.update({'parameters': bond_config})
            if ifcfg.get('mac_address'):
                bond['macaddress'] = ifcfg.get('mac_address').lower()
            slave_interfaces = ifcfg.get('bond-slaves')
            if slave_interfaces == 'none':
                _extract_bond_slaves_by_name(interfaces, bond, ifname)
            _extract_addresses(ifcfg, bond, ifname)
            bonds.update({ifname: bond})

        elif if_type == 'bridge':
            # required_keys = ['name', 'bridge_ports']
            ports = sorted(copy.copy(ifcfg.get('bridge_ports')))
            bridge = {
                'interfaces': ports,
            }
            # extract bridge params and drop the bridge prefix as it's
            # redundent in v2 yaml format
            match_prefix = 'bridge_'
            params = _get_params_dict_by_match(ifcfg, match_prefix)
            br_config = {}

            # v2 yaml uses different names for the keys
            # and at least one value format change
            v2_bridge_map = NET_CONFIG_TO_V2.get('bridge')
            for (param, value) in params.items():
                newname = v2_bridge_map.get(param)
                if newname is None:
                    continue
                br_config.update({newname: value})
                if newname in ['path-cost', 'port-priority']:
                    # <interface> <value> -> <interface>: int(<value>)
                    newvalue = {}
                    for val in value:
                        (port, portval) = val.split()
                        newvalue[port] = int(portval)
                    br_config.update({newname: newvalue})

            if len(br_config) > 0:
                bridge.update({'parameters': br_config})
            if ifcfg.get('mac_address'):
                bridge['macaddress'] = ifcfg.get('mac_address').lower()
            _extract_addresses(ifcfg, bridge, ifname)
            bridges.update({ifname: bridge})

        elif if_type == 'vlan':
            # required_keys = ['name', 'vlan_id', 'vlan-raw-device']
            vlan = {
                'id': ifcfg.get('vlan_id'),
                'link': ifcfg.get('vlan-raw-device')
            }
            macaddr = ifcfg.get('mac_address', None)
            if macaddr is not None:
                vlan['macaddress'] = macaddr.lower()
            _extract_addresses(ifcfg, vlan, ifname)
            vlans.update({ifname: vlan})

    # inject global nameserver values under each all interface which
    # has addresses and do not already have a DNS configuration
    if nameservers or searchdomains:
        nscfg = {'addresses': nameservers, 'search': searchdomains}
        for section in [ethernets, wifis, bonds, bridges, vlans]:
            for _name, cfg in section.items():
                if 'nameservers' in cfg or 'addresses' not in cfg:
                    continue
                cfg.update({'nameservers': nscfg})

    # workaround yaml dictionary key sorting when dumping
    def _render_section(name, section):
        if section:
            dump = util.yaml_dumps({name: section},
                                   explicit_start=False,
                                   explicit_end=False,
                                   noalias=True)
            txt = util.indent(dump, ' ' * 4)
            return [txt]
        return []

    content.append("network:\n    version: 2\n")
    content += _render_section('ethernets', ethernets)
    content += _render_section('wifis', wifis)
    content += _render_section('bonds', bonds)
    content += _render_section('bridges', bridges)
    content += _render_section('vlans', vlans)

    return "".join(content)
def dump_network_state(self):
    """Serialize the parsed network state to a YAML string."""
    state = self._network_state
    return util.yaml_dumps(state)
def test_none_ds(self):
    """End-to-end check of stages.Init with the 'None' datasource.

    Builds a minimal cloud.cfg inside a replicated test root, patches
    that root in, then verifies that initialization creates the
    expected /var/lib/cloud directory layout and that an instance id
    can be fetched.
    """
    new_root = self.makeDir()
    self.replicateTestRoot('simple_ubuntu', new_root)
    cfg = {
        'datasource_list': ['None'],
        'write_files': [
            {
                'path': '/etc/blah.ini',
                'content': 'blah',
                # FIX: 0755 is a Python 2 octal literal and a
                # SyntaxError on Python 3; 0o755 is the portable
                # spelling (valid on 2.6+ and 3.x) with the same value.
                'permissions': 0o755,
            },
        ],
        'cloud_init_modules': ['write-files'],
    }
    cloud_cfg = util.yaml_dumps(cfg)
    util.ensure_dir(os.path.join(new_root, 'etc', 'cloud'))
    util.write_file(os.path.join(new_root, 'etc', 'cloud', 'cloud.cfg'),
                    cloud_cfg)
    self._patchIn(new_root)

    # Now start verifying whats created
    initer = stages.Init()
    initer.read_cfg()
    initer.initialize()
    self.assertTrue(os.path.exists("/var/lib/cloud"))
    for d in ['scripts', 'seed', 'instances', 'handlers', 'sem', 'data']:
        self.assertTrue(os.path.isdir(os.path.join("/var/lib/cloud", d)))

    initer.fetch()
    iid = initer.instancify()
def _render_content(self, network_state):
    """Render *network_state* as a netplan (v2) YAML document string.

    If the state is already version 2, it is passed through as-is;
    otherwise each v1 interface is translated into the matching v2
    section (ethernets / bonds / bridges / vlans).
    """
    # if content already in netplan format, pass it back
    if network_state.version == 2:
        LOG.debug('V2 to V2 passthrough')
        return util.yaml_dumps({'network': network_state.config},
                               explicit_start=False,
                               explicit_end=False)

    # Accumulators for each top-level netplan v2 section.
    ethernets = {}
    wifis = {}
    bridges = {}
    bonds = {}
    vlans = {}
    content = []

    # Raw interface list; needed below to resolve bond slaves by name.
    interfaces = network_state._network_state.get('interfaces', [])

    nameservers = network_state.dns_nameservers
    searchdomains = network_state.dns_searchdomains

    for config in network_state.iter_interfaces():
        ifname = config.get('name')
        # filter None (but not False) entries up front
        ifcfg = dict((key, value) for (key, value) in config.items()
                     if value is not None)

        if_type = ifcfg.get('type')
        if if_type == 'physical':
            # required_keys = ['name', 'mac_address']
            eth = {
                'set-name': ifname,
                'match': ifcfg.get('match', None),
            }
            if eth['match'] is None:
                # No explicit match: fall back to matching by MAC;
                # with neither, drop both keys so netplan matches by
                # the section name alone.
                macaddr = ifcfg.get('mac_address', None)
                if macaddr is not None:
                    eth['match'] = {'macaddress': macaddr.lower()}
                else:
                    del eth['match']
                    del eth['set-name']
            _extract_addresses(ifcfg, eth, ifname)
            ethernets.update({ifname: eth})

        elif if_type == 'bond':
            # required_keys = ['name', 'bond_interfaces']
            bond = {}
            bond_config = {}
            # extract bond params and drop the bond_ prefix as it's
            # redundent in v2 yaml format
            v2_bond_map = NET_CONFIG_TO_V2.get('bond')
            # v1 sources use both 'bond_' and 'bond-' prefixes.
            for match in ['bond_', 'bond-']:
                bond_params = _get_params_dict_by_match(ifcfg, match)
                for (param, value) in bond_params.items():
                    # Map keys are dash-separated; unknown params are
                    # silently dropped.
                    newname = v2_bond_map.get(param.replace('_', '-'))
                    if newname is None:
                        continue
                    bond_config.update({newname: value})

            if len(bond_config) > 0:
                bond.update({'parameters': bond_config})
            if ifcfg.get('mac_address'):
                bond['macaddress'] = ifcfg.get('mac_address').lower()
            slave_interfaces = ifcfg.get('bond-slaves')
            if slave_interfaces == 'none':
                # Slaves not listed inline; find them by scanning the
                # raw interface list for members of this bond.
                _extract_bond_slaves_by_name(interfaces, bond, ifname)
            _extract_addresses(ifcfg, bond, ifname)
            bonds.update({ifname: bond})

        elif if_type == 'bridge':
            # required_keys = ['name', 'bridge_ports']
            ports = sorted(copy.copy(ifcfg.get('bridge_ports')))
            bridge = {
                'interfaces': ports,
            }
            # extract bridge params and drop the bridge prefix as it's
            # redundent in v2 yaml format
            match_prefix = 'bridge_'
            params = _get_params_dict_by_match(ifcfg, match_prefix)
            br_config = {}

            # v2 yaml uses different names for the keys
            # and at least one value format change
            v2_bridge_map = NET_CONFIG_TO_V2.get('bridge')
            for (param, value) in params.items():
                newname = v2_bridge_map.get(param)
                if newname is None:
                    continue
                br_config.update({newname: value})
                if newname in ['path-cost', 'port-priority']:
                    # <interface> <value> -> <interface>: int(<value>)
                    newvalue = {}
                    for val in value:
                        (port, portval) = val.split()
                        newvalue[port] = int(portval)
                    br_config.update({newname: newvalue})

            if len(br_config) > 0:
                bridge.update({'parameters': br_config})
            if ifcfg.get('mac_address'):
                bridge['macaddress'] = ifcfg.get('mac_address').lower()
            _extract_addresses(ifcfg, bridge, ifname)
            bridges.update({ifname: bridge})

        elif if_type == 'vlan':
            # required_keys = ['name', 'vlan_id', 'vlan-raw-device']
            vlan = {
                'id': ifcfg.get('vlan_id'),
                'link': ifcfg.get('vlan-raw-device')
            }
            macaddr = ifcfg.get('mac_address', None)
            if macaddr is not None:
                vlan['macaddress'] = macaddr.lower()
            _extract_addresses(ifcfg, vlan, ifname)
            vlans.update({ifname: vlan})

    # inject global nameserver values under each all interface which
    # has addresses and do not already have a DNS configuration
    if nameservers or searchdomains:
        nscfg = {'addresses': nameservers, 'search': searchdomains}
        for section in [ethernets, wifis, bonds, bridges, vlans]:
            for _name, cfg in section.items():
                if 'nameservers' in cfg or 'addresses' not in cfg:
                    continue
                cfg.update({'nameservers': nscfg})

    # workaround yaml dictionary key sorting when dumping
    def _render_section(name, section):
        # Dump each section separately and indent it 4 spaces so it
        # nests under the top-level 'network:' key.
        if section:
            dump = util.yaml_dumps({name: section},
                                   explicit_start=False,
                                   explicit_end=False)
            txt = util.indent(dump, ' ' * 4)
            return [txt]
        return []

    content.append("network:\n    version: 2\n")
    content += _render_section('ethernets', ethernets)
    content += _render_section('wifis', wifis)
    content += _render_section('bonds', bonds)
    content += _render_section('bridges', bridges)
    content += _render_section('vlans', vlans)

    return "".join(content)
def _dumps(obj):
    """Return *obj* as YAML without start/end markers or trailing whitespace."""
    dumped = util.yaml_dumps(obj, explicit_start=False, explicit_end=False)
    return dumped.rstrip()