def test_interface_has_own_mac_true_when_not_stolen(self):
    """Return True from interface_has_own_mac when mac isn't stolen.

    Each of the addr_assign_type values exercised here is a
    non-stolen type, so interface_has_own_mac reports True for all.
    (The original docstring said "Return False", contradicting the
    assertTrue below.)
    """
    valid_assign_types = ['0', '1', '3']
    assign_path = os.path.join(self.sysdir, 'eth1', 'addr_assign_type')
    for _type in valid_assign_types:
        write_file(assign_path, _type)
        self.assertTrue(net.interface_has_own_mac('eth1'))
def test_is_vlan(self):
    """is_vlan is True when /sys/net/devname/uevent has DEVTYPE=vlan."""
    uevent_path = os.path.join(self.sysdir, 'eth0', 'uevent')
    # An empty uevent file carries no DEVTYPE, so not a vlan.
    ensure_file(uevent_path)
    self.assertFalse(net.is_vlan('eth0'))
    # A DEVTYPE=vlan line anywhere in uevent marks the device a vlan.
    write_file(uevent_path, 'junk\nDEVTYPE=vlan\njunk\n')
    self.assertTrue(net.is_vlan('eth0'))
def test_is_connected_when_wireless_and_carrier_active(self):
    """is_connected is True if wireless /sys/net/devname/carrier is 1."""
    # No sysfs entries at all: not connected.
    self.assertFalse(net.is_connected('eth0'))
    # Wireless marker alone, without a carrier, is still not connected.
    ensure_file(os.path.join(self.sysdir, 'eth0', 'wireless'))
    self.assertFalse(net.is_connected('eth0'))
    # A carrier file reporting "1" marks the wireless device connected.
    write_file(os.path.join(self.sysdir, 'eth0', 'carrier'), "1")
    self.assertTrue(net.is_connected('eth0'))
def test_none_ds(self):
    """End-to-end run of the None datasource consuming write-files.

    Builds a fake root with a cloud.cfg, runs the init stages plus the
    cloud_init_modules section, and verifies package mirrors and the
    write-files output.
    """
    new_root = self.makeDir()
    self.replicateTestRoot("simple_ubuntu", new_root)
    cfg = {
        "datasource_list": ["None"],
        "cloud_init_modules": ["write-files"],
    }
    ud = self.readResource("user_data.1.txt")
    cloud_cfg = util.yaml_dumps(cfg)
    util.ensure_dir(os.path.join(new_root, "etc", "cloud"))
    util.write_file(os.path.join(new_root, "etc", "cloud", "cloud.cfg"),
                    cloud_cfg)
    self._patchIn(new_root)

    # Now start verifying whats created
    initer = stages.Init()
    initer.read_cfg()
    initer.initialize()
    initer.fetch()
    initer.datasource.userdata_raw = ud
    _iid = initer.instancify()
    initer.update()
    initer.cloudify().run("consume_data", initer.consume_data,
                          args=[PER_INSTANCE], freq=PER_INSTANCE)

    mirrors = initer.distro.get_option("package_mirrors")
    # Fix: assertEqual replaces the deprecated assertEquals alias.
    self.assertEqual(1, len(mirrors))
    mirror = mirrors[0]
    self.assertEqual(mirror["arches"], ["i386", "amd64", "blah"])

    mods = stages.Modules(initer)
    (which_ran, failures) = mods.run_section("cloud_init_modules")
    self.assertTrue(len(failures) == 0)
    self.assertTrue(os.path.exists("/etc/blah.ini"))
    self.assertIn("write-files", which_ran)
    contents = util.load_file("/etc/blah.ini")
    self.assertEqual(contents, "blah")
def write_testcase_config(args, fmt_args, testcase_file):
    """Render the testcase config and write it to testcase_file."""
    case_cfg = {'enabled': args.enable, 'collect_scripts': {}}
    if args.config:
        case_cfg['cloud_config'] = args.config
    fmt_args['config'] = util.yaml_format(case_cfg)
    rendered = _config_fmt.format(**fmt_args)
    c_util.write_file(testcase_file, rendered, omode='w')
def test_rc_local_exists(self):
    """Exercise find_rc_local across missing, present and symlinked rc.local."""
    post_cust = PostCustomScript("test-cust", self.tmpDir)

    # A non-existent rc.local path resolves to the empty string.
    with mock.patch.object(CustomScriptConstant, "RC_LOCAL", "/no/path"):
        self.assertEqual("", post_cust.find_rc_local())

    # An existing rc.local file is returned as-is.
    rclocal_file = self.tmp_path("vmware-rclocal", self.tmpDir)
    util.write_file(rclocal_file,
                    "# Run post-reboot guest customization",
                    omode="w")
    with mock.patch.object(CustomScriptConstant, "RC_LOCAL", rclocal_file):
        found = post_cust.find_rc_local()
        self.assertEqual(rclocal_file, found)
        self.assertTrue(post_cust.has_previous_agent, found)

    # A symlinked rc.local resolves to its target.
    rclocal_link = self.tmp_path("dummy-rclocal-link", self.tmpDir)
    util.sym_link(rclocal_file, rclocal_link, True)
    with mock.patch.object(CustomScriptConstant, "RC_LOCAL", rclocal_link):
        self.assertEqual(rclocal_file, post_cust.find_rc_local())
def test_prepare_custom_script(self):
    """
    Verify behavior based on the presence of the custom script.

    When a custom script is expected but was not properly copied,
    prepare_script raises CustomScriptNotFound.  When present,
    prepare_script strips carriage returns from the script.
    """
    # Custom script does not exist: preparation must fail loudly.
    preCust = PreCustomScript("random-vmw-test", self.tmpDir)
    self.assertEqual("random-vmw-test", preCust.scriptname)
    self.assertEqual(self.tmpDir, preCust.directory)
    self.assertEqual(self.tmp_path("random-vmw-test", self.tmpDir),
                     preCust.scriptpath)
    with self.assertRaises(CustomScriptNotFound):
        preCust.prepare_script()

    # Custom script exists.  Fix: write real carriage returns ("\r"),
    # not the two literal characters "/r", so CR-stripping is actually
    # exercised.
    custScript = self.tmp_path("test-cust", self.tmpDir)
    util.write_file(custScript, "test-CR-strip\r\r")
    postCust = PostCustomScript("test-cust", self.tmpDir)
    self.assertEqual("test-cust", postCust.scriptname)
    self.assertEqual(self.tmpDir, postCust.directory)
    self.assertEqual(custScript, postCust.scriptpath)
    self.assertFalse(postCust.postreboot)
    postCust.prepare_script()
    # Fix: assert on the script's content, not on the path string
    # (the original `"/r" in custScript` tested the filename and could
    # never fail).
    self.assertNotIn("\r", util.load_file(custScript))
def _install_post_reboot_agent(self, rclocal):
    """
    Register the post-reboot customization agent in rc.local.

    Edits the rclocal file to invoke a VMware script at boot, which in
    turn is responsible for handling the user script.
    @param: path to rc local.
    """
    LOG.debug("Installing post-reboot customization from %s to %s",
              self.directory, rclocal)
    if self.has_previous_agent(rclocal):
        # Registration is a one-time operation; subsequent
        # customizations must not append the agent again.
        LOG.info("Post-reboot guest customization agent is already "
                 "registered in rc.local")
    else:
        LOG.info("Adding post-reboot customization agent to rc.local")
        new_content = dedent("""
            # Run post-reboot guest customization
            /bin/sh %s
            exit 0
            """) % CustomScriptConstant.POST_CUST_RUN_SCRIPT
        existing_rclocal = util.load_file(rclocal).replace('exit 0\n', '')
        # Preserve existing mode bits but ensure the execute bit is set.
        mode = os.stat(rclocal).st_mode | stat.S_IEXEC
        util.write_file(rclocal, existing_rclocal + new_content, mode)
    LOG.debug("Installing post-reboot customization agent finished: %s",
              self.postreboot)
def get_data(self):
    """Find a config drive (seed dir or block device) and load it.

    Returns True when this datasource claims the data (dsmode matches),
    False otherwise.  Side effects: may write /etc/network/interfaces
    and run 'ifup --all' when running in local dsmode.
    """
    found = None
    md = {}
    ud = ""
    defaults = {"instance-id": DEFAULT_IID, "dsmode": "pass"}

    # Prefer the on-disk seed directory when present.
    if os.path.isdir(self.seeddir):
        try:
            (md, ud) = read_config_drive_dir(self.seeddir)
            found = self.seeddir
        except nonConfigDriveDir:
            pass

    # Otherwise try mounting the config-drive block device.
    if not found:
        dev = cfg_drive_device()
        if dev:
            try:
                (md, ud) = util.mount_callback_umount(
                    dev, read_config_drive_dir)
                found = dev
            except (nonConfigDriveDir, util.mountFailedError):
                pass

    if not found:
        return False

    # Fix: the guard previously tested the misspelled key 'dsconfig'
    # while reading md['dscfg'], so datasource config was never applied
    # (and would have raised KeyError if 'dsconfig' ever appeared).
    if 'dscfg' in md:
        self.cfg = md['dscfg']

    md = util.mergedict(md, defaults)

    # update interfaces and ifup only on the local datasource
    # this way the DataSourceConfigDriveNet doesn't do it also.
    if 'network-interfaces' in md and self.dsmode == "local":
        if md['dsmode'] == "pass":
            log.info("updating network interfaces from configdrive")
        else:
            log.debug("updating network interfaces from configdrive")
        util.write_file("/etc/network/interfaces",
                        md['network-interfaces'])
        try:
            (out, err) = util.subp(['ifup', '--all'])
            if len(out) or len(err):
                log.warn("ifup --all had stderr: %s" % err)
        except subprocess.CalledProcessError as exc:
            log.warn("ifup --all failed: %s" % (exc.output[1]))

    self.seed = found
    self.metadata = md
    self.userdata_raw = ud

    if md['dsmode'] == self.dsmode:
        return True

    log.debug("%s: not claiming datasource, dsmode=%s" %
              (self, md['dsmode']))
    return False
def _write_network(self, settings):
    """Render ubuntu-style network settings into FreeBSD rc.conf entries.

    Static interfaces get an 'ifconfig_<dev>' entry plus a
    'defaultrouter' entry; all others fall back to DHCP.  DNS servers
    and search domains from all static interfaces are merged into
    resolv.conf.  Returns the device names seen in *settings*.
    """
    entries = net_util.translate_network(settings)
    nameservers = []
    searchdomains = []
    dev_names = entries.keys()
    for (device, info) in entries.items():
        # Skip the loopback interface.
        if device.startswith('lo'):
            continue
        # Map the generic device name to the platform interface name.
        dev = self.getnetifname(device)
        LOG.info('Configuring interface %s', dev)
        if info.get('bootproto') == 'static':
            LOG.debug('Configuring dev %s with %s / %s', dev,
                      info.get('address'), info.get('netmask'))
            # Configure an ipv4 address.
            ifconfig = (info.get('address') + ' netmask ' +
                        info.get('netmask'))
            # Configure the gateway.
            self.updatercconf('defaultrouter', info.get('gateway'))
            if 'dns-nameservers' in info:
                nameservers.extend(info['dns-nameservers'])
            if 'dns-search' in info:
                searchdomains.extend(info['dns-search'])
        else:
            ifconfig = 'DHCP'
        self.updatercconf('ifconfig_' + dev, ifconfig)

    # Try to read the /etc/resolv.conf or just start from scratch if that
    # fails.
    try:
        resolvconf = ResolvConf(util.load_file(self.resolv_conf_fn))
        resolvconf.parse()
    except IOError:
        util.logexc(LOG, "Failed to parse %s, use new empty file",
                    self.resolv_conf_fn)
        resolvconf = ResolvConf('')
        resolvconf.parse()

    # Add some nameservers
    for server in nameservers:
        try:
            resolvconf.add_nameserver(server)
        except ValueError:
            util.logexc(LOG, "Failed to add nameserver %s", server)

    # And add any searchdomains.
    for domain in searchdomains:
        try:
            resolvconf.add_search_domain(domain)
        except ValueError:
            util.logexc(LOG, "Failed to add search domain %s", domain)
    util.write_file(self.resolv_conf_fn, str(resolvconf), 0o644)

    return dev_names
def test_main_init_run_net_stops_on_file_no_net(self):
    """When no-net file is present, main_init does not process modules."""
    stop_file = os.path.join(self.cloud_dir, 'data', 'no-net')  # stop file
    write_file(stop_file, '')
    cmdargs = myargs(
        debug=False, files=None, force=False, local=False, reporter=None,
        subcommand='init')
    # wrap_and_call patches the named attributes in cloudinit.cmd.main
    # for the duration of the main_init call and returns its result.
    (_item1, item2) = wrap_and_call(
        'cloudinit.cmd.main',
        {'util.close_stdin': True,
         'netinfo.debug_info': 'my net debug info',
         'util.fixup_output': ('outfmt', 'errfmt')},
        main.main_init, 'init', cmdargs)
    # We should not run write_files module
    self.assertFalse(
        os.path.exists(os.path.join(self.new_root, 'etc/blah.ini')),
        'Unexpected run of write_files module produced blah.ini')
    self.assertEqual([], item2)
    # Instancify is called
    instance_id_path = 'var/lib/cloud/data/instance-id'
    self.assertFalse(
        os.path.exists(os.path.join(self.new_root, instance_id_path)),
        'Unexpected call to datasource.instancify produced instance-id')
    # main_init logs both the early-exit reason and net debug info.
    expected_logs = [
        "Exiting. stop file ['{stop_file}'] existed\n".format(
            stop_file=stop_file),
        'my net debug info'  # netinfo.debug_info
    ]
    for log in expected_logs:
        self.assertIn(log, self.stderr.getvalue())
def handle(_name, cfg, cloud, log, _args):
    """
    Basically turn a top level 'landscape' entry with a 'client' dict
    and render it to ConfigObj format under '[client]' section in
    /etc/landscape/client.conf

    Raises RuntimeError when the 'landscape' key is not a dict.
    """
    ls_cloudcfg = cfg.get("landscape", {})

    if not isinstance(ls_cloudcfg, dict):
        # Fix: the message used to be passed as two separate
        # RuntimeError args and was never %-formatted.
        raise RuntimeError(
            "'landscape' key existed in config, but not a dictionary type,"
            " is a %s instead" % type_utils.obj_name(ls_cloudcfg))
    if not ls_cloudcfg:
        return

    cloud.distro.install_packages(('landscape-client',))

    # Merge builtin defaults, any existing client.conf, then cloud-config.
    merge_data = [
        LSC_BUILTIN_CFG,
        LSC_CLIENT_CFG_FILE,
        ls_cloudcfg,
    ]
    merged = merge_together(merge_data)
    contents = StringIO()
    merged.write(contents)

    util.ensure_dir(os.path.dirname(LSC_CLIENT_CFG_FILE))
    util.write_file(LSC_CLIENT_CFG_FILE, contents.getvalue())
    log.debug("Wrote landscape config file to %s", LSC_CLIENT_CFG_FILE)

    util.write_file(LS_DEFAULT_FILE, "RUN=1\n")
    util.subp(["service", "landscape-client", "restart"])
def write_sudo_rules(self, user, rules, sudo_file=None):
    """Write (or append) sudoers rules for *user*.

    rules may be a single string or a list/tuple of strings.  Raises
    TypeError for any other type, and re-raises IOError on write
    failures after logging them.
    """
    if not sudo_file:
        sudo_file = self.ci_sudoers_fn

    lines = ["", "# User rules for %s" % user]
    if isinstance(rules, (list, tuple)):
        for rule in rules:
            lines.append("%s %s" % (user, rule))
    elif isinstance(rules, str):
        # Fix: 'basestring' does not exist on python3 and raised
        # NameError there; 'str' covers the intended case.
        lines.append("%s %s" % (user, rules))
    else:
        msg = "Can not create sudoers rule addition with type %r"
        raise TypeError(msg % (type_utils.obj_name(rules)))
    content = "\n".join(lines)
    content += "\n"  # trailing newline

    self.ensure_sudo_dir(os.path.dirname(sudo_file))
    if not os.path.exists(sudo_file):
        contents = [util.make_header(), content]
        try:
            # Fix: 0o440 replaces the python2-only octal literal 0440
            # (sudoers must be read-only).
            util.write_file(sudo_file, "\n".join(contents), 0o440)
        except IOError:
            util.logexc(LOG, "Failed to write sudoers file %s", sudo_file)
            # Bare raise preserves the original traceback.
            raise
    else:
        try:
            util.append_file(sudo_file, content)
        except IOError:
            util.logexc(LOG, "Failed to append sudoers file %s", sudo_file)
            raise
def write_files(datadir, files, dirmode=None):
    """Persist the provided files under datadir, redacting the password
    held in any ovf-env.xml payload before writing."""

    def _redact_password(cnt, fname):
        """Azure provides the UserPassword in plain text. So we redact it"""
        try:
            root = ET.fromstring(cnt)
            for elem in root.iter():
                is_password = 'UserPassword' in elem.tag
                if is_password and elem.text != DEF_PASSWD_REDACTION:
                    elem.text = DEF_PASSWD_REDACTION
            return ET.tostring(root)
        except Exception:
            LOG.critical("failed to redact userpassword in {}".format(fname))
            return cnt

    if not datadir:
        return
    util.ensure_dir(datadir, dirmode)
    for (name, content) in (files or {}).items():
        fname = os.path.join(datadir, name)
        if 'ovf-env.xml' in name:
            content = _redact_password(content, fname)
        # 0o600: file contents may hold secrets; owner-only access.
        util.write_file(filename=fname, content=content, mode=0o600)
def test_proxy_replaced(self):
    """Applying an apt_proxy config writes the proxy file."""
    util.write_file(self.cfile, "content doesnt matter")
    cc_apt_configure.apply_apt_config(
        {'apt_proxy': "foo"}, self.pfile, self.cfile)
    self.assertTrue(os.path.isfile(self.pfile))
    written = load_tfile_or_url(self.pfile)
    self.assertTrue(self._search_apt_config(written, "http", "foo"))
def test_handle_args_returns_standardized_vars_to_top_level_aliases(self):
    """Any standardized vars under v# are promoted as top-level aliases."""
    write_file(
        self.instance_data,
        '{"v1": {"v1_1": "val1.1"}, "v2": {"v2_2": "val2.2"},'
        ' "top": "gun"}')
    # Keys nested under v1/v2 appear again as top-level aliases;
    # userdata/vendordata are redacted for non-root users.
    expected = dedent("""\
        {
         "top": "gun",
         "userdata": "<redacted for non-root user> file:ud",
         "v1": {
          "v1_1": "val1.1"
         },
         "v1_1": "val1.1",
         "v2": {
          "v2_2": "val2.2"
         },
         "v2_2": "val2.2",
         "vendordata": "<redacted for non-root user> file:vd"
        }
    """)
    args = self.args(
        debug=False, dump_all=True, format=None,
        instance_data=self.instance_data, user_data='ud',
        vendor_data='vd', list_keys=False, varname=None)
    with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
        self.assertEqual(0, query.handle_args('anyname', args))
    self.assertEqual(expected, m_stdout.getvalue())
def test_existing_config_is_saved(self):
    """configure() keeps the prior server config as a .old backup."""
    cfg = {'loglevel': 'warn'}
    util.write_file(self.server_cfg, STOCK_CONFIG)
    cc_mcollective.configure(config=cfg, server_cfg=self.server_cfg)
    backup = self.server_cfg + ".old"
    self.assertTrue(os.path.exists(self.server_cfg))
    self.assertTrue(os.path.exists(backup))
    self.assertEqual(util.load_file(backup), STOCK_CONFIG)
def setUp(self):
    """Build a fake root with a cloud.cfg and patch os/util/stdio to it."""
    super(TestMain, self).setUp()
    self.new_root = self.tmp_dir()
    self.cloud_dir = self.tmp_path('var/lib/cloud/', dir=self.new_root)
    os.makedirs(self.cloud_dir)
    self.replicateTestRoot('simple_ubuntu', self.new_root)
    self.cfg = {
        'datasource_list': ['None'],
        'runcmd': ['ls /etc'],  # test ALL_DISTROS
        'system_info': {'paths': {'cloud_dir': self.cloud_dir,
                                  'run_dir': self.new_root}},
        'write_files': [
            {
                'path': '/etc/blah.ini',
                'content': 'blah',
                'permissions': 0o755,
            },
        ],
        'cloud_init_modules': ['write-files', 'runcmd'],
    }
    cloud_cfg = yaml_dumps(self.cfg)
    ensure_dir(os.path.join(self.new_root, 'etc', 'cloud'))
    self.cloud_cfg_file = os.path.join(
        self.new_root, 'etc', 'cloud', 'cloud.cfg')
    write_file(self.cloud_cfg_file, cloud_cfg)
    # Patch filesystem access AFTER the config has been written, so the
    # writes above land in the real tmp dir, then redirect everything
    # (os, util, stdout/stderr) into new_root for the tests.
    self.patchOS(self.new_root)
    self.patchUtils(self.new_root)
    self.stderr = StringIO()
    self.patchStdoutAndStderr(stderr=self.stderr)
def set_etc_timezone(tz, tz_file=None, tz_conf="/etc/timezone",
                     tz_local="/etc/localtime"):
    """Record *tz* in tz_conf and, when both paths are given, install
    tz_file as tz_local."""
    util.write_file(tz_conf, str(tz).rstrip() + "\n")
    # This ensures that the correct tz will be used for the system
    if tz_file and tz_local:
        util.copy(tz_file, tz_local)
def install_chef(cloud, chef_cfg, log):
    """Install chef according to 'install_type' in chef_cfg.

    Returns whether chef-client should subsequently be run ('exec').
    """
    # If chef is not installed, we install chef based on 'install_type'
    install_type = util.get_cfg_option_str(chef_cfg, 'install_type',
                                           'packages')
    run = util.get_cfg_option_bool(chef_cfg, 'exec', default=False)
    if install_type == "gems":
        # This will install and run the chef-client from gems
        chef_version = util.get_cfg_option_str(chef_cfg, 'version', None)
        ruby_version = util.get_cfg_option_str(chef_cfg, 'ruby_version',
                                               RUBY_VERSION_DEFAULT)
        install_chef_from_gems(cloud.distro, ruby_version, chef_version)
        # Retain backwards compat, by preferring True instead of False
        # when not provided/overriden...
        run = util.get_cfg_option_bool(chef_cfg, 'exec', default=True)
    elif install_type == 'packages':
        # This will install and run the chef-client from packages
        cloud.distro.install_packages(('chef',))
    elif install_type == 'omnibus':
        # This will install as a omnibus unified package
        url = util.get_cfg_option_str(chef_cfg, "omnibus_url", OMNIBUS_URL)
        retries = max(0, util.get_cfg_option_int(chef_cfg,
                                                 "omnibus_url_retries",
                                                 default=OMNIBUS_URL_RETRIES))
        content = url_helper.readurl(url=url, retries=retries)
        with util.tempdir() as tmpd:
            # Use tmpdir over tmpfile to avoid 'text file busy' on execute
            tmpf = "%s/chef-omnibus-install" % tmpd
            # Fix: 0o700 replaces the python2-only octal literal 0700,
            # which is a SyntaxError on python3.
            util.write_file(tmpf, str(content), mode=0o700)
            util.subp([tmpf], capture=False)
    else:
        log.warn("Unknown chef install type '%s'", install_type)
        run = False
    return run
def apply_locale(self, locale, out_fn=None):
    """Set the default login-class language in login.conf to *locale*."""
    # Rewrite each 'default:' line so it carries the requested lang.
    rendered = StringIO()
    for line in util.load_file(self.login_conf_fn).splitlines():
        rendered.write(re.sub(r'^default:',
                              r'default:lang=%s:' % locale, line))
        rendered.write("\n")

    # Keep a backup so a failed cap_mkdb run can be rolled back.
    util.copy(self.login_conf_fn, self.login_conf_fn_bak)
    util.write_file(self.login_conf_fn, rendered.getvalue())

    try:
        LOG.debug("Running cap_mkdb for %s", locale)
        util.subp(['cap_mkdb', self.login_conf_fn])
    except util.ProcessExecutionError:
        # cap_mkdb failed, so restore the backup.
        util.logexc(LOG, "Failed to apply locale %s", locale)
        try:
            util.copy(self.login_conf_fn_bak, self.login_conf_fn)
        except IOError:
            util.logexc(LOG, "Failed to restore %s backup",
                        self.login_conf_fn)
def generate_sources_list(cfg, release, mirrors, cloud):
    """generate_sources_list
       create a source.list file based on a custom or default template
       by replacing mirrors and release in the template"""
    aptsrc = "/etc/apt/sources.list"
    params = {'RELEASE': release, 'codename': release}
    # Expose each mirror under both its original and lowercased key.
    for k, v in mirrors.items():
        params[k] = v
        params[k.lower()] = v

    tmpl = cfg.get('sources_list', None)
    if tmpl is None:
        LOG.info("No custom template provided, fall back to builtin")
        template_fn = cloud.get_template_filename(
            'sources.list.%s' % (cloud.distro.name))
        if not template_fn:
            template_fn = cloud.get_template_filename('sources.list')
        if not template_fn:
            LOG.warn("No template found, "
                     "not rendering /etc/apt/sources.list")
            return
        tmpl = util.load_file(template_fn)

    rendered = templater.render_string(tmpl, params)
    disabled = disable_suites(cfg.get('disable_suites'), rendered, release)
    util.write_file(aptsrc, disabled, mode=0o644)
def setup_user_keys(keys, user, key_prefix, log=None):
    """Install authorized ssh keys for *user*.

    Honors the sshd AuthorizedKeysFile setting when parseable, falling
    back to ~/.ssh/authorized_keys.  Files are created with a
    restrictive umask and chowned to the user.
    """
    import pwd
    # Fix: 0o077/0o600 replace python2-only octal literals (077, 0600),
    # which are SyntaxErrors on python3.
    saved_umask = os.umask(0o077)
    try:
        pwent = pwd.getpwnam(user)

        ssh_dir = '%s/.ssh' % pwent.pw_dir
        if not os.path.exists(ssh_dir):
            os.mkdir(ssh_dir)
            os.chown(ssh_dir, pwent.pw_uid, pwent.pw_gid)

        try:
            ssh_cfg = parse_ssh_config()
            akeys = ssh_cfg.get("AuthorizedKeysFile",
                                "%h/.ssh/authorized_keys")
            akeys = akeys.replace("%h", pwent.pw_dir)
            akeys = akeys.replace("%u", user)
            authorized_keys = akeys
        except Exception:
            authorized_keys = '%s/.ssh/authorized_keys' % pwent.pw_dir
            if log:
                util.logexc(log)

        key_entries = []
        for k in keys:
            key_entries.append(AuthKeyEntry(k, def_opt=key_prefix))

        content = update_authorized_keys(authorized_keys, key_entries)
        util.write_file(authorized_keys, content, 0o600)
        os.chown(authorized_keys, pwent.pw_uid, pwent.pw_gid)
        util.restorecon_if_possible(ssh_dir, recursive=True)
    finally:
        # Fix: restore the caller's umask even when an exception is
        # raised above (previously a failure left the umask at 0o077).
        os.umask(saved_umask)
def _write_part(self, payload, filename):
    """Write the boothook payload (prefix stripped) into boothook_dir.

    Returns the path of the written file.
    """
    safe_name = util.clean_filename(filename)
    filepath = os.path.join(self.boothook_dir, safe_name)
    body = util.strip_prefix_suffix(util.dos2unix(payload),
                                    prefix=self.prefixes[0])
    # 0o700: boothooks are executed directly, so owner rwx only.
    util.write_file(filepath, body.lstrip(), 0o700)
    return filepath
def test_unrecognized(self):
    '''Failure Test module get_data() forcing unrecognized.'''
    util.write_file(self.cloud_info_file, 'unrecognized')
    dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
    with mock.patch.object(dsac, 'CLOUD_INFO_FILE', self.cloud_info_file):
        result = dsrc.get_data()
    self.assertEqual(False, result)
def configure(self, osfamily=None):
    """
    Configure the /etc/network/interfaces
    Make a back up of the original
    """
    # Covers both a missing osfamily and a non-debian one.
    if osfamily != "debian":
        logger.info("Debian OS not detected. Skipping the configure step")
        return

    containing_dir = '/etc/network'
    interface_file = os.path.join(containing_dir, 'interfaces')
    original_file = os.path.join(
        containing_dir, 'interfaces.before_vmware_customization')
    # Preserve the pristine interfaces file the first time through.
    if not os.path.exists(original_file) and os.path.exists(interface_file):
        os.rename(interface_file, original_file)

    lines = [
        "# DO NOT EDIT THIS FILE BY HAND --"
        " AUTOMATICALLY GENERATED BY cloud-init",
        "source /etc/network/interfaces.d/*.cfg",
    ]
    util.write_file(interface_file, content='\n'.join(lines))
    self.clear_dhcp()
def add_sources(srclist, template_params=None, aa_repo_match=None):
    """
    add entries in /etc/apt/sources.list.d for each abbreviated
    sources.list entry in 'srclist'.  When rendering template, also
    include the values in dictionary searchList.

    Returns a list of [source, message] pairs for entries that failed.
    """
    if template_params is None:
        template_params = {}

    if aa_repo_match is None:
        # Default: nothing uses the add-apt-repository shortcut form.
        def aa_repo_match(_source):
            return False

    errorlist = []
    for ent in srclist:
        if 'source' not in ent:
            errorlist.append(["", "missing source"])
            continue

        source = ent['source']
        source = templater.render_string(source, template_params)

        if aa_repo_match(source):
            try:
                util.subp(["add-apt-repository", source])
            except util.ProcessExecutionError as e:
                errorlist.append([source,
                                  ("add-apt-repository failed. " + str(e))])
            continue

        if 'filename' not in ent:
            ent['filename'] = 'cloud_config_sources.list'
        if not ent['filename'].startswith("/"):
            ent['filename'] = os.path.join("/etc/apt/sources.list.d/",
                                           ent['filename'])

        if ('keyid' in ent and 'key' not in ent):
            ks = ent.get('keyserver', "keyserver.ubuntu.com")
            # Fix: the bare 'except:' clauses below also trapped
            # SystemExit/KeyboardInterrupt; 'except Exception' keeps the
            # best-effort behavior without swallowing interpreter exits.
            try:
                ent['key'] = getkeybyid(ent['keyid'], ks)
            except Exception:
                errorlist.append([source, "failed to get key from %s" % ks])
                continue

        if 'key' in ent:
            try:
                util.subp(('apt-key', 'add', '-'), ent['key'])
            except Exception:
                errorlist.append([source, "failed add key"])

        try:
            contents = "%s\n" % (source)
            util.write_file(ent['filename'], contents, omode="ab")
        except Exception:
            errorlist.append([source,
                              "failed write to file %s" % ent['filename']])

    return errorlist
def _write_network(self, settings):
    """Render ubuntu-style network settings into netctl profiles.

    Writes one netctl profile per device, enables 'auto' interfaces,
    and merges dns-nameservers from ALL devices into resolv.conf.
    Returns the device names found in *settings*.
    """
    entries = net_util.translate_network(settings)
    LOG.debug("Translated ubuntu style network settings %s into %s",
              settings, entries)
    dev_names = entries.keys()
    # Fix: nameservers was previously reset inside the loop, so only
    # the LAST device's dns-nameservers could ever reach resolv.conf.
    nameservers = []
    # Format for netctl
    for (dev, info) in entries.items():
        net_fn = self.network_conf_dir + dev
        net_cfg = {
            'Connection': 'ethernet',
            'Interface': dev,
            'IP': info.get('bootproto'),
            'Address': "('%s/%s')" % (info.get('address'),
                                      info.get('netmask')),
            'Gateway': info.get('gateway'),
            # Fix: tuple(None) raised TypeError when a device had no
            # dns-nameservers; default to an empty list instead.
            'DNS': str(tuple(info.get('dns-nameservers',
                                      []))).replace(',', '')
        }
        util.write_file(net_fn, convert_netctl(net_cfg))
        if info.get('auto'):
            self._enable_interface(dev)
        if 'dns-nameservers' in info:
            nameservers.extend(info['dns-nameservers'])
    if nameservers:
        util.write_file(self.resolve_conf_fn,
                        convert_resolv_conf(nameservers))
    return dev_names
def on_first_boot(data, distro=None):
    """Performs any first-boot actions using data read from a config-drive."""
    if not isinstance(data, dict):
        raise TypeError("Config-drive data expected to be a dict; not %s"
                        % (type(data)))

    # Prefer vendor-supplied JSON network info when the distro supports it.
    networkapplied = False
    jsonnet_conf = data.get('vendordata', {}).get('network_info')
    if jsonnet_conf:
        try:
            LOG.debug("Updating network interfaces from JSON in config drive")
            distro_user_config = distro.apply_network_json(jsonnet_conf)
            networkapplied = True
        except NotImplementedError:
            LOG.debug(
                "Distro does not implement networking setup via Vendor JSON.")

    # Fall back to the legacy debian-style network_config blob.
    net_conf = data.get("network_config", '')
    if not networkapplied and net_conf and distro:
        LOG.debug("Updating network interfaces from config drive")
        distro.apply_network(net_conf)

    files = data.get('files', {})
    if files:
        LOG.debug("Writing %s injected files", len(files))
        for (filename, content) in files.items():
            if not filename.startswith(os.sep):
                filename = os.sep + filename
            try:
                util.write_file(filename, content, mode=0o660)
            except IOError:
                util.logexc(LOG, "Failed writing file: %s", filename)
def handle_part(self, data, ctype, filename, payload, frequency):
    """Install the part as an upstart job and reload upstart config."""
    if ctype in handlers.CONTENT_SIGNALS:
        return

    # See: https://bugs.launchpad.net/bugs/819507
    if frequency != PER_INSTANCE:
        return

    if not self.upstart_dir:
        return

    # Normalize the job filename and force a .conf extension.
    filename = util.clean_filename(filename)
    (_name, ext) = os.path.splitext(filename)
    ext = (ext or '').lower()
    if ext != ".conf":
        filename = filename + ".conf"

    payload = util.dos2unix(payload)
    path = os.path.join(self.upstart_dir, filename)
    util.write_file(path, payload, 0o644)

    if SUITABLE_UPSTART:
        util.subp(["initctl", "reload-configuration"], capture=False)
def handle(_name, cfg, cloud, log, args):
    """Render and emit the final boot message; record boot-finished."""
    if args:
        msg_in = str(args[0])
    else:
        msg_in = util.get_cfg_option_str(cfg, "final_message", "")
    # Fall back to the builtin default when nothing meaningful was given.
    msg_in = msg_in.strip() or FINAL_MESSAGE_DEF

    uptime = util.uptime()
    ts = util.time_rfc2822()
    cver = version.version_string()
    try:
        subs = {
            'uptime': uptime,
            'timestamp': ts,
            'version': cver,
            'datasource': str(cloud.datasource),
        }
        # Also expose upper-cased aliases of every substitution key.
        subs.update({k.upper(): v for k, v in subs.items()})
        util.multi_log("%s\n" % (templater.render_string(msg_in, subs)),
                       console=False, stderr=True, log=log)
    except Exception:
        util.logexc(log, "Failed to render final message template")

    boot_fin_fn = cloud.paths.boot_finished
    try:
        contents = "%s - %s - v. %s\n" % (uptime, ts, cver)
        util.write_file(boot_fin_fn, contents)
    except Exception:
        util.logexc(log, "Failed to write boot finished file %s",
                    boot_fin_fn)

    if cloud.datasource.is_disconnected:
        log.warning("Used fallback datasource")
def test_execute_post_cust(self):
    """
    This test is designed to verify the behavior after execute post
    customization.
    """
    # Prepare the customize package
    post_cust_run = self.tmp_path("post-customize-guest.sh", self.tmpDir)
    util.write_file(post_cust_run, "This is the script to run post cust")
    user_script = self.tmp_path("test-cust", self.tmpDir)
    util.write_file(user_script, "This is the post cust script")

    # Mock the cc_scripts_per_instance dir and marker file.
    # Create another tmp dir for cc_scripts_per_instance.
    cc_script_dir = self.tmp_dir()
    cc_script = os.path.join(cc_script_dir, "post-customize-guest.sh")
    marker_file = os.path.join(self.tmpDir, ".markerFile")
    with mock.patch.object(CustomScriptConstant, "CUSTOM_TMP_DIR",
                           self.execDir):
        with mock.patch.object(CustomScriptConstant, "CUSTOM_SCRIPT",
                               self.execScript):
            with mock.patch.object(CustomScriptConstant,
                                   "POST_CUSTOM_PENDING_MARKER",
                                   marker_file):
                post_cust = PostCustomScript("test-cust", self.tmpDir,
                                             cc_script_dir)
                post_cust.execute()
                # Check cc_scripts_per_instance and marker file
                # are created.
                self.assertTrue(os.path.exists(cc_script))
                with open(cc_script, "r") as f:
                    content = f.read()
                self.assertEqual(content,
                                 "This is the script to run post cust")
                self.assertTrue(os.path.exists(marker_file))
def test_get_data_vmware_customization_enabled(self):
    """When cloud-init workflow for vmware is enabled via sys_cfg log a
    message.
    """
    paths = Paths({"cloud_dir": self.tdir})
    ds = self.datasource(
        sys_cfg={"disable_vmware_customization": False},
        distro={},
        paths=paths,
    )
    # An imc config naming a custom script that is never written to
    # disk: get_data must fail with CustomScriptNotFound.
    conf_file = self.tmp_path("test-cust", self.tdir)
    conf_content = dedent(
        """\
        [CUSTOM-SCRIPT]
        SCRIPT-NAME = test-script
        [MISC]
        MARKER-ID = 12345345
        """
    )
    util.write_file(conf_file, conf_content)
    with mock.patch(MPATH + "get_tools_config", return_value="true"):
        with self.assertRaises(CustomScriptNotFound) as context:
            # wrap_and_call patches the named attributes within the
            # DataSourceOVF module for the duration of ds.get_data.
            wrap_and_call(
                "cloudinit.sources.DataSourceOVF",
                {
                    "dmi.read_dmi_data": "vmware",
                    "util.del_dir": True,
                    "search_file": self.tdir,
                    "wait_for_imc_cfg_file": conf_file,
                    "get_nics_to_enable": "",
                },
                ds.get_data,
            )
    customscript = self.tmp_path("test-script", self.tdir)
    self.assertIn(
        "Script %s not found!!" % customscript, str(context.exception)
    )
def _render_network(entries, target="/", conf_dir="etc/netctl",
                    resolv_conf="etc/resolv.conf", enable_func=None):
    """Render the translate_network format into netctl files in target.
    Paths will be rendered under target.
    """
    configured = []
    nameservers = []
    resolv_conf = subp.target_path(target, resolv_conf)
    conf_dir = subp.target_path(target, conf_dir)

    for (dev, info) in entries.items():
        if dev == 'lo':
            # no configuration should be rendered for 'lo'
            continue
        configured.append(dev)
        profile_path = os.path.join(conf_dir, dev)
        profile = {
            'Connection': 'ethernet',
            'Interface': dev,
            'IP': info.get('bootproto'),
            'Address': "%s/%s" % (info.get('address'),
                                  info.get('netmask')),
            'Gateway': info.get('gateway'),
            'DNS': info.get('dns-nameservers', []),
        }
        util.write_file(profile_path, convert_netctl(profile))
        if enable_func and info.get('auto'):
            enable_func(dev)
        if 'dns-nameservers' in info:
            nameservers.extend(info['dns-nameservers'])

    if nameservers:
        util.write_file(resolv_conf, convert_resolv_conf(nameservers))
    return configured
def apply_rsyslog_changes(configs, def_fname, cfg_dir):
    # apply the changes in 'configs' to the paths in def_fname and cfg_dir
    # return a list of the files changed
    files = []
    for cur_pos, ent in enumerate(configs):
        # Each entry is either a dict with content/filename, or a raw
        # content string destined for the default file.
        if isinstance(ent, dict):
            if "content" not in ent:
                LOG.warn("No 'content' entry in config entry %s",
                         cur_pos + 1)
                continue
            content = ent['content']
            filename = ent.get("filename", def_fname)
        else:
            content = ent
            filename = def_fname

        filename = filename.strip()
        if not filename:
            LOG.warn("Entry %s has an empty filename", cur_pos + 1)
            continue
        filename = os.path.join(cfg_dir, filename)

        # Truncate filename first time you see it
        if filename in files:
            omode = "ab"
        else:
            omode = "wb"
            files.append(filename)

        try:
            endl = "" if content.endswith("\n") else "\n"
            util.write_file(filename, content + endl, omode=omode)
        except Exception:
            util.logexc(LOG, "Failed to write to %s", filename)
    return files
def test_none_ds(self):
    """End-to-end run of the None datasource consuming write-files.

    Builds a fake root with a cloud.cfg, runs the init stages plus the
    cloud_init_modules section, and verifies package mirrors and the
    write-files output.
    """
    new_root = self.makeDir()
    self.replicateTestRoot('simple_ubuntu', new_root)
    cfg = {
        'datasource_list': ['None'],
        'cloud_init_modules': ['write-files'],
    }
    ud = self.readResource('user_data.1.txt')
    cloud_cfg = util.yaml_dumps(cfg)
    util.ensure_dir(os.path.join(new_root, 'etc', 'cloud'))
    util.write_file(os.path.join(new_root, 'etc', 'cloud', 'cloud.cfg'),
                    cloud_cfg)
    self._patchIn(new_root)

    # Now start verifying whats created
    initer = stages.Init()
    initer.read_cfg()
    initer.initialize()
    initer.fetch()
    initer.datasource.userdata_raw = ud
    _iid = initer.instancify()
    initer.update()
    initer.cloudify().run('consume_data', initer.consume_data,
                          args=[PER_INSTANCE], freq=PER_INSTANCE)

    mirrors = initer.distro.get_option('package_mirrors')
    # Fix: assertEqual replaces the deprecated assertEquals alias.
    self.assertEqual(1, len(mirrors))
    mirror = mirrors[0]
    self.assertEqual(mirror['arches'], ['i386', 'amd64', 'blah'])

    mods = stages.Modules(initer)
    (which_ran, failures) = mods.run_section('cloud_init_modules')
    self.assertTrue(len(failures) == 0)
    self.assertTrue(os.path.exists('/etc/blah.ini'))
    self.assertIn('write-files', which_ran)
    contents = util.load_file('/etc/blah.ini')
    self.assertEqual(contents, 'blah')
def handle(name, cfg, cloud, log, _args):
    """Install and configure salt-minion from the 'salt_minion' cfg key."""
    # If there isn't a salt key in the configuration don't do anything
    if 'salt_minion' not in cfg:
        log.debug(("Skipping module named %s,"
                   " no 'salt_minion' key in configuration"), name)
        return

    salt_cfg = cfg['salt_minion']

    # Start by installing the salt package ...
    cloud.distro.install_packages(('salt-minion',))

    # Ensure we can configure files at the right dir
    config_dir = salt_cfg.get("config_dir", '/etc/salt')
    util.ensure_dir(config_dir)

    # ... and then update the salt configuration
    if 'conf' in salt_cfg:
        # Add all sections from the conf object to /etc/salt/minion
        minion_config = os.path.join(config_dir, 'minion')
        util.write_file(minion_config,
                        util.yaml_dumps(salt_cfg.get('conf')))

    if 'grains' in salt_cfg:
        # add grains to /etc/salt/grains
        grains_config = os.path.join(config_dir, 'grains')
        util.write_file(grains_config,
                        util.yaml_dumps(salt_cfg.get('grains')))

    # ... copy the key pair if specified
    if 'public_key' in salt_cfg and 'private_key' in salt_cfg:
        pki_dir_default = ("/etc/salt/pki/minion"
                           if os.path.isdir("/etc/salt/pki/minion")
                           else "/etc/salt/pki")
        pki_dir = salt_cfg.get('pki_dir', pki_dir_default)
        with util.umask(0o77):
            util.ensure_dir(pki_dir)
            pub_name = os.path.join(pki_dir, 'minion.pub')
            pem_name = os.path.join(pki_dir, 'minion.pem')
            util.write_file(pub_name, salt_cfg['public_key'])
            util.write_file(pem_name, salt_cfg['private_key'])

    # restart salt-minion. 'service' will start even if not started. if it
    # was started, it needs to be restarted for config change.
    util.subp(['service', 'salt-minion', 'restart'], capture=False)
def _write_interface_file(self, net_fn, net_cfg, route_entry):
    """Render a systemd.network-style unit for ``net_cfg`` into ``net_fn``.

    @param net_fn: destination file path.
    @param net_cfg: dict with keys such as Name, MACAddress, DHCP,
        Address, Gateway.
    @param route_entry: dict mapping 'routes.N' -> [gateway, destination].
    """
    if not net_cfg['Name']:
        return
    content = "[Match]\n"
    content += "Name=%s\n" % (net_cfg['Name'])
    if 'MACAddress' in net_cfg:
        content += "MACAddress=%s\n" % (net_cfg['MACAddress'])
    content += "[Network]\n"

    if 'DHCP' in net_cfg and net_cfg['DHCP'] in {'yes', 'ipv4', 'ipv6'}:
        content += "DHCP=%s\n" % (net_cfg['DHCP'])
    else:
        if 'Address' in net_cfg:
            content += "Address=%s\n" % (net_cfg['Address'])
        if 'Gateway' in net_cfg:
            content += "Gateway=%s\n" % (net_cfg['Gateway'])
        if 'DHCP' in net_cfg and net_cfg['DHCP'] == 'no':
            content += "DHCP=%s\n" % (net_cfg['DHCP'])

        route_index = 0
        if route_entry:
            while True:
                route_name = 'routes.' + str(route_index)
                if route_name not in route_entry:
                    break
                # BUG FIX: the previous code executed 'continue' on a
                # malformed entry without incrementing route_index, which
                # looped forever; it also emitted a bare [Route] header
                # before validating the entry.  Validate first, and always
                # advance to the next route.
                route = route_entry[route_name]
                if len(route) == 2:
                    content += "[Route]\n"
                    content += "Gateway=%s\n" % (route[0])
                    content += "Destination=%s\n" % (route[1])
                route_index += 1

    util.write_file(net_fn, content)
def _resolve_conf(self, settings, target=None):
    """Write resolv.conf entries gathered from the network settings."""
    nameservers = settings.dns_nameservers
    searchdomains = settings.dns_searchdomains
    # Fold per-subnet DNS settings into the global lists.
    for interface in settings.iter_interfaces():
        for subnet in interface.get("subnets", []):
            nameservers.extend(subnet.get('dns_nameservers', []))
            searchdomains.extend(subnet.get('dns_search', []))

    # Seed from the existing resolv.conf, or start from scratch when the
    # file cannot be read.
    resolv_path = subp.target_path(target, self.resolv_conf_fn)
    try:
        resolvconf = ResolvConf(util.load_file(resolv_path))
        resolvconf.parse()
    except IOError:
        util.logexc(LOG, "Failed to parse %s, use new empty file",
                    resolv_path)
        resolvconf = ResolvConf('')
        resolvconf.parse()

    # Nameservers and search domains that cannot be added are logged,
    # not fatal.
    for server in nameservers:
        try:
            resolvconf.add_nameserver(server)
        except ValueError:
            util.logexc(LOG, "Failed to add nameserver %s", server)
    for domain in searchdomains:
        try:
            resolvconf.add_search_domain(domain)
        except ValueError:
            util.logexc(LOG, "Failed to add search domain %s", domain)

    util.write_file(resolv_path, str(resolvconf), 0o644)
def push_log_to_kvp(file_name=CFG_BUILTIN['def_log_file']):
    """Push the tail (or all) of cloud-init.log to KVP.

    A marker file records that the push already happened, so repeated
    calls are no-ops.
    """
    if os.path.isfile(LOG_PUSHED_TO_KVP_MARKER_FILE):
        report_diagnostic_event("cloud-init.log is already pushed to KVP")
        return

    LOG.debug("Dumping cloud-init.log file to KVP")
    try:
        with open(file_name, "rb") as f:
            f.seek(0, os.SEEK_END)
            file_size = f.tell()
            # At most the last MAX_LOG_TO_KVP_LENGTH bytes are pushed.
            seek_index = max(file_size - MAX_LOG_TO_KVP_LENGTH, 0)
            report_diagnostic_event(
                "Dumping last {} bytes of cloud-init.log file to KVP".format(
                    file_size - seek_index))
            f.seek(seek_index, os.SEEK_SET)
            report_compressed_event("cloud-init.log", f.read())
        util.write_file(LOG_PUSHED_TO_KVP_MARKER_FILE, '')
    except Exception as ex:
        report_diagnostic_event("Exception when dumping log file: %s"
                                % repr(ex))
def apply_apt_config(cfg, proxy_fname, config_fname):
    """apply_apt_config
    Applies any apt*proxy config from if specified

    Writes proxy_fname when any proxy key is set, otherwise removes it;
    writes config_fname from cfg['conf'], otherwise removes it.
    """
    # Set up any apt proxy
    cfgs = (('proxy', 'Acquire::http::Proxy "%s";'),
            ('http_proxy', 'Acquire::http::Proxy "%s";'),
            ('ftp_proxy', 'Acquire::ftp::Proxy "%s";'),
            ('https_proxy', 'Acquire::https::Proxy "%s";'))

    proxies = [fmt % cfg.get(name) for (name, fmt) in cfgs if cfg.get(name)]
    # Idiomatic truthiness check instead of len(proxies).
    if proxies:
        LOG.debug("write apt proxy info to %s", proxy_fname)
        util.write_file(proxy_fname, '\n'.join(proxies) + '\n')
    elif os.path.isfile(proxy_fname):
        util.del_file(proxy_fname)
        LOG.debug("no apt proxy configured, removed %s", proxy_fname)

    # .get() already defaults to None; the explicit default was redundant.
    if cfg.get('conf'):
        LOG.debug("write apt config info to %s", config_fname)
        util.write_file(config_fname, cfg.get('conf'))
    elif os.path.isfile(config_fname):
        util.del_file(config_fname)
        LOG.debug("no apt config configured, removed %s", config_fname)
def test_config_from_cmdline_net_cfg(self):
    """config_from_klibc_net_cfg merges per-nic files with mac_addrs."""
    macs = {'eth1': 'b8:ae:ed:75:ff:2b',
            'eth0': 'b8:ae:ed:75:ff:2a'}
    dhcp = copy.deepcopy(DHCP_EXPECTED_1)
    dhcp['mac_address'] = macs['eth0']
    static = copy.deepcopy(STATIC_EXPECTED_1)
    static['mac_address'] = macs['eth1']
    expected = {'version': 1, 'config': [dhcp, static]}

    pairs = (('net-eth0.cfg', DHCP_CONTENT_1),
             ('net-eth1.cfg', STATIC_CONTENT_1))
    files = []
    with util.tempdir() as tmpd:
        for fname, content in pairs:
            path = os.path.join(tmpd, fname)
            util.write_file(path, content)
            files.append(path)
        found = cmdline.config_from_klibc_net_cfg(files=files,
                                                  mac_addrs=macs)
        self.assertEqual(found, expected)
def add_assertions(assertions=None):
    """Import list of assertions.

    Import assertions by concatenating each assertion into a string
    separated by a '\n'. Write this string to a instance file and
    then invoke `snap ack /path/to/file` and check for errors.
    If snap exits 0, then all assertions are imported.

    :param assertions: list of assertion strings to ack.
    :raises ValueError: when assertions is not a list, or is empty.
    """
    if not assertions:
        assertions = []
    if not isinstance(assertions, list):
        # BUG FIX: previously raised ValueError('...: %s', assertions) --
        # a two-element tuple instead of a formatted message.
        raise ValueError(
            'assertion parameter was not a list: %s' % assertions)

    combined = "\n".join(assertions)
    if len(combined) == 0:
        # Validate before building the snap command so callers get a clear
        # error instead of acking an empty file.
        raise ValueError("Assertion list is empty")

    snap_cmd = [SNAPPY_CMD, 'ack']
    for asrt in assertions:
        # Log only the first couple of lines of each assertion header.
        LOG.debug('Acking: %s', asrt.split('\n')[0:2])

    util.write_file(ASSERTIONS_FILE, combined.encode('utf-8'))
    util.subp(snap_cmd + [ASSERTIONS_FILE], capture=True)
def _should_reprovision(self, ret): """Whether or not we should poll IMDS for reprovisioning data. Also sets a marker file to poll IMDS. The marker file is used for the following scenario: the VM boots into this polling loop, which we expect to be proceeding infinitely until the VM is picked. If for whatever reason the platform moves us to a new host (for instance a hardware issue), we need to keep polling. However, since the VM reports ready to the Fabric, we will not attach the ISO, thus cloud-init needs to have a way of knowing that it should jump back into the polling loop in order to retrieve the ovf_env.""" if not ret: return False (_md, _userdata_raw, cfg, _files) = ret path = REPROVISION_MARKER_FILE if (cfg.get('PreprovisionedVm') is True or os.path.isfile(path)): if not os.path.isfile(path): LOG.info("Creating a marker file to poll imds: %s", path) util.write_file(path, "{pid}: {time}\n".format( pid=os.getpid(), time=time())) return True return False
def configure(config, server_cfg=SERVER_CFG,
              pubcert_file=PUBCERT_FILE, pricert_file=PRICERT_FILE):
    """Merge the cloud-config 'mcollective' settings into server.cfg.

    Existing server.cfg values are loaded first (when the file exists) so
    user-supplied settings are layered on top; the previous file is kept
    as server.cfg.old before the merged result is written back.
    """
    # Read server.cfg (if it exists) values from the
    # original file in order to be able to mix the rest up.
    try:
        # decode=False: ConfigObj is fed the raw bytes via io.BytesIO.
        old_contents = util.load_file(server_cfg, quiet=False, decode=False)
        mcollective_config = ConfigObj(io.BytesIO(old_contents))
    except IOError as e:
        # Only a missing file is acceptable; re-raise anything else.
        if e.errno != errno.ENOENT:
            raise
        else:
            LOG.debug(
                "Did not find file %s (starting with an empty"
                " config)", server_cfg)
            mcollective_config = ConfigObj()
    for (cfg_name, cfg) in config.items():
        if cfg_name == 'public-cert':
            # Certificates are written to disk and referenced by path,
            # and SSL is enabled as the security provider.
            util.write_file(pubcert_file, cfg, mode=0o644)
            mcollective_config['plugin.ssl_server_public'] = pubcert_file
            mcollective_config['securityprovider'] = 'ssl'
        elif cfg_name == 'private-cert':
            # Private key is written with restrictive permissions.
            util.write_file(pricert_file, cfg, mode=0o600)
            mcollective_config['plugin.ssl_server_private'] = pricert_file
            mcollective_config['securityprovider'] = 'ssl'
        else:
            if isinstance(cfg, str):
                # Just set it in the 'main' section
                mcollective_config[cfg_name] = cfg
            elif isinstance(cfg, (dict)):
                # Iterate through the config items, create a section if
                # it is needed and then add/or create items as needed
                if cfg_name not in mcollective_config.sections:
                    mcollective_config[cfg_name] = {}
                for (o, v) in cfg.items():
                    mcollective_config[cfg_name][o] = v
            else:
                # Otherwise just try to convert it to a string
                mcollective_config[cfg_name] = str(cfg)
    try:
        # We got all our config as wanted we'll copy
        # the previous server.cfg and overwrite the old with our new one
        util.copy(server_cfg, "%s.old" % (server_cfg))
    except IOError as e:
        if e.errno == errno.ENOENT:
            # Doesn't exist to copy...
            pass
        else:
            raise
    # Now we got the whole (new) file, write to disk...
    contents = io.BytesIO()
    mcollective_config.write(contents)
    util.write_file(server_cfg, contents.getvalue(), mode=0o644)
def test_get_data_cloudinit_metadata_not_found(self):
    """Test metadata file can't be found."""
    paths = Paths({"cloud_dir": self.tdir})
    ds = self.datasource(
        sys_cfg={"disable_vmware_customization": True},
        distro={},
        paths=paths,
    )
    # Write a customization conf that names a metadata file ...
    conf_file = self.tmp_path("test-cust", self.tdir)
    util.write_file(conf_file, dedent("""\
        [CLOUDINIT]
        METADATA = test-meta
        """))
    # ... but deliberately do not create the metadata file itself.
    with mock.patch(
        MPATH + "set_customization_status", return_value=("msg", b"")
    ):
        with self.assertRaises(FileNotFoundError) as context:
            wrap_and_call(
                "cloudinit.sources.DataSourceOVF",
                {
                    "dmi.read_dmi_data": "vmware",
                    "util.del_dir": True,
                    "search_file": self.tdir,
                    "wait_for_imc_cfg_file": conf_file,
                    "get_nics_to_enable": "",
                },
                ds.get_data,
            )
    self.assertIn("is not found", str(context.exception))
def show_warning(name, cfg=None, sleep=None, mode=True, **kwargs):
    """Write and log the formatted warning WARNINGS[name].

    kwargs are interpolated into the message via str.format; sleep and
    mode serve as defaults when cfg['warnings'][name] is not present.
    """
    if cfg is None:
        cfg = {}

    mode, sleep = _load_warn_cfg(cfg, name, mode=mode, sleep=sleep)
    if not mode:
        return

    msg = WARNINGS[name].format(**kwargs)
    msgwidth = 70
    linewidth = msgwidth + 4

    # Box the message with '#' borders and a '*' top/bottom rule.
    fmt = "# %%-%ds #" % msgwidth
    topline = "*" * linewidth + "\n"
    fmtlines = [fmt % line for line in msg.strip("\n").splitlines()]

    closeline = topline
    if sleep:
        sleepmsg = " [sleeping for %d seconds] " % sleep
        closeline = sleepmsg.center(linewidth, "*") + "\n"

    util.write_file(
        os.path.join(_get_warn_dir(cfg), name),
        topline + "\n".join(fmtlines) + "\n" + topline,
    )
    LOG.warning("%s%s\n%s", topline, "\n".join(fmtlines), closeline)

    if sleep:
        LOG.debug("sleeping %d seconds for warning '%s'", sleep, name)
        time.sleep(sleep)
def test_dhcp_discovery_run_in_sandbox_waits_on_lease_and_pid(self, m_subp,
                                                              m_wait,
                                                              m_kill,
                                                              m_getppid):
    """dhcp_discovery waits for the presence of pidfile and dhcp.leases."""
    m_subp.return_value = ('', '')
    tmpdir = self.tmp_dir()
    script = os.path.join(tmpdir, 'dhclient.orig')
    write_file(script, '#!/bin/bash\necho fake-dhclient', mode=0o755)
    # Intentionally create neither the pid file nor the leases file.
    pidfile = self.tmp_path('dhclient.pid', tmpdir)
    leasefile = self.tmp_path('dhcp.leases', tmpdir)
    m_wait.return_value = [pidfile]  # Report the pidfile as still missing
    m_getppid.return_value = 1  # Indicate that dhclient has daemonized
    self.assertEqual([], dhcp_discovery(script, 'eth9', tmpdir))
    self.assertEqual(
        mock.call([pidfile, leasefile], maxwait=5, naplen=0.01),
        m_wait.call_args_list[0])
    self.assertIn(
        'WARNING: dhclient did not produce expected files: dhclient.pid',
        self.logs.getvalue())
    m_kill.assert_not_called()
def test_get_data_cust_script_enabled(self):
    """If custom script is enabled by VMware tools configuration,
    execute the script.
    """
    paths = Paths({'cloud_dir': self.tdir})
    ds = self.datasource(sys_cfg={'disable_vmware_customization': False},
                         distro={},
                         paths=paths)
    # Write a customization conf naming a script that does not exist.
    conf_file = self.tmp_path('test-cust', self.tdir)
    util.write_file(conf_file, dedent("""\
        [CUSTOM-SCRIPT]
        SCRIPT-NAME = test-script
        [MISC]
        MARKER-ID = 12345346
        """))
    # get_tools_config returning "true" means custom scripts are enabled.
    with mock.patch(MPATH + 'get_tools_config', return_value="true"):
        with mock.patch(MPATH + 'set_customization_status',
                        return_value=('msg', b'')):
            with self.assertRaises(CustomScriptNotFound) as context:
                wrap_and_call(
                    'cloudinit.sources.DataSourceOVF',
                    {
                        'dmi.read_dmi_data': 'vmware',
                        'util.del_dir': True,
                        'search_file': self.tdir,
                        'wait_for_imc_cfg_file': conf_file,
                        'get_nics_to_enable': ''
                    },
                    ds.get_data)
    # Verify custom script is trying to be executed
    customscript = self.tmp_path('test-script', self.tdir)
    self.assertIn('Script %s not found!!' % customscript,
                  str(context.exception))
def update_cert_config(distro_cfg):
    """
    Update Certificate config file to add the file path managed cloud-init

    @param distro_cfg: A hash providing _distro_ca_certs_configs function.
    """
    conf_path = distro_cfg["ca_cert_config"]
    if conf_path is None:
        return
    cert_name = distro_cfg["ca_cert_filename"]
    if os.stat(conf_path).st_size == 0:
        # If the CA_CERT_CONFIG file is empty (i.e. all existing CA certs
        # have been deleted) then simply output a single line with the
        # cloud-init cert filename.
        out = "%s\n" % cert_name
    else:
        # Strip any existing occurrence of our filename before appending.
        # Blank lines must not be left in the file because they cause
        # subsequent entries to be ignored. (LP: #1077020)
        kept = [line for line in util.load_file(conf_path).splitlines()
                if line != cert_name]
        out = "%s\n%s\n" % ("\n".join(kept).rstrip(), cert_name)
    util.write_file(conf_path, out, omode="wb")
def test_validation_cert_with_system(self):
    # test validation_cert content is not written over system file
    tpl_file = util.load_file(CLIENT_TEMPL)
    self.patchUtils(self.tmp)
    self.patchOS(self.tmp)

    v_path = "/etc/chef/vkey.pem"
    expected_cert = "this is the system file certificate"
    cfg = {
        "chef": {
            "server_url": "localhost",
            "validation_name": "bob",
            "validation_key": v_path,
            # "system" tells the handler to keep the existing key file.
            "validation_cert": "system",
        },
    }
    util.write_file("/etc/cloud/templates/chef_client.rb.tmpl", tpl_file)
    util.write_file(v_path, expected_cert)
    cc_chef.handle("chef", cfg, get_cloud(), LOG, [])
    content = util.load_file(cc_chef.CHEF_RB_PATH)
    self.assertIn(v_path, content)
    util.load_file(v_path)
    self.assertEqual(expected_cert, util.load_file(v_path))
def write_sudo_rules(self, user, rules, sudo_file=None):
    """Append sudoers rules for user to sudo_file (default ci_sudoers_fn).

    rules may be a single string or a list/tuple of rule strings; any
    other type raises TypeError.
    """
    if not sudo_file:
        sudo_file = self.ci_sudoers_fn

    lines = ['', "# User rules for %s" % user]
    if isinstance(rules, (list, tuple)):
        lines.extend("%s %s" % (user, rule) for rule in rules)
    elif isinstance(rules, str):
        lines.append("%s %s" % (user, rules))
    else:
        msg = "Can not create sudoers rule addition with type %r"
        raise TypeError(msg % (type_utils.obj_name(rules)))
    content = "\n".join(lines) + "\n"  # trailing newline

    self.ensure_sudo_dir(os.path.dirname(sudo_file))
    if not os.path.exists(sudo_file):
        # New file gets the standard cloud-init header prepended.
        try:
            util.write_file(sudo_file,
                            "\n".join([util.make_header(), content]),
                            0o440)
        except IOError as e:
            util.logexc(LOG, "Failed to write sudoers file %s", sudo_file)
            raise e
    else:
        try:
            util.append_file(sudo_file, content)
        except IOError as e:
            util.logexc(LOG, "Failed to append sudoers file %s", sudo_file)
            raise e
def walker_handle_handler(pdata, _ctype, _filename, payload):
    """Write an inline part-handler payload to disk, import it, and
    register it so later parts can be routed to it."""
    curcount = pdata["handlercount"]
    modname = PART_HANDLER_FN_TMPL % (curcount)
    frequency = pdata["frequency"]
    modfname = os.path.join(pdata["handlerdir"], "%s" % (modname))
    if not modfname.endswith(".py"):
        modfname = "%s.py" % (modfname)
    # TODO(harlowja): Check if path exists??
    util.write_file(modfname, payload, 0o600)
    try:
        mod = fixup_handler(importer.import_module(modname))
        call_begin(mod, pdata["data"], frequency)
        # Only register and increment after the above have worked, so we
        # don't register if it fails starting.
        pdata["handlers"].register(mod, initialized=True)
        pdata["handlercount"] = curcount + 1
    except Exception:
        util.logexc(
            LOG,
            "Failed at registering python file: %s (part handler %s)",
            modfname,
            curcount,
        )
def write_files(datadir, files, dirmode=None):
    """Write the datasource files into datadir, redacting the plain-text
    password Azure places in ovf-env.xml."""

    def _redact_password(cnt, fname):
        """Azure provides the UserPassword in plain text. So we redact it"""
        try:
            root = ET.fromstring(cnt)
            for elem in root.iter():
                if ('UserPassword' in elem.tag and
                        elem.text != DEF_PASSWD_REDACTION):
                    elem.text = DEF_PASSWD_REDACTION
            return ET.tostring(root)
        except Exception:
            # Best effort: keep original content if the XML can't be parsed.
            LOG.critical("failed to redact userpassword in %s", fname)
            return cnt

    if not datadir:
        return
    util.ensure_dir(datadir, dirmode)
    for name, content in (files or {}).items():
        fname = os.path.join(datadir, name)
        if 'ovf-env.xml' in name:
            content = _redact_password(content, fname)
        util.write_file(filename=fname, content=content, mode=0o600)
def render_network_state(self, network_state, templates=None, target=None):
    """Render sysconfig ifcfg files, DNS config, NetworkManager config,
    persistent net rules and the control file for network_state."""
    if not templates:
        templates = self.templates
    file_mode = 0o644

    # Per-interface sysconfig files.
    base_sysconf_dir = util.target_path(target, self.sysconf_dir)
    rendered = self._render_sysconfig(base_sysconf_dir, network_state,
                                      templates=templates)
    for path, data in rendered.items():
        util.write_file(path, data, file_mode)

    if self.dns_path:
        dns_path = util.target_path(target, self.dns_path)
        util.write_file(
            dns_path,
            self._render_dns(network_state, existing_dns_path=dns_path),
            file_mode)

    if self.networkmanager_conf_path:
        nm_conf_path = util.target_path(target,
                                        self.networkmanager_conf_path)
        nm_conf_content = self._render_networkmanager_conf(network_state,
                                                           templates)
        # Only write the file when there is something to say.
        if nm_conf_content:
            util.write_file(nm_conf_path, nm_conf_content, file_mode)

    if self.netrules_path:
        netrules_content = self._render_persistent_net(network_state)
        netrules_path = util.target_path(target, self.netrules_path)
        util.write_file(netrules_path, netrules_content, file_mode)

    sysconfig_path = util.target_path(target, templates.get('control'))
    # Distros configuring /etc/sysconfig/network as a file e.g. Centos
    if sysconfig_path.endswith('network'):
        util.ensure_dir(os.path.dirname(sysconfig_path))
        netcfg = [_make_header(), 'NETWORKING=yes']
        if network_state.use_ipv6:
            netcfg.extend(['NETWORKING_IPV6=yes', 'IPV6_AUTOCONF=no'])
        util.write_file(sysconfig_path,
                        "\n".join(netcfg) + "\n", file_mode)
def test_validation_cert_with_system(self):
    # test validation_cert content is not written over system file
    tpl_file = util.load_file('templates/chef_client.rb.tmpl')
    self.patchUtils(self.tmp)
    self.patchOS(self.tmp)

    v_path = '/etc/chef/vkey.pem'
    expected_cert = "this is the system file certificate"
    cfg = {
        'chef': {
            'server_url': 'localhost',
            'validation_name': 'bob',
            'validation_key': v_path,
            # "system" tells the handler to keep the existing key file.
            'validation_cert': "system"
        },
    }
    util.write_file('/etc/cloud/templates/chef_client.rb.tmpl', tpl_file)
    util.write_file(v_path, expected_cert)
    cc_chef.handle('chef', cfg, self.fetch_cloud('ubuntu'), LOG, [])
    content = util.load_file(cc_chef.CHEF_RB_PATH)
    self.assertIn(v_path, content)
    util.load_file(v_path)
    self.assertEqual(expected_cert, util.load_file(v_path))
def test_get_interfaces_by_mac_skips_missing_mac(self):
    """Ignore interfaces without an address from get_interfaces_by_mac."""
    mac = 'aa:bb:cc:aa:bb:cc'
    # eth1 has an assign type but no address file at all.
    write_file(os.path.join(self.sysdir, 'eth1', 'addr_assign_type'), '0')
    self.assertFalse(
        os.path.exists(os.path.join(self.sysdir, 'eth1', 'address')))
    # eth2 has both, so it is the only interface reported.
    write_file(os.path.join(self.sysdir, 'eth2', 'addr_assign_type'), '0')
    write_file(os.path.join(self.sysdir, 'eth2', 'address'), mac)
    expected = [('eth2', 'aa:bb:cc:aa:bb:cc', None, None)]
    self.assertEqual(expected, net.get_interfaces())
def test_generate_fallback_finds_connected_eth_with_mac(self):
    """generate_fallback_config finds any connected device with a mac."""
    mac = 'aa:bb:cc:aa:bb:cc'
    for nic in ('eth0', 'eth1'):
        write_file(os.path.join(self.sysdir, nic, 'carrier'), '1')
    # Only eth1 exposes a mac address, so only it is eligible.
    write_file(os.path.join(self.sysdir, 'eth1', 'address'), mac)
    expected = {
        'version': 1,
        'config': [{'type': 'physical',
                    'mac_address': mac,
                    'name': 'eth1',
                    'subnets': [{'type': 'dhcp'}]}],
    }
    self.assertEqual(expected, net.generate_fallback_config())
def handle(name, cfg, cloud, log, _args):
    """Install mcollective and merge cc 'mcollective' config into server.cfg.

    NOTE(review): Python 2 only -- uses dict.iteritems() and basestring.
    """
    # If there isn't a mcollective key in the configuration don't do anything
    if 'mcollective' not in cfg:
        log.debug(("Skipping module named %s, "
                   "no 'mcollective' key in configuration"), name)
        return
    mcollective_cfg = cfg['mcollective']
    # Start by installing the mcollective package ...
    cloud.distro.install_packages(("mcollective", ))

    # ... and then update the mcollective configuration
    if 'conf' in mcollective_cfg:
        # Read server.cfg values from the
        # original file in order to be able to mix the rest up
        mcollective_config = ConfigObj(SERVER_CFG)
        # See: http://tiny.cc/jh9agw
        for (cfg_name, cfg) in mcollective_cfg['conf'].iteritems():
            if cfg_name == 'public-cert':
                # Cert material is written to disk and referenced by path.
                util.write_file(PUBCERT_FILE, cfg, mode=0o644)
                mcollective_config['plugin.ssl_server_public'] = PUBCERT_FILE
                mcollective_config['securityprovider'] = 'ssl'
            elif cfg_name == 'private-cert':
                # Private key gets restrictive permissions.
                util.write_file(PRICERT_FILE, cfg, mode=0o600)
                mcollective_config['plugin.ssl_server_private'] = PRICERT_FILE
                mcollective_config['securityprovider'] = 'ssl'
            else:
                if isinstance(cfg, (basestring, str)):
                    # Just set it in the 'main' section
                    mcollective_config[cfg_name] = cfg
                elif isinstance(cfg, (dict)):
                    # Iterate through the config items, create a section
                    # if it is needed and then add/or create items as needed
                    if cfg_name not in mcollective_config.sections:
                        mcollective_config[cfg_name] = {}
                    for (o, v) in cfg.iteritems():
                        mcollective_config[cfg_name][o] = v
                else:
                    # Otherwise just try to convert it to a string
                    mcollective_config[cfg_name] = str(cfg)
        # We got all our config as wanted we'll rename
        # the previous server.cfg and create our new one
        util.rename(SERVER_CFG, "%s.old" % (SERVER_CFG))
        # Now we got the whole file, write to disk...
        contents = StringIO()
        mcollective_config.write(contents)
        contents = contents.getvalue()
        util.write_file(SERVER_CFG, contents, mode=0o644)

    # Start mcollective
    util.subp(['service', 'mcollective', 'start'], capture=False)
def handle(_name, cfg, _cloud, _log, _args):
    """Install mcollective and merge cc 'mcollective' config into server.cfg.

    NOTE(review): legacy Python 2 code -- old-style octal literals (0644),
    dict.iteritems(), StringIO.StringIO and the ConfigParser module.
    """
    # If there isn't a mcollective key in the configuration don't do anything
    if 'mcollective' not in cfg:
        return
    mcollective_cfg = cfg['mcollective']
    # Start by installing the mcollective package ...
    cc.install_packages(("mcollective", ))

    # ... and then update the mcollective configuration
    if 'conf' in mcollective_cfg:
        # Create object for reading server.cfg values
        mcollective_config = ConfigParser.ConfigParser()
        # Read server.cfg values from original file in order to be able to
        # mix the rest up
        mcollective_config.readfp(FakeSecHead(open('/etc/mcollective/'
                                                   'server.cfg')))
        for cfg_name, cfg in mcollective_cfg['conf'].iteritems():
            if cfg_name == 'public-cert':
                util.write_file(pubcert_file, cfg, mode=0644)
                mcollective_config.set(cfg_name,
                                       'plugin.ssl_server_public',
                                       pubcert_file)
                mcollective_config.set(cfg_name, 'securityprovider', 'ssl')
            elif cfg_name == 'private-cert':
                util.write_file(pricert_file, cfg, mode=0600)
                mcollective_config.set(cfg_name,
                                       'plugin.ssl_server_private',
                                       pricert_file)
                mcollective_config.set(cfg_name, 'securityprovider', 'ssl')
            else:
                # Iterate throug the config items, we'll use ConfigParser.set
                # to overwrite or create new items as needed
                for o, v in cfg.iteritems():
                    mcollective_config.set(cfg_name, o, v)
        # We got all our config as wanted we'll rename
        # the previous server.cfg and create our new one
        os.rename('/etc/mcollective/server.cfg',
                  '/etc/mcollective/server.cfg.old')
        outputfile = StringIO.StringIO()
        mcollective_config.write(outputfile)
        # Now we got the whole file, write to disk except first line
        # Note below, that we've just used ConfigParser because it generally
        # works. Below, we remove the initial 'nullsection' header
        # and then change 'key = value' to 'key: value'. The global
        # search and replace of '=' with ':' could be problematic though.
        # this most likely needs fixing.
        util.write_file('/etc/mcollective/server.cfg',
                        outputfile.getvalue().replace('[nullsection]\n',
                                                      '').replace(' =', ':'),
                        mode=0644)
    # Start mcollective
    subprocess.check_call(['service', 'mcollective', 'start'])