def install_chef(cloud, chef_cfg, log):
    """Install chef per 'install_type' and return whether chef-client
    should subsequently be executed (the 'exec' flag)."""
    # If chef is not installed, we install chef based on 'install_type'
    install_type = util.get_cfg_option_str(chef_cfg, 'install_type',
                                           'packages')
    run = util.get_cfg_option_bool(chef_cfg, 'exec', default=False)
    if install_type == "gems":
        # This will install and run the chef-client from gems
        chef_version = util.get_cfg_option_str(chef_cfg, 'version', None)
        ruby_version = util.get_cfg_option_str(chef_cfg, 'ruby_version',
                                               RUBY_VERSION_DEFAULT)
        install_chef_from_gems(ruby_version, chef_version, cloud.distro)
        # Retain backwards compat, by preferring True instead of False
        # when not provided/overriden...
        run = util.get_cfg_option_bool(chef_cfg, 'exec', default=True)
    elif install_type == 'packages':
        # This will install and run the chef-client from packages
        cloud.distro.install_packages(('chef',))
    elif install_type == 'omnibus':
        # Delegate to the omnibus installer helper (url/retries optional).
        omnibus_version = util.get_cfg_option_str(chef_cfg, "omnibus_version")
        install_chef_from_omnibus(
            url=util.get_cfg_option_str(chef_cfg, "omnibus_url"),
            retries=util.get_cfg_option_int(chef_cfg, "omnibus_url_retries"),
            omnibus_version=omnibus_version)
    else:
        log.warn("Unknown chef install type '%s'", install_type)
        run = False
    return run
def get_template_params(iid, chef_cfg, log): params = CHEF_RB_TPL_DEFAULTS.copy() # Allow users to overwrite any of the keys they want (if they so choose), # when a value is None, then the value will be set to None and no boolean # or string version will be populated... for (k, v) in chef_cfg.items(): if k not in CHEF_RB_TPL_KEYS: log.debug("Skipping unknown chef template key '%s'", k) continue if v is None: params[k] = None else: # This will make the value a boolean or string... if k in CHEF_RB_TPL_BOOL_KEYS: params[k] = util.get_cfg_option_bool(chef_cfg, k) else: params[k] = util.get_cfg_option_str(chef_cfg, k) # These ones are overwritten to be exact values... params.update({ 'generated_by': util.make_header(), 'node_name': util.get_cfg_option_str(chef_cfg, 'node_name', default=iid), 'environment': util.get_cfg_option_str(chef_cfg, 'environment', default='_default'), # These two are mandatory... 'server_url': chef_cfg['server_url'], 'validation_name': chef_cfg['validation_name'], }) return params
def install_chef(cloud, chef_cfg, log):
    """Install chef per 'install_type' and return whether chef-client
    should subsequently be executed (the 'exec' flag).

    Variant that performs the omnibus download/execute inline.
    """
    # If chef is not installed, we install chef based on 'install_type'
    install_type = util.get_cfg_option_str(chef_cfg, 'install_type',
                                           'packages')
    run = util.get_cfg_option_bool(chef_cfg, 'exec', default=False)
    if install_type == "gems":
        # This will install and run the chef-client from gems
        chef_version = util.get_cfg_option_str(chef_cfg, 'version', None)
        ruby_version = util.get_cfg_option_str(chef_cfg, 'ruby_version',
                                               RUBY_VERSION_DEFAULT)
        install_chef_from_gems(cloud.distro, ruby_version, chef_version)
        # Retain backwards compat, by preferring True instead of False
        # when not provided/overriden...
        run = util.get_cfg_option_bool(chef_cfg, 'exec', default=True)
    elif install_type == 'packages':
        # This will install and run the chef-client from packages
        cloud.distro.install_packages(('chef',))
    elif install_type == 'omnibus':
        # This will install as a omnibus unified package
        url = util.get_cfg_option_str(chef_cfg, "omnibus_url", OMNIBUS_URL)
        retries = max(0, util.get_cfg_option_int(chef_cfg,
                                                 "omnibus_url_retries",
                                                 default=OMNIBUS_URL_RETRIES))
        content = url_helper.readurl(url=url, retries=retries)
        with util.tempdir() as tmpd:
            # Use tmpdir over tmpfile to avoid 'text file busy' on execute
            tmpf = "%s/chef-omnibus-install" % tmpd
            # 0700: the installer script must be executable (py2 octal).
            util.write_file(tmpf, str(content), mode=0700)
            util.subp([tmpf], capture=False)
    else:
        log.warn("Unknown chef install type '%s'", install_type)
        run = False
    return run
def handle(_name, cfg, cloud, log, _args):
    """Configure chef (dirs, validation key, client.rb, firstboot.json),
    install it if missing, and run chef-client when installed from gems."""
    # If there isn't a chef key in the configuration don't do anything
    if 'chef' not in cfg:
        return
    chef_cfg = cfg['chef']

    # ensure the chef directories we use exist
    mkdirs(['/etc/chef', '/var/log/chef', '/var/lib/chef',
            '/var/cache/chef', '/var/backups/chef', '/var/run/chef'])

    # set the validation key based on the presence of either 'validation_key'
    # or 'validation_cert'. In the case where both exist, 'validation_key'
    # takes precedence
    for key in ('validation_key', 'validation_cert'):
        if key in chef_cfg and chef_cfg[key]:
            with open('/etc/chef/validation.pem', 'w') as validation_key_fh:
                validation_key_fh.write(chef_cfg[key])
            break

    # create the chef config from template
    util.render_to_file('chef_client.rb', '/etc/chef/client.rb',
                        {'server_url': chef_cfg['server_url'],
                         'node_name': util.get_cfg_option_str(
                             chef_cfg, 'node_name',
                             cloud.datasource.get_instance_id()),
                         'environment': util.get_cfg_option_str(
                             chef_cfg, 'environment', '_default'),
                         'validation_name': chef_cfg['validation_name']})

    # set the firstboot json
    with open('/etc/chef/firstboot.json', 'w') as firstboot_json_fh:
        initial_json = {}
        if 'run_list' in chef_cfg:
            initial_json['run_list'] = chef_cfg['run_list']
        if 'initial_attributes' in chef_cfg:
            initial_attributes = chef_cfg['initial_attributes']
            for k in initial_attributes.keys():
                initial_json[k] = initial_attributes[k]
        firstboot_json_fh.write(json.dumps(initial_json))

    # If chef is not installed, we install chef based on 'install_type'
    if not os.path.isfile('/usr/bin/chef-client'):
        install_type = util.get_cfg_option_str(chef_cfg, 'install_type',
                                               'packages')
        if install_type == "gems":
            # this will install and run the chef-client from gems
            chef_version = util.get_cfg_option_str(chef_cfg, 'version', None)
            # NOTE(review): 'ruby_version_default' is lowercase here while
            # other variants use RUBY_VERSION_DEFAULT -- confirm this name
            # exists at module level.
            ruby_version = util.get_cfg_option_str(chef_cfg, 'ruby_version',
                                                   ruby_version_default)
            install_chef_from_gems(ruby_version, chef_version)
            # and finally, run chef-client
            log.debug('running chef-client')
            subprocess.check_call(['/usr/bin/chef-client', '-d', '-i',
                                   '1800', '-s', '20'])
        else:
            # this will install and run the chef-client from packages
            # NOTE(review): 'cc' is not defined in this function --
            # presumably a module-level helper; verify it exists.
            cc.install_packages(('chef',))
def _initialize_filesystem(self):
    """Create the required directory tree and fix up the configured
    log file's existence and ownership."""
    util.ensure_dirs(self._initial_subdirs())
    log_file = util.get_cfg_option_str(self.cfg, "def_log_file")
    perms = util.get_cfg_option_str(self.cfg, "syslog_fix_perms")
    # Nothing more to do without a configured log file.
    if not log_file:
        return
    util.ensure_file(log_file)
    if not perms:
        return
    # perms is a "user:group" spec; ownership fixup is best-effort.
    u, g = util.extract_usergroup(perms)
    try:
        util.chownbyname(log_file, u, g)
    except OSError:
        util.logexc(LOG, "Unable to change the ownership of %s to "
                    "user %s, group %s", log_file, u, g)
def handle(_name, cfg, _cloud, log, args):
    """Enable or disable byobu for the default user and/or system-wide.

    Accepts a value from args or the 'byobu_by_default' config key.
    """
    if len(args) != 0:
        value = args[0]
    else:
        value = util.get_cfg_option_str(cfg, "byobu_by_default", "")
    if not value:
        return

    # Shorthand 'user'/'system' means enable for that scope.
    if value == "user" or value == "system":
        value = "enable-%s" % value

    valid = ("enable-user", "enable-system", "enable",
             "disable-user", "disable-system", "disable")
    if not value in valid:
        log.warn("Unknown value %s for byobu_by_default" % value)
        # Bug fix: previously execution continued with an unknown value
        # and spawned a pointless shell; nothing sensible can be done.
        return

    mod_user = value.endswith("-user")
    mod_sys = value.endswith("-system")
    if value.startswith("enable"):
        bl_inst = "install"
        dc_val = "byobu byobu/launch-by-default boolean true"
        mod_sys = True
    else:
        if value == "disable":
            mod_user = True
            mod_sys = True
        bl_inst = "uninstall"
        dc_val = "byobu byobu/launch-by-default boolean false"

    # Build one shell command that accumulates failures in $X.
    shcmd = ""
    if mod_user:
        user = util.get_cfg_option_str(cfg, "user", "ubuntu")
        shcmd += " sudo -Hu \"%s\" byobu-launcher-%s" % (user, bl_inst)
        shcmd += " || X=$(($X+1)); "
    if mod_sys:
        shcmd += "echo \"%s\" | debconf-set-selections" % dc_val
        shcmd += " && dpkg-reconfigure byobu --frontend=noninteractive"
        shcmd += " || X=$(($X+1)); "

    cmd = ["/bin/sh", "-c", "%s %s %s" % ("X=0;", shcmd, "exit $X")]
    log.debug("setting byobu to %s" % value)

    try:
        subprocess.check_call(cmd)
    except subprocess.CalledProcessError as e:
        log.debug(traceback.format_exc(e))
        raise Exception("Cmd returned %s: %s" % (e.returncode, cmd))
    except OSError as e:
        log.debug(traceback.format_exc(e))
        raise Exception("Cmd failed to execute: %s" % (cmd))
def handle(name, cfg, cloud, log, args):
    """Apply the configured (or distro-default) locale via the distro."""
    # Positional args (from the command line) win over cloud-config.
    locale = args[0] if len(args) != 0 else util.get_cfg_option_str(
        cfg, "locale", cloud.get_locale())
    if not locale:
        log.debug(("Skipping module named %s, "
                   "no 'locale' configuration found"), name)
        return
    log.debug("Setting locale to %s", locale)
    cfgfile = util.get_cfg_option_str(cfg, "locale_configfile")
    cloud.distro.apply_locale(locale, cfgfile)
def handle(name, cfg, cloud, log, args):
    """Set the system locale unless explicitly disabled in config."""
    if len(args) == 0:
        locale = util.get_cfg_option_str(cfg, "locale", cloud.get_locale())
    else:
        locale = args[0]
    # A false-ish locale value means the user opted out entirely.
    if util.is_false(locale):
        log.debug("Skipping module named %s, disabled by config: %s",
                  name, locale)
        return
    log.debug("Setting locale to %s", locale)
    cfgfile = util.get_cfg_option_str(cfg, "locale_configfile")
    cloud.distro.apply_locale(locale, cfgfile)
def handle(_name, cfg, _cloud, log, args):
    """Render and print the final boot message, and record the system
    uptime into the boot_finished file."""
    if len(args) != 0:
        msg_in = args[0]
    else:
        msg_in = util.get_cfg_option_str(cfg, "final_message", final_message)

    # First field of /proc/uptime is seconds since boot.
    try:
        with open("/proc/uptime") as uptimef:
            uptime = uptimef.read().split(" ")[0]
    except IOError:
        log.warn("unable to open /proc/uptime\n")
        uptime = "na"

    try:
        ts = time.strftime("%a, %d %b %Y %H:%M:%S %z", time.gmtime())
    except Exception:
        # strftime can fail on platforms lacking %z support; was a bare
        # 'except:' which also swallowed KeyboardInterrupt.
        ts = "na"

    try:
        subs = {'UPTIME': uptime, 'TIMESTAMP': ts}
        sys.stdout.write("%s\n" % util.render_string(msg_in, subs))
    except Exception as e:
        log.warn("failed to render string to stdout: %s" % e)

    # Use a context manager so the handle is closed even on write failure.
    with open(boot_finished, "wb") as fp:
        fp.write(uptime + "\n")
def handle(_name, cfg, _cloud, log, args):
    """Import ssh public keys for a user by running ssh-import-id.

    Positional args take precedence: args[0] is the user, the rest are ids;
    otherwise user/ids come from the 'user' and 'ssh_import_id' config keys.
    """
    if len(args) != 0:
        user = args[0]
        ids = []
        if len(args) > 1:
            ids = args[1:]
    else:
        user = util.get_cfg_option_str(cfg, "user", "ubuntu")
        ids = util.get_cfg_option_list_or_str(cfg, "ssh_import_id", [])

    # Nothing to import.
    if len(ids) == 0:
        return

    cmd = ["sudo", "-Hu", user, "ssh-import-id"] + ids

    log.debug("importing ssh ids. cmd = %s" % cmd)

    try:
        subprocess.check_call(cmd)
    except subprocess.CalledProcessError as e:
        log.debug(traceback.format_exc(e))
        raise Exception("Cmd returned %s: %s" % (e.returncode, cmd))
    except OSError as e:
        log.debug(traceback.format_exc(e))
        raise Exception("Cmd failed to execute: %s" % (cmd))
def logging_set_from_cfg(cfg):
    """Configure python logging from the 'log_cfg'/'log_cfgs' config keys.

    Tries each candidate fileConfig blob in order; raises if none applies.
    """
    log_cfgs = []
    logcfg = util.get_cfg_option_str(cfg, "log_cfg", False)
    if logcfg:
        # if there is a 'logcfg' entry in the config, respect
        # it, it is the old keyname
        log_cfgs = [logcfg]
    elif "log_cfgs" in cfg:
        # Bug fix: the loop variable used to shadow the 'cfg' parameter,
        # and the else-branch called append() with no argument (TypeError).
        for a_cfg in cfg['log_cfgs']:
            if isinstance(a_cfg, list):
                log_cfgs.append('\n'.join(a_cfg))
            else:
                log_cfgs.append(a_cfg)

    if not len(log_cfgs):
        sys.stderr.write("Warning, no logging configured\n")
        return

    for logcfg in log_cfgs:
        try:
            logging.config.fileConfig(StringIO.StringIO(logcfg))
            return
        except Exception:
            # Try the next candidate; was a bare 'except:' before.
            pass

    raise Exception("no valid logging found\n")
def handle(name, cfg, cloud, log, _args):
    """Manage /etc/hosts per the 'manage_etc_hosts' cloud-config option:
    render a distro template, update only localhost, or do nothing."""
    manage_hosts = util.get_cfg_option_str(cfg, "manage_etc_hosts", False)
    if util.translate_bool(manage_hosts, addons=['template']):
        (hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud)
        if not hostname:
            log.warn(("Option 'manage_etc_hosts' was set,"
                      " but no hostname was found"))
            return
        # Render from a template file
        tpl_fn_name = cloud.get_template_filename("hosts.%s" %
                                                  (cloud.distro.osfamily))
        if not tpl_fn_name:
            raise RuntimeError(("No hosts template could be"
                                " found for distro %s") %
                               (cloud.distro.osfamily))
        templater.render_to_file(tpl_fn_name, '/etc/hosts',
                                 {'hostname': hostname, 'fqdn': fqdn})
    elif manage_hosts == "localhost":
        (hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud)
        if not hostname:
            log.warn(("Option 'manage_etc_hosts' was set,"
                      " but no hostname was found"))
            return
        log.debug("Managing localhost in /etc/hosts")
        cloud.distro.update_etc_hosts(hostname, fqdn)
    else:
        log.debug(("Configuration option 'manage_etc_hosts' is not set,"
                   " not managing /etc/hosts in module %s"), name)
def handle(_name, cfg, cloud, log, _args):
    """Manage /etc/hosts (older variant): template render, localhost-only
    update, or nothing, based on 'manage_etc_hosts'."""
    (hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud)
    manage_hosts = util.get_cfg_option_str(cfg, "manage_etc_hosts", False)
    # Truthy strings and the literal 'template' all mean "render template".
    if manage_hosts in ("True", "true", True, "template"):
        # render from template file
        try:
            if not hostname:
                log.info("manage_etc_hosts was set, but no hostname found")
                return
            util.render_to_file('hosts', '/etc/hosts',
                                {'hostname': hostname, 'fqdn': fqdn})
        except Exception:
            log.warn("failed to update /etc/hosts")
            raise
    elif manage_hosts == "localhost":
        log.debug("managing 127.0.1.1 in /etc/hosts")
        update_etc_hosts(hostname, fqdn, log)
        return
    else:
        if manage_hosts not in ("False", False):
            log.warn("Unknown value for manage_etc_hosts. Assuming False")
        else:
            log.debug("not managing /etc/hosts")
def handle(name, cfg, _cloud, log, _args):
    """Preseed grub-pc debconf install-device selections (grub_dpkg)."""
    # Accept both the new 'grub_dpkg' and legacy 'grub-dpkg' config keys.
    mycfg = cfg.get("grub_dpkg", cfg.get("grub-dpkg", {}))
    if not mycfg:
        mycfg = {}
    enabled = mycfg.get('enabled', True)
    if util.is_false(enabled):
        log.debug("%s disabled by config grub_dpkg/enabled=%s", name, enabled)
        return
    idevs = util.get_cfg_option_str(mycfg, "grub-pc/install_devices", None)
    idevs_empty = util.get_cfg_option_str(mycfg,
                                          "grub-pc/install_devices_empty",
                                          None)
    if ((os.path.exists("/dev/sda1") and not os.path.exists("/dev/sda")) or
            (os.path.exists("/dev/xvda1") and
             not os.path.exists("/dev/xvda"))):
        # A partition exists without its parent disk (e.g. some xen guests):
        # there is no whole disk to install grub onto.
        if idevs is None:
            idevs = ""
        if idevs_empty is None:
            idevs_empty = "true"
    else:
        if idevs_empty is None:
            idevs_empty = "false"
        if idevs is None:
            idevs = "/dev/sda"
            for dev in ("/dev/sda", "/dev/vda", "/dev/xvda", "/dev/sda1",
                        "/dev/vda1", "/dev/xvda1"):
                if os.path.exists(dev):
                    idevs = dev
                    break
    # now idevs and idevs_empty are set to determined values
    # or, those set by user
    dconf_sel = (("grub-pc grub-pc/install_devices string %s\n"
                  "grub-pc grub-pc/install_devices_empty boolean %s\n") %
                 (idevs, idevs_empty))
    log.debug("Setting grub debconf-set-selections with '%s','%s'" %
              (idevs, idevs_empty))
    try:
        util.subp(['debconf-set-selections'], dconf_sel)
    except:
        util.logexc(log, "Failed to run debconf-set-selections for grub-dpkg")
def handle(name, cfg, cloud, log, args):
    """Configure package repos, then update/upgrade/install packages via
    the legacy DistAction abstraction."""
    update = util.get_cfg_option_bool(cfg, 'repo_update', False)

    # map the various possible upgrade level choices to well known ones
    upgrade_val = util.get_cfg_option_str(cfg, 'repo_upgrade')
    upgrade = util.UPGRADE_NONE
    if upgrade_val:
        if upgrade_val.lower() in ['security', 'critical']:
            upgrade = util.UPGRADE_SECURITY
        elif upgrade_val.lower() in ['fixes', 'bugs', 'bugfix', 'bugfixes']:
            upgrade = util.UPGRADE_BUGFIX
        elif upgrade_val.lower() in ['true', '1', 'on', 'yes', 'all']:
            upgrade = util.UPGRADE_ALL

    dist = cloudinit.DistAction.DistAction("/etc/cloud/dist-defs.cfg")
    if not util.get_cfg_option_bool(cfg, 'repo_preserve', True):
        repo_cfg = dist.get_config_section('repo')
        # 'in' replaces the deprecated dict.has_key().
        if "repo_mirror" in cfg:
            repo_cfg['mirror'] = cfg["repo_mirror"]
        else:
            # May build mirror from availabity zone information:
            availability_zone = cloud.datasource.get_availability_zone()
            repo_cfg['ec2_az'] = availability_zone[:-1]
        log.debug("Generating default repo files")
        dist.repo_generate(repo_cfg)
        # Make this part of repo_generate?? (TODO)

    # Equivalent to 'apt_sources': add a new package repository
    if 'repo_additions' in cfg:
        log.debug("Adding repo files from config")
        errors = dist.repo_add(cfg['repo_additions'])
        for e in errors:
            log.warn("Source Error: %s\n" % ':'.join(e))

    # Normalize 'packages' into a list.
    pkglist = []
    if 'packages' in cfg:
        if isinstance(cfg['packages'], list):
            pkglist = cfg['packages']
        else:
            pkglist.append(cfg['packages'])

    if update or upgrade or pkglist:
        log.debug("Running update on repo")
        dist.repo_update()
    if upgrade:
        log.debug("Running upgrade on repo")
        dist.repo_upgrade(upgrade)
    if pkglist:
        log.debug("Installing packages from repo")
        dist.repo_install(pkglist)
    return True
def handle(name, cfg, cloud, log, _args):
    """Migrate legacy semaphore files to their canonical names, unless
    disabled via the 'migrate' config option."""
    do_migrate = util.get_cfg_option_str(cfg, "migrate", True)
    if not util.translate_bool(do_migrate):
        log.debug("Skipping module named %s, migration disabled", name)
        return
    sems_moved = _migrate_canon_sems(cloud)
    # Typo fix in log message: "there" -> "their".
    log.debug("Migrated %s semaphore files to their canonicalized names",
              sems_moved)
    _migrate_legacy_sems(cloud, log)
def handle(name, cfg, cloud, log, _args):
    """Configure apt: mirrors, sources.list, proxy/config, extra sources,
    and debconf selections."""
    if util.is_false(cfg.get('apt_configure_enabled', True)):
        log.debug("Skipping module named %s, disabled by config.", name)
        return

    release = get_release()
    mirrors = find_apt_mirror_info(cloud, cfg)
    if not mirrors or "primary" not in mirrors:
        log.debug(("Skipping module named %s,"
                   " no package 'mirror' located"), name)
        return

    # backwards compatibility
    mirror = mirrors["primary"]
    mirrors["mirror"] = mirror

    log.debug("Mirror info: %s" % mirrors)

    if not util.get_cfg_option_bool(cfg,
                                    'apt_preserve_sources_list', False):
        generate_sources_list(release, mirrors, cloud, log)
        old_mirrors = cfg.get('apt_old_mirrors',
                              {"primary": "archive.ubuntu.com/ubuntu",
                               "security": "security.ubuntu.com/ubuntu"})
        rename_apt_lists(old_mirrors, mirrors)

    try:
        apply_apt_config(cfg, APT_PROXY_FN, APT_CONFIG_FN)
    except Exception as e:
        log.warn("failed to proxy or apt config info: %s", e)

    # Process 'apt_sources'
    if 'apt_sources' in cfg:
        params = mirrors
        params['RELEASE'] = release
        params['MIRROR'] = mirror

        # Only ppa-like sources matching this regex may be added via
        # add-apt-repository; an empty match config disables all.
        matchcfg = cfg.get('add_apt_repo_match', ADD_APT_REPO_MATCH)
        if matchcfg:
            matcher = re.compile(matchcfg).search
        else:
            def matcher(x):
                return False

        errors = add_sources(cfg['apt_sources'], params,
                             aa_repo_match=matcher)
        for e in errors:
            log.warn("Add source error: %s", ':'.join(e))

    dconf_sel = util.get_cfg_option_str(cfg, 'debconf_selections', False)
    if dconf_sel:
        log.debug("Setting debconf selections per cloud config")
        try:
            util.subp(('debconf-set-selections', '-'), dconf_sel)
        except Exception:
            util.logexc(log, "Failed to run debconf-set-selections")
def handle(_name, cfg, _cloud, log, _args):
    """Preseed grub-pc debconf install-device selections for supported
    distros (mid-era 'grub-dpkg' variant)."""
    idevs = None
    idevs_empty = None

    # Only run on distros known to use grub-pc/debconf.
    if _cloud.distro.name not in distros:
        return

    if "grub-dpkg" in cfg:
        idevs = util.get_cfg_option_str(cfg["grub-dpkg"],
                                        "grub-pc/install_devices", None)
        idevs_empty = util.get_cfg_option_str(cfg["grub-dpkg"],
                                              "grub-pc/install_devices_empty",
                                              None)

    if ((os.path.exists("/dev/sda1") and not os.path.exists("/dev/sda")) or
            (os.path.exists("/dev/xvda1") and
             not os.path.exists("/dev/xvda"))):
        # A partition without its parent disk: no whole disk to install onto.
        if idevs is None:
            idevs = ""
        if idevs_empty is None:
            idevs_empty = "true"
    else:
        if idevs_empty is None:
            idevs_empty = "false"
        if idevs is None:
            idevs = "/dev/sda"
            for dev in ("/dev/sda", "/dev/vda", "/dev/sda1", "/dev/vda1"):
                if os.path.exists(dev):
                    idevs = dev
                    break

    # now idevs and idevs_empty are set to determined values
    # or, those set by user
    dconf_sel = (("grub-pc grub-pc/install_devices string %s\n"
                  "grub-pc grub-pc/install_devices_empty boolean %s\n") %
                 (idevs, idevs_empty))
    log.debug("Setting grub debconf-set-selections with '%s','%s'" %
              (idevs, idevs_empty))

    try:
        util.subp(['debconf-set-selections'], dconf_sel)
    except:
        util.logexc(log, "Failed to run debconf-set-selections for grub-dpkg")
def handle(_name, cfg, cloud, log, args):
    """Apply a locale from args or cloud-config via apply_locale()."""
    # Positional args (from the command line) win over cloud-config.
    locale = args[0] if len(args) != 0 else util.get_cfg_option_str(
        cfg, "locale", cloud.get_locale())
    cfgfile = util.get_cfg_option_str(cfg, "locale_configfile",
                                      "/etc/default/locale")
    if not locale:
        return
    log.debug("setting locale to %s" % locale)
    try:
        apply_locale(locale, cfgfile)
    except Exception as e:
        log.debug(traceback.format_exc(e))
        raise Exception("failed to apply locale %s" % locale)
def handle(name, cfg, cloud, log, _args):
    """Log fingerprints of each user's authorized ssh keys to the console,
    unless 'no_ssh_fingerprints' is set."""
    if 'no_ssh_fingerprints' in cfg:
        log.debug(("Skipping module named %s, "
                   "logging of ssh fingerprints disabled"), name)
        # Bug fix: the message said "Skipping" but the function kept going;
        # actually skip.
        return

    hash_meth = util.get_cfg_option_str(cfg, "authkey_hash", "md5")
    (users, _groups) = ds.normalize_users_groups(cfg, cloud.distro)
    for (user_name, _cfg) in users.items():
        (key_fn, key_entries) = ssh_util.extract_authorized_keys(user_name)
        _pprint_key_entries(user_name, key_fn, key_entries, hash_meth)
def __init__(self, cfg):
    """Resolve platform-dependent salt constants, allowing cloud-config
    overrides for package name, config dir, and service name."""
    # FreeBSD package/path/service names differ from the Linux defaults.
    if util.is_FreeBSD():
        defaults = ('py27-salt', 'salt_minion', '/usr/local/etc/salt')
    else:
        defaults = ('salt-minion', 'salt-minion', '/etc/salt')
    (self.pkg_name, self.srv_name, self.conf_dir) = defaults

    # if there are constants given in cloud config use those
    self.pkg_name = util.get_cfg_option_str(cfg, 'pkg_name', self.pkg_name)
    self.conf_dir = util.get_cfg_option_str(cfg, 'config_dir', self.conf_dir)
    self.srv_name = util.get_cfg_option_str(cfg, 'service_name',
                                            self.srv_name)
def initfs():
    """Create cloud-init's working directory tree under varlibdir and
    prime the configured log file (existence + ownership)."""
    subds = ['scripts/per-instance', 'scripts/per-once', 'scripts/per-boot',
             'seed', 'instances', 'handlers', 'sem', 'data']
    dlist = []
    for subd in subds:
        dlist.append("%s/%s" % (varlibdir, subd))
    util.ensure_dirs(dlist)

    cfg = util.get_base_cfg(system_config, cfg_builtin, parsed_cfgs)
    log_file = util.get_cfg_option_str(cfg, 'def_log_file', None)
    perms = util.get_cfg_option_str(cfg, 'syslog_fix_perms', None)
    if log_file:
        # Touch the log file (append mode) so ownership can be fixed below.
        fp = open(log_file, "ab")
        fp.close()
    if log_file and perms:
        # perms is a "user:group" spec; "-1"/"None" mean "leave unchanged".
        (u, g) = perms.split(':', 1)
        if u == "-1" or u == "None":
            u = None
        if g == "-1" or g == "None":
            g = None
        util.chownbyname(log_file, u, g)
def handle(name, cfg, cloud, log, args):
    """Set the system timezone via the distro, when one is configured."""
    timezone = args[0] if len(args) != 0 else util.get_cfg_option_str(
        cfg, "timezone", False)
    if not timezone:
        log.debug("Skipping module named %s, no 'timezone' specified", name)
        return
    # Let the distro handle settings its timezone
    cloud.distro.set_timezone(timezone)
def handle(_name, cfg, _cloud, log, _args):
    """Write an apt pipelining snippet per the 'apt_pipelining' option."""
    raw_value = util.get_cfg_option_str(cfg, "apt_pipelining", False)
    normalized = str(raw_value).lower().strip()
    if normalized == "false":
        # Explicitly disable pipelining.
        write_apt_snippet("0", log, DEFAULT_FILE)
        return
    if normalized in ("none", "unchanged", "os"):
        # Leave the OS default in place.
        return
    if normalized in [str(b) for b in xrange(0, 6)]:
        # A pipeline depth of 0..5 was requested.
        write_apt_snippet(normalized, log, DEFAULT_FILE)
        return
    log.warn("Invalid option for apt_pipeling: %s", raw_value)
def handle(_name, cfg, _cloud, log, _args):
    """Preseed grub-pc debconf selections with the detected install device
    (oldest 'grub-dpkg' variant)."""
    idevs = None
    idevs_empty = None

    if "grub-dpkg" in cfg:
        idevs = util.get_cfg_option_str(cfg["grub-dpkg"],
                                        "grub-pc/install_devices", None)
        idevs_empty = util.get_cfg_option_str(cfg["grub-dpkg"],
                                              "grub-pc/install_devices_empty",
                                              None)

    if ((os.path.exists("/dev/sda1") and not os.path.exists("/dev/sda")) or
            (os.path.exists("/dev/xvda1") and
             not os.path.exists("/dev/xvda"))):
        # A partition without its parent disk: no whole disk to install onto.
        # ('is None' replaces the '== None' comparisons.)
        if idevs is None:
            idevs = ""
        if idevs_empty is None:
            idevs_empty = "true"
    else:
        if idevs_empty is None:
            idevs_empty = "false"
        if idevs is None:
            idevs = "/dev/sda"
            for dev in ("/dev/sda", "/dev/vda", "/dev/sda1", "/dev/vda1"):
                if os.path.exists(dev):
                    idevs = dev
                    break

    # now idevs and idevs_empty are set to determined values
    # or, those set by user
    dconf_sel = "grub-pc grub-pc/install_devices string %s\n" % idevs + \
        "grub-pc grub-pc/install_devices_empty boolean %s\n" % idevs_empty
    log.debug("setting grub debconf-set-selections with '%s','%s'" %
              (idevs, idevs_empty))

    try:
        # Bug fix: ('debconf-set-selections') is just a parenthesized
        # string, not a one-tuple; pass a real argv list.
        util.subp(['debconf-set-selections'], dconf_sel)
    except Exception:
        log.error("Failed to run debconf-set-selections for grub-dpkg")
        log.debug(traceback.format_exc())
def handle(name, cfg, cloud, log, args):
    """Enable or disable byobu system-wide and/or for the default user,
    per args or the 'byobu_by_default' config key."""
    if len(args) != 0:
        value = args[0]
    else:
        value = util.get_cfg_option_str(cfg, "byobu_by_default", "")

    if not value:
        log.debug("Skipping module named %s, no 'byobu' values found", name)
        return

    # Shorthand 'user'/'system' means enable for that scope.
    if value == "user" or value == "system":
        value = "enable-%s" % value

    valid = ("enable-user", "enable-system", "enable",
             "disable-user", "disable-system", "disable")
    if not value in valid:
        # NOTE(review): execution continues after this warning; with an
        # unknown value both mod flags stay False so shcmd stays empty
        # and nothing is actually run.
        log.warn("Unknown value %s for byobu_by_default", value)

    mod_user = value.endswith("-user")
    mod_sys = value.endswith("-system")
    if value.startswith("enable"):
        bl_inst = "install"
        dc_val = "byobu byobu/launch-by-default boolean true"
        mod_sys = True
    else:
        if value == "disable":
            mod_user = True
            mod_sys = True
        bl_inst = "uninstall"
        dc_val = "byobu byobu/launch-by-default boolean false"

    # Build one shell command that accumulates failures in $X.
    shcmd = ""
    if mod_user:
        (users, _groups) = ds.normalize_users_groups(cfg, cloud.distro)
        (user, _user_config) = ds.extract_default(users)
        if not user:
            log.warn(("No default byobu user provided, "
                      "can not launch %s for the default user"), bl_inst)
        else:
            shcmd += " sudo -Hu \"%s\" byobu-launcher-%s" % (user, bl_inst)
            shcmd += " || X=$(($X+1)); "
    if mod_sys:
        shcmd += "echo \"%s\" | debconf-set-selections" % dc_val
        shcmd += " && dpkg-reconfigure byobu --frontend=noninteractive"
        shcmd += " || X=$(($X+1)); "

    if len(shcmd):
        cmd = ["/bin/sh", "-c", "%s %s %s" % ("X=0;", shcmd, "exit $X")]
        log.debug("Setting byobu to %s", value)
        util.subp(cmd, capture=False)
def handle(_name, cfg, _cloud, log, _args):
    """Configure apt pipelining from the 'apt_pipelining' config value."""
    apt_pipe_value = util.get_cfg_option_str(cfg, "apt_pipelining", False)
    apt_pipe_value = str(apt_pipe_value).lower()

    if apt_pipe_value == "false":
        write_apt_snippet("0", log)
    elif apt_pipe_value in ("none", "unchanged", "os"):
        return
    elif apt_pipe_value in [str(b) for b in range(0, 6)]:
        # Bug fix: membership must be against the list of digit strings
        # "0".."5"; the old 'in str(range(0, 6))' did a substring test
        # against "[0, 1, 2, 3, 4, 5]".
        write_apt_snippet(apt_pipe_value, log)
    else:
        log.warn("Invalid option for apt_pipeling: %s" % apt_pipe_value)
def handle(name, cfg, _cloud, log, _args):
    """Write a .repo file for each entry under 'yum_repos' in cloud-config,
    skipping duplicates, existing files, and entries missing 'baseurl'."""
    repos = cfg.get('yum_repos')
    if not repos:
        log.debug(("Skipping module named %s,"
                   " no 'yum_repos' configuration found"), name)
        return
    repo_base_path = util.get_cfg_option_str(cfg, 'yum_repo_dir',
                                             '/etc/yum.repos.d/')
    repo_locations = {}
    repo_configs = {}
    for (repo_id, repo_config) in repos.items():
        # Canonicalize so differently-spelled ids map to one filename.
        canon_repo_id = _canonicalize_id(repo_id)
        repo_fn_pth = os.path.join(repo_base_path,
                                   "%s.repo" % (canon_repo_id))
        if os.path.exists(repo_fn_pth):
            log.info("Skipping repo %s, file %s already exists!",
                     repo_id, repo_fn_pth)
            continue
        elif canon_repo_id in repo_locations:
            log.info("Skipping repo %s, file %s already pending!",
                     repo_id, repo_fn_pth)
            continue
        if not repo_config:
            repo_config = {}
        # Do some basic sanity checks/cleaning
        n_repo_config = {}
        for (k, v) in repo_config.items():
            k = k.lower().strip().replace("-", "_")
            if k:
                n_repo_config[k] = v
        repo_config = n_repo_config
        missing_required = 0
        for req_field in ['baseurl']:
            if req_field not in repo_config:
                log.warn(("Repository %s does not contain a %s"
                          " configuration 'required' entry"),
                         repo_id, req_field)
                missing_required += 1
        if not missing_required:
            repo_configs[canon_repo_id] = repo_config
            repo_locations[canon_repo_id] = repo_fn_pth
        else:
            log.warn("Repository %s is missing %s required fields, "
                     "skipping!", repo_id, missing_required)
    for (c_repo_id, path) in repo_locations.items():
        repo_blob = _format_repository_config(c_repo_id,
                                              repo_configs.get(c_repo_id))
        util.write_file(path, repo_blob)
def handle(name, _cfg, _cloud, log, _args):
    """Set the hostname from a reverse-DNS lookup of the address on a
    configured interface, falling back to the configured hostname."""
    default_interface = 'eth0'
    system_info = util.system_info()
    if 'aix' in system_info['platform'].lower():
        # AIX names its first interface en0, not eth0.
        default_interface = 'en0'
    interface = util.get_cfg_option_str(_cfg, 'set_hostname_from_interface',
                                        default=default_interface)
    log.debug('Setting hostname based on interface %s' % interface)
    set_hostname = False
    fqdn = None
    # Look up the IP address on the interface
    # and then reverse lookup the hostname in DNS
    info = netinfo.netdev_info()
    if interface in info:
        set_short = util.get_cfg_option_bool(_cfg, "set_dns_shortname",
                                             False)
        if 'addr' in info[interface] and info[interface]['addr']:
            # Handle IPv4 address
            set_hostname = _set_hostname(_cfg, _cloud, log,
                                         info[interface]['addr'], set_short)
        elif 'addr6' in info[interface] and info[interface]['addr6']:
            # Handle IPv6 addresses
            for ipaddr in info[interface]['addr6']:
                # Drop the /prefix-length suffix before the lookup.
                ipaddr = ipaddr.split('/')[0]
                set_hostname = _set_hostname(_cfg, _cloud, log, ipaddr,
                                             set_short)
                if set_hostname:
                    break
    else:
        log.warning('Interface %s was not found on the system. '
                    'Interfaces found on system: %s' % (interface,
                                                        info.keys()))

    # Reverse lookup failed, fall back to cc_set_hostname way.
    if not set_hostname:
        (short_hostname, fqdn) = util.get_hostname_fqdn(_cfg, _cloud)
        try:
            log.info('Fall back to setting hostname on VM as %s' % fqdn)
            _cloud.distro.set_hostname(short_hostname, fqdn=fqdn)
        except Exception:
            util.logexc(log, "Failed to set the hostname to %s", fqdn)
            raise
def handle(name, cfg, cloud, log, _args):
    """Configure apt: mirrors, sources.list, proxy/config, extra sources,
    and debconf selections (mid-era variant without enable flag)."""
    release = get_release()
    mirrors = find_apt_mirror_info(cloud, cfg)
    if not mirrors or "primary" not in mirrors:
        log.debug(("Skipping module named %s,"
                   " no package 'mirror' located"), name)
        return

    # backwards compatibility
    mirror = mirrors["primary"]
    mirrors["mirror"] = mirror

    log.debug("Mirror info: %s" % mirrors)

    if not util.get_cfg_option_bool(cfg, "apt_preserve_sources_list",
                                    False):
        generate_sources_list(release, mirrors, cloud, log)
        old_mirrors = cfg.get(
            "apt_old_mirrors",
            {"primary": "archive.ubuntu.com/ubuntu",
             "security": "security.ubuntu.com/ubuntu"}
        )
        rename_apt_lists(old_mirrors, mirrors)

    try:
        apply_apt_config(cfg, APT_PROXY_FN, APT_CONFIG_FN)
    except Exception as e:
        log.warn("failed to proxy or apt config info: %s", e)

    # Process 'apt_sources'
    if "apt_sources" in cfg:
        params = mirrors
        params["RELEASE"] = release
        params["MIRROR"] = mirror
        errors = add_sources(cfg["apt_sources"], params)
        for e in errors:
            log.warn("Add source error: %s", ":".join(e))

    dconf_sel = util.get_cfg_option_str(cfg, "debconf_selections", False)
    if dconf_sel:
        log.debug("Setting debconf selections per cloud config")
        try:
            util.subp(("debconf-set-selections", "-"), dconf_sel)
        except Exception:
            util.logexc(log, "Failed to run debconf-set-selections")
def handle(name, cfg, _cloud, log, _args):
    """Write a .repo file for each entry under 'yum_repos' in cloud-config,
    skipping duplicates, existing files, and entries missing 'baseurl'."""
    repos = cfg.get("yum_repos")
    if not repos:
        log.debug(
            "Skipping module named %s, no 'yum_repos' configuration found",
            name,
        )
        return
    repo_base_path = util.get_cfg_option_str(
        cfg, "yum_repo_dir", "/etc/yum.repos.d/"
    )
    repo_locations = {}
    repo_configs = {}
    for (repo_id, repo_config) in repos.items():
        # Canonicalize so differently-spelled ids map to one filename.
        canon_repo_id = _canonicalize_id(repo_id)
        repo_fn_pth = os.path.join(
            repo_base_path, "%s.repo" % (canon_repo_id)
        )
        if os.path.exists(repo_fn_pth):
            log.info(
                "Skipping repo %s, file %s already exists!",
                repo_id,
                repo_fn_pth,
            )
            continue
        elif canon_repo_id in repo_locations:
            log.info(
                "Skipping repo %s, file %s already pending!",
                repo_id,
                repo_fn_pth,
            )
            continue
        if not repo_config:
            repo_config = {}
        # Do some basic sanity checks/cleaning
        n_repo_config = {}
        for (k, v) in repo_config.items():
            k = k.lower().strip().replace("-", "_")
            if k:
                n_repo_config[k] = v
        repo_config = n_repo_config
        missing_required = 0
        for req_field in ["baseurl"]:
            if req_field not in repo_config:
                log.warning(
                    "Repository %s does not contain a %s"
                    " configuration 'required' entry",
                    repo_id,
                    req_field,
                )
                missing_required += 1
        if not missing_required:
            repo_configs[canon_repo_id] = repo_config
            repo_locations[canon_repo_id] = repo_fn_pth
        else:
            log.warning(
                "Repository %s is missing %s required fields, skipping!",
                repo_id,
                missing_required,
            )
    for (c_repo_id, path) in repo_locations.items():
        repo_blob = _format_repository_config(
            c_repo_id, repo_configs.get(c_repo_id)
        )
        util.write_file(path, repo_blob)
def handle(_name, cfg, cloud, log, args):
    """Set user passwords (from 'password'/'chpasswd' config), optionally
    expire them, and toggle sshd PasswordAuthentication per 'ssh_pwauth'.

    @param cfg: full cloud-config dictionary.
    @param cloud: cloud object (distro access for users and service cmd).
    @param log: logger for this module.
    @param args: command-line args; args[0], if given, is the password and
        overrides any chpasswd 'list' in cfg.
    @raises: the last accumulated error, if any occurred.
    """
    if len(args) != 0:
        # if run from command line, and give args, wipe the chpasswd['list']
        password = args[0]
        if 'chpasswd' in cfg and 'list' in cfg['chpasswd']:
            del cfg['chpasswd']['list']
    else:
        password = util.get_cfg_option_str(cfg, "password", None)

    expire = True
    pw_auth = "no"
    change_pwauth = False
    plist = None

    if 'chpasswd' in cfg:
        chfg = cfg['chpasswd']
        plist = util.get_cfg_option_str(chfg, 'list', plist)
        expire = util.get_cfg_option_bool(chfg, 'expire', expire)

    if not plist and password:
        # No explicit list: apply the single password to the default user.
        (users, _groups) = ds.normalize_users_groups(cfg, cloud.distro)
        (user, _user_config) = ds.extract_default(users)
        if user:
            plist = "%s:%s" % (user, password)
        else:
            log.warn("No default or defined user to change password for.")

    errors = []
    if plist:
        plist_in = []
        randlist = []
        users = []
        for line in plist.splitlines():
            u, p = line.split(':', 1)
            if p == "R" or p == "RANDOM":
                p = rand_user_password()
                randlist.append("%s:%s" % (u, p))
            plist_in.append("%s:%s" % (u, p))
            users.append(u)

        ch_in = '\n'.join(plist_in) + '\n'
        try:
            log.debug("Changing password for %s:", users)
            util.subp(['chpasswd'], ch_in)
        except Exception as e:
            errors.append(e)
            util.logexc(log,
                        "Failed to set passwords with chpasswd for %s", users)

        if len(randlist):
            # Surface generated passwords on stderr so the operator sees
            # them in the console log.
            blurb = ("Set the following 'random' passwords\n",
                     '\n'.join(randlist))
            sys.stderr.write("%s\n%s\n" % blurb)

        if expire:
            expired_users = []
            for u in users:
                try:
                    util.subp(['passwd', '--expire', u])
                    expired_users.append(u)
                except Exception as e:
                    errors.append(e)
                    util.logexc(log, "Failed to set 'expire' for %s", u)
            if expired_users:
                log.debug("Expired passwords for: %s users", expired_users)

    change_pwauth = False
    pw_auth = None
    if 'ssh_pwauth' in cfg:
        change_pwauth = True
        if util.is_true(cfg['ssh_pwauth']):
            pw_auth = 'yes'
        if util.is_false(cfg['ssh_pwauth']):
            pw_auth = 'no'

    if change_pwauth:
        replaced_auth = False
        # See: man sshd_config
        old_lines = ssh_util.parse_ssh_config(ssh_util.DEF_SSHD_CFG)
        new_lines = []
        i = 0
        for (i, line) in enumerate(old_lines):
            # Keywords are case-insensitive and arguments are case-sensitive
            if line.key == 'passwordauthentication':
                log.debug("Replacing auth line %s with %s", i + 1, pw_auth)
                replaced_auth = True
                line.value = pw_auth
            new_lines.append(line)

        if not replaced_auth:
            log.debug("Adding new auth line %s", i + 1)
            replaced_auth = True
            new_lines.append(ssh_util.SshdConfigLine('',
                                                     'PasswordAuthentication',
                                                     pw_auth))

        lines = [str(e) for e in new_lines]
        util.write_file(ssh_util.DEF_SSHD_CFG, "\n".join(lines))

        try:
            # BUGFIX: copy init_cmd before appending -- appending directly
            # would mutate the distro's shared command list across calls.
            cmd = list(cloud.distro.init_cmd)  # Default service
            cmd.append(cloud.distro.get_option('ssh_svcname', 'ssh'))
            cmd.append('restart')
            if 'systemctl' in cmd:  # Switch action ordering
                cmd[1], cmd[2] = cmd[2], cmd[1]
            # BUGFIX: use a list comprehension -- on Python 3 filter()
            # returns a lazy iterator, not a list suitable for an argv.
            cmd = [c for c in cmd if c]  # Remove empty arguments
            util.subp(cmd)
            log.debug("Restarted the ssh daemon")
        except Exception:
            # BUGFIX: was a bare 'except:' which also swallowed
            # SystemExit/KeyboardInterrupt.
            util.logexc(log, "Restarting of the ssh daemon failed")

    if len(errors):
        log.debug("%s errors occured, re-raising the last one", len(errors))
        raise errors[-1]
def handle(_name, cfg, cloud, log, _args):
    """Manage SSH host keys and apply authorized keys.

    Deletes pristine-image host keys (unless 'ssh_deletekeys' is false),
    then either installs host keys supplied in cfg['ssh_keys'] or
    generates fresh ones, and finally applies user/root authorized keys.
    """
    # remove the static keys from the pristine image
    if cfg.get("ssh_deletekeys", True):
        key_pth = os.path.join("/etc/ssh/", "ssh_host_*key*")
        for f in glob.glob(key_pth):
            try:
                util.del_file(f)
            except Exception:
                util.logexc(log, "Failed deleting key file %s", f)

    if "ssh_keys" in cfg:
        # if there are keys in cloud-config, use them
        for (key, val) in cfg["ssh_keys"].items():
            if key in CONFIG_KEY_TO_FILE:
                tgt_fn = CONFIG_KEY_TO_FILE[key][0]
                tgt_perms = CONFIG_KEY_TO_FILE[key][1]
                util.write_file(tgt_fn, val, tgt_perms)

        # Derive any missing public key from its provided private key.
        for (priv, pub) in PRIV_TO_PUB.items():
            if pub in cfg['ssh_keys'] or priv not in cfg['ssh_keys']:
                continue
            pair = (CONFIG_KEY_TO_FILE[priv][0], CONFIG_KEY_TO_FILE[pub][0])
            cmd = ['sh', '-xc', KEY_GEN_TPL % pair]
            try:
                # TODO(harlowja): Is this guard needed?
                with util.SeLinuxGuard("/etc/ssh", recursive=True):
                    util.subp(cmd, capture=False)
                log.debug("Generated a key for %s from %s",
                          pair[0], pair[1])
            except Exception:
                util.logexc(log, "Failed generated a key for %s from %s",
                            pair[0], pair[1])
    else:
        # if not, generate them
        genkeys = util.get_cfg_option_list(cfg,
                                           'ssh_genkeytypes',
                                           GENERATE_KEY_NAMES)
        # Force C locale so ssh-keygen error text is predictable below.
        lang_c = os.environ.copy()
        lang_c['LANG'] = 'C'
        for keytype in genkeys:
            keyfile = KEY_FILE_TPL % (keytype)
            if os.path.exists(keyfile):
                continue
            util.ensure_dir(os.path.dirname(keyfile))
            cmd = ['ssh-keygen', '-t', keytype, '-N', '', '-f', keyfile]

            # TODO(harlowja): Is this guard needed?
            with util.SeLinuxGuard("/etc/ssh", recursive=True):
                try:
                    out, err = util.subp(cmd, capture=True, env=lang_c)
                    sys.stdout.write(util.decode_binary(out))
                except util.ProcessExecutionError as e:
                    err = util.decode_binary(e.stderr).lower()
                    # An unsupported key type is only a debug message;
                    # anything else is a real failure.
                    if (e.exit_code == 1 and
                            err.lower().startswith("unknown key")):
                        log.debug("ssh-keygen: unknown key type '%s'",
                                  keytype)
                    else:
                        util.logexc(
                            log, "Failed generating key type %s to "
                            "file %s", keytype, keyfile)

    try:
        (users, _groups) = ug_util.normalize_users_groups(cfg, cloud.distro)
        (user, _user_config) = ug_util.extract_default(users)
        disable_root = util.get_cfg_option_bool(cfg, "disable_root", True)
        disable_root_opts = util.get_cfg_option_str(cfg, "disable_root_opts",
                                                    DISABLE_ROOT_OPTS)

        keys = cloud.get_public_ssh_keys() or []
        if "ssh_authorized_keys" in cfg:
            cfgkeys = cfg["ssh_authorized_keys"]
            keys.extend(cfgkeys)

        apply_credentials(keys, user, disable_root, disable_root_opts)
    except Exception:
        util.logexc(log, "Applying ssh credentials failed!")
def handle(_name, cfg, cloud, log, _args):
    """Manage SSH host keys (legacy variant) and apply authorized keys.

    Deletes pristine-image host keys, installs keys from cfg['ssh_keys']
    or generates new ones, then applies user/root authorized keys.

    Fixes applied to this legacy (Python 2 era) block: 0oNNN octal
    literals, dict.items() instead of iteritems(), 'not in', and
    narrowed bare 'except:' clauses -- all required for Python 3.
    """
    # remove the static keys from the pristine image
    if cfg.get("ssh_deletekeys", True):
        for f in glob.glob("/etc/ssh/ssh_host_*key*"):
            try:
                os.unlink(f)
            except OSError:
                # Best effort: key file may already be gone.
                pass

    if "ssh_keys" in cfg:
        # if there are keys in cloud-config, use them
        key2file = {
            "rsa_private": ("/etc/ssh/ssh_host_rsa_key", 0o600),
            "rsa_public": ("/etc/ssh/ssh_host_rsa_key.pub", 0o644),
            "dsa_private": ("/etc/ssh/ssh_host_dsa_key", 0o600),
            "dsa_public": ("/etc/ssh/ssh_host_dsa_key.pub", 0o644),
            "ecdsa_private": ("/etc/ssh/ssh_host_ecdsa_key", 0o600),
            "ecdsa_public": ("/etc/ssh/ssh_host_ecdsa_key.pub", 0o644),
        }

        for key, val in cfg["ssh_keys"].items():
            if key in key2file:
                util.write_file(key2file[key][0], val, key2file[key][1])

        priv2pub = {
            'rsa_private': 'rsa_public',
            'dsa_private': 'dsa_public',
            'ecdsa_private': 'ecdsa_public',
        }

        # Derive a missing public key from its provided private key.
        cmd = 'o=$(ssh-keygen -yf "%s") && echo "$o" root@localhost > "%s"'
        for priv, pub in priv2pub.items():
            if pub in cfg['ssh_keys'] or priv not in cfg['ssh_keys']:
                continue
            pair = (key2file[priv][0], key2file[pub][0])
            subprocess.call(('sh', '-xc', cmd % pair))
            log.debug("generated %s from %s" % pair)
    else:
        # if not, generate them
        for keytype in util.get_cfg_option_list_or_str(cfg,
                                                       'ssh_genkeytypes',
                                                       ['rsa', 'dsa',
                                                        'ecdsa']):
            keyfile = '/etc/ssh/ssh_host_%s_key' % keytype
            if not os.path.exists(keyfile):
                subprocess.call(['ssh-keygen', '-t', keytype,
                                 '-N', '', '-f', keyfile])

    # Restore SELinux labels on anything we wrote under /etc/ssh.
    util.restorecon_if_possible('/etc/ssh', recursive=True)

    try:
        user = util.get_cfg_option_str(cfg, 'user')
        disable_root = util.get_cfg_option_bool(cfg, "disable_root", True)
        disable_root_opts = util.get_cfg_option_str(cfg, "disable_root_opts",
                                                    DISABLE_ROOT_OPTS)
        keys = cloud.get_public_ssh_keys()

        if "ssh_authorized_keys" in cfg:
            cfgkeys = cfg["ssh_authorized_keys"]
            keys.extend(cfgkeys)

        apply_credentials(keys, user, disable_root, disable_root_opts, log)
    except Exception:
        # Was a bare 'except:'; keep the best-effort behavior but do not
        # swallow SystemExit/KeyboardInterrupt.
        util.logexc(log)
        log.warn("applying credentials failed!\n")
def handle(name, cfg, _cloud, log, args):
    """Resize the root filesystem to fill its underlying device.

    @param cfg: full cloud-config dictionary ('resize_rootfs' may be a
        bool-like value or NOBLOCK to background the resize).
    @param log: logger for this module.
    @param args: command-line args; args[0], if given, overrides cfg.
    """
    if len(args) != 0:
        resize_root = args[0]
    else:
        resize_root = util.get_cfg_option_str(cfg, "resize_rootfs", True)

    if not util.translate_bool(resize_root, addons=[NOBLOCK]):
        log.debug("Skipping module named %s, resizing disabled", name)
        return

    # TODO(harlowja) is the directory ok to be used??
    resize_root_d = util.get_cfg_option_str(cfg, "resize_rootfs_tmp", "/run")
    util.ensure_dir(resize_root_d)

    # TODO(harlowja): allow what is to be resized to be configurable??
    resize_what = "/"
    result = util.get_mount_info(resize_what, log)
    if not result:
        log.warn("Could not determine filesystem type of %s", resize_what)
        return

    (devpth, fs_type, mount_point) = result

    # Ensure the path is a block device.
    info = "dev=%s mnt_point=%s path=%s" % (devpth, mount_point, resize_what)
    log.debug("resize_info: %s" % info)

    container = util.is_container()

    # /dev/root may not actually exist; resolve the real root device
    # from the kernel command line (not applicable inside containers).
    if (devpth == "/dev/root" and not os.path.exists(devpth) and
            not container):
        devpth = rootdev_from_cmdline(util.get_cmdline())
        if devpth is None:
            log.warn("Unable to find device '/dev/root'")
            return
        log.debug("Converted /dev/root to '%s' per kernel cmdline", devpth)

    try:
        statret = os.stat(devpth)
    except OSError as exc:
        # Missing device is only a debug message in containers; a
        # warning elsewhere; any other stat error is re-raised.
        if container and exc.errno == errno.ENOENT:
            log.debug("Device '%s' did not exist in container. "
                      "cannot resize: %s" % (devpth, info))
        elif exc.errno == errno.ENOENT:
            log.warn("Device '%s' did not exist. cannot resize: %s" %
                     (devpth, info))
        else:
            raise exc
        return

    if not stat.S_ISBLK(statret.st_mode) and not stat.S_ISCHR(statret.st_mode):
        if container:
            log.debug("device '%s' not a block device in container."
                      " cannot resize: %s" % (devpth, info))
        else:
            log.warn("device '%s' not a block device. cannot resize: %s" %
                     (devpth, info))
        return

    resizer = None
    fstype_lc = fs_type.lower()
    for (pfix, root_cmd) in RESIZE_FS_PREFIXES_CMDS:
        if fstype_lc.startswith(pfix):
            resizer = root_cmd
            break

    if not resizer:
        log.warn("Not resizing unknown filesystem type %s for %s",
                 fs_type, resize_what)
        return

    resize_cmd = resizer(resize_what, devpth)
    log.debug("Resizing %s (%s) using %s", resize_what, fs_type,
              ' '.join(resize_cmd))

    if resize_root == NOBLOCK:
        # Fork to a child that will run the resize command.
        # BUGFIX: pass util.log_time as a callable plus its arguments --
        # the previous code *called* util.log_time(...) here, which ran
        # the resize synchronously in the parent and handed its return
        # value to fork_cb, so NOBLOCK never actually backgrounded.
        util.fork_cb(
            util.log_time, logfunc=log.debug, msg="backgrounded Resizing",
            func=do_resize, args=(resize_cmd, log))
    else:
        util.log_time(logfunc=log.debug, msg="Resizing",
                      func=do_resize, args=(resize_cmd, log))

    action = 'Resized'
    if resize_root == NOBLOCK:
        action = 'Resizing (via forking)'
    log.debug("%s root filesystem (type=%s, val=%s)", action, fs_type,
              resize_root)
def handle(name, cfg, cloud, log, _args):
    """Install and configure puppet per the 'puppet' cloud-config section,
    then enable and start the puppet service."""
    # If there isn't a puppet key in the configuration don't do anything
    if 'puppet' not in cfg:
        log.debug(("Skipping module named %s,"
                   " no 'puppet' configuration found"), name)
        return

    puppet_cfg = cfg['puppet']
    # Start by installing the puppet package if necessary...
    install = util.get_cfg_option_bool(puppet_cfg, 'install', True)
    version = util.get_cfg_option_str(puppet_cfg, 'version', None)
    package_name = util.get_cfg_option_str(
        puppet_cfg, 'package_name', PUPPET_PACKAGE_NAME)
    conf_file = util.get_cfg_option_str(
        puppet_cfg, 'conf_file', PUPPET_CONF_PATH)
    ssl_dir = util.get_cfg_option_str(puppet_cfg, 'ssl_dir', PUPPET_SSL_DIR)
    csr_attributes_path = util.get_cfg_option_str(
        puppet_cfg, 'csr_attributes_path', PUPPET_CSR_ATTRIBUTES_PATH)

    p_constants = PuppetConstants(conf_file, ssl_dir, csr_attributes_path,
                                  log)
    if not install and version:
        # A pinned version only makes sense when install is enabled.
        log.warning(("Puppet install set false but version supplied,"
                     " doing nothing."))
    elif install:
        log.debug(("Attempting to install puppet %s,"),
                  version if version else 'latest')
        cloud.distro.install_packages((package_name, version))

    # ... and then update the puppet configuration
    if 'conf' in puppet_cfg:
        # Add all sections from the conf object to puppet.conf
        contents = util.load_file(p_constants.conf_path)
        # Create object for reading puppet.conf values
        puppet_config = helpers.DefaultingConfigParser()
        # Read puppet.conf values from original file in order to be able to
        # mix the rest up. First clean them up
        # (TODO(harlowja) is this really needed??)
        cleaned_lines = [i.lstrip() for i in contents.splitlines()]
        cleaned_contents = '\n'.join(cleaned_lines)
        # Move to puppet_config.read_file when dropping py2.7
        puppet_config.readfp(   # pylint: disable=W1505
            StringIO(cleaned_contents),
            filename=p_constants.conf_path)
        for (cfg_name, cfg) in puppet_cfg['conf'].items():
            # Cert configuration is a special case
            # Dump the puppet master ca certificate in the correct place
            if cfg_name == 'ca_cert':
                # Puppet ssl sub-directory isn't created yet
                # Create it with the proper permissions and ownership
                util.ensure_dir(p_constants.ssl_dir, 0o771)
                util.chownbyname(p_constants.ssl_dir, 'puppet', 'root')
                util.ensure_dir(p_constants.ssl_cert_dir)
                util.chownbyname(p_constants.ssl_cert_dir, 'puppet', 'root')
                util.write_file(p_constants.ssl_cert_path, cfg)
                util.chownbyname(p_constants.ssl_cert_path, 'puppet', 'root')
            else:
                # Iterate through the config items, we'll use ConfigParser.set
                # to overwrite or create new items as needed
                for (o, v) in cfg.items():
                    if o == 'certname':
                        # Expand %f as the fqdn
                        # TODO(harlowja) should this use the cloud fqdn??
                        v = v.replace("%f", socket.getfqdn())
                        # Expand %i as the instance id
                        v = v.replace("%i", cloud.get_instance_id())
                        # certname needs to be downcased
                        v = v.lower()
                    puppet_config.set(cfg_name, o, v)
                # We got all our config as wanted we'll rename
                # the previous puppet.conf and create our new one
                # NOTE(review): this rename/write happens inside the
                # section loop, so it re-writes per non-ca_cert section.
                util.rename(p_constants.conf_path,
                            "%s.old" % (p_constants.conf_path))
                util.write_file(p_constants.conf_path,
                                puppet_config.stringify())

    if 'csr_attributes' in puppet_cfg:
        util.write_file(
            p_constants.csr_attributes_path,
            yaml.dump(puppet_cfg['csr_attributes'],
                      default_flow_style=False))

    # Set it up so it autostarts
    _autostart_puppet(log)

    # Start puppetd
    util.subp(['service', 'puppet', 'start'], capture=False)
def handle(name, cfg, cloud, log, _args):
    """Handler method activated by cloud-init.

    Sets up chef directories, the validation key, client.rb (rendered
    from template via get_template_params), the firstboot json, and
    finally installs and/or runs chef-client as configured.
    """

    # If there isn't a chef key in the configuration don't do anything
    if "chef" not in cfg:
        log.debug(
            "Skipping module named %s, no 'chef' key in configuration", name
        )
        return

    chef_cfg = cfg["chef"]

    # Ensure the chef directories we use exist
    chef_dirs = util.get_cfg_option_list(chef_cfg, "directories")
    if not chef_dirs:
        chef_dirs = list(CHEF_DIRS)
    for d in itertools.chain(chef_dirs, REQUIRED_CHEF_DIRS):
        util.ensure_dir(d)

    vkey_path = chef_cfg.get("validation_key", CHEF_VALIDATION_PEM_PATH)
    vcert = chef_cfg.get("validation_cert")
    # special value 'system' means do not overwrite the file
    # but still render the template to contain 'validation_key'
    if vcert:
        if vcert != "system":
            util.write_file(vkey_path, vcert)
        elif not os.path.isfile(vkey_path):
            log.warning(
                "chef validation_cert provided as 'system', but "
                "validation_key path '%s' does not exist.",
                vkey_path,
            )

    # Create the chef config from template
    template_fn = cloud.get_template_filename("chef_client.rb")
    if template_fn:
        iid = str(cloud.datasource.get_instance_id())
        params = get_template_params(iid, chef_cfg, log)
        # Do a best effort attempt to ensure that the template values that
        # are associated with paths have their parent directory created
        # before they are used by the chef-client itself.
        param_paths = set()
        for (k, v) in params.items():
            if k in CHEF_RB_TPL_PATH_KEYS and v:
                param_paths.add(os.path.dirname(v))
        util.ensure_dirs(param_paths)
        templater.render_to_file(template_fn, CHEF_RB_PATH, params)
    else:
        log.warning("No template found, not rendering to %s", CHEF_RB_PATH)

    # Set the firstboot json
    fb_filename = util.get_cfg_option_str(
        chef_cfg, "firstboot_path", default=CHEF_FB_PATH
    )
    if not fb_filename:
        log.info("First boot path empty, not writing first boot json file")
    else:
        initial_json = {}
        if "run_list" in chef_cfg:
            initial_json["run_list"] = chef_cfg["run_list"]
        if "initial_attributes" in chef_cfg:
            # Copy attributes at the top level of the firstboot json.
            initial_attributes = chef_cfg["initial_attributes"]
            for k in list(initial_attributes.keys()):
                initial_json[k] = initial_attributes[k]
        util.write_file(fb_filename, json.dumps(initial_json))

    # Try to install chef, if its not already installed...
    force_install = util.get_cfg_option_bool(
        chef_cfg, "force_install", default=False
    )
    installed = subp.is_exe(CHEF_EXEC_PATH)
    if not installed or force_install:
        # install_chef decides (per install_type) whether chef should run.
        run = install_chef(cloud, chef_cfg, log)
    elif installed:
        run = util.get_cfg_option_bool(chef_cfg, "exec", default=False)
    else:
        run = False
    if run:
        run_chef(chef_cfg, log)
        post_run_chef(chef_cfg, log)
def handle(_name, cfg, _cloud, log, args):
    """Set user passwords (legacy variant), optionally expire them, and
    toggle sshd PasswordAuthentication via sed per 'ssh_pwauth'.

    @param cfg: full cloud-config dictionary.
    @param log: logger for this module.
    @param args: command-line args; args[0], if given, is the password and
        overrides any chpasswd 'list' in cfg.
    @raises: the first accumulated error, if any occurred.
    """
    if len(args) != 0:
        # if run from command line, and give args, wipe the chpasswd['list']
        password = args[0]
        if 'chpasswd' in cfg and 'list' in cfg['chpasswd']:
            del cfg['chpasswd']['list']
    else:
        password = util.get_cfg_option_str(cfg, "password", None)

    expire = True
    pw_auth = "no"
    change_pwauth = False
    plist = None

    if 'chpasswd' in cfg:
        chfg = cfg['chpasswd']
        plist = util.get_cfg_option_str(chfg, 'list', plist)
        expire = util.get_cfg_option_bool(chfg, 'expire', expire)

    if not plist and password:
        # No explicit list: apply the single password to the default user.
        user = util.get_cfg_option_str(cfg, "user", "ubuntu")
        plist = "%s:%s" % (user, password)

    errors = []
    if plist:
        plist_in = []
        randlist = []
        users = []
        for line in plist.splitlines():
            u, p = line.split(':', 1)
            if p == "R" or p == "RANDOM":
                p = rand_user_password()
                randlist.append("%s:%s" % (u, p))
            plist_in.append("%s:%s" % (u, p))
            users.append(u)

        ch_in = '\n'.join(plist_in)
        try:
            util.subp(['chpasswd'], ch_in)
            log.debug("changed password for %s:" % users)
        except Exception as e:
            errors.append(e)
            log.warn("failed to set passwords with chpasswd: %s" % e)

        if len(randlist):
            sys.stdout.write(
                "%s\n%s\n" % ("Set the following passwords\n",
                              '\n'.join(randlist)))

        if expire:
            enum = len(errors)
            for u in users:
                try:
                    util.subp(['passwd', '--expire', u])
                except Exception as e:
                    errors.append(e)
                    log.warn("failed to expire account for %s" % u)
            # NOTE(review): 'u' here is the last user from the loop above;
            # the debug line only fires when no expire errors occurred.
            if enum == len(errors):
                log.debug("expired passwords for: %s" % u)

    if 'ssh_pwauth' in cfg:
        val = str(cfg['ssh_pwauth']).lower()
        if val in ("true", "1", "yes"):
            pw_auth = "yes"
            change_pwauth = True
        elif val in ("false", "0", "no"):
            pw_auth = "no"
            change_pwauth = True
        else:
            change_pwauth = False

    if change_pwauth:
        # BUGFIX: raw string -- '\(' etc. are invalid escape sequences in a
        # normal Python 3 string literal; the regex bytes are unchanged.
        pa_s = r"\(#*\)\(PasswordAuthentication[[:space:]]\+\)\(yes\|no\)"
        msg = "set PasswordAuthentication to '%s'" % pw_auth
        try:
            cmd = ['sed', '-i', 's,%s,\\2%s,' % (pa_s, pw_auth),
                   '/etc/ssh/sshd_config']
            util.subp(cmd)
            log.debug(msg)
        except Exception as e:
            log.warn("failed %s" % msg)
            errors.append(e)

        try:
            # Return value of subp is unused; dropped the dead 'p =' binding.
            util.subp(['service', cfg.get('ssh_svcname', 'ssh'),
                       'restart'])
            log.debug("restarted sshd")
        except Exception:
            # BUGFIX: was a bare 'except:' which also swallowed
            # SystemExit/KeyboardInterrupt.
            log.warn("restart of ssh failed")

    if len(errors):
        raise errors[0]

    return
def handle(name, cfg, _cloud, log, args):
    """Resize the root filesystem (modern variant with zfs support and
    schema validation) to fill its underlying device.

    @param cfg: full cloud-config dictionary ('resize_rootfs' may be a
        bool-like value or NOBLOCK to background the resize).
    @param args: command-line args; args[0], if given, overrides cfg.
    """
    if len(args) != 0:
        resize_root = args[0]
    else:
        resize_root = util.get_cfg_option_str(cfg, "resize_rootfs", True)
    validate_cloudconfig_schema(cfg, schema)
    if not util.translate_bool(resize_root, addons=[NOBLOCK]):
        log.debug("Skipping module named %s, resizing disabled", name)
        return

    # TODO(harlowja): allow what is to be resized to be configurable??
    resize_what = "/"
    result = util.get_mount_info(resize_what, log)
    if not result:
        log.warn("Could not determine filesystem type of %s", resize_what)
        return

    (devpth, fs_type, mount_point) = result

    # if we have a zfs then our device path at this point
    # is the zfs label. For example: vmzroot/ROOT/freebsd
    # we will have to get the zpool name out of this
    # and set the resize_what variable to the zpool
    # so the _resize_zfs function gets the right attribute.
    if fs_type == 'zfs':
        zpool = devpth.split('/')[0]
        devpth = util.get_device_info_from_zpool(zpool)
        if not devpth:
            return  # could not find device from zpool
        resize_what = zpool

    info = "dev=%s mnt_point=%s path=%s" % (devpth, mount_point, resize_what)
    log.debug("resize_info: %s" % info)

    devpth = maybe_get_writable_device_path(devpth, info, log)
    if not devpth:
        return  # devpath was not a writable block device

    resizer = None
    if can_skip_resize(fs_type, resize_what, devpth):
        log.debug("Skip resize filesystem type %s for %s",
                  fs_type, resize_what)
        return

    # Pick the resize command builder by filesystem-type prefix.
    fstype_lc = fs_type.lower()
    for (pfix, root_cmd) in RESIZE_FS_PREFIXES_CMDS:
        if fstype_lc.startswith(pfix):
            resizer = root_cmd
            break

    if not resizer:
        log.warn("Not resizing unknown filesystem type %s for %s",
                 fs_type, resize_what)
        return

    resize_cmd = resizer(resize_what, devpth)
    log.debug("Resizing %s (%s) using %s", resize_what, fs_type,
              ' '.join(resize_cmd))

    if resize_root == NOBLOCK:
        # Fork to a child that will run
        # the resize command
        util.fork_cb(
            util.log_time, logfunc=log.debug, msg="backgrounded Resizing",
            func=do_resize, args=(resize_cmd, log))
    else:
        util.log_time(logfunc=log.debug, msg="Resizing",
                      func=do_resize, args=(resize_cmd, log))

    action = 'Resized'
    if resize_root == NOBLOCK:
        action = 'Resizing (via forking)'
    log.debug("%s root filesystem (type=%s, val=%s)", action, fs_type,
              resize_root)
def handle(_name, cfg, _cloud, log, args):
    """Resize the root filesystem (oldest variant): mknod a temporary
    block device node for the root device, detect its filesystem with
    blkid, and grow it with resize2fs/xfs_growfs.

    Fixes applied to this legacy (Python 2 era) block: 0o400 octal
    literal and narrowed bare 'except:' -- required for Python 3.
    """
    if len(args) != 0:
        resize_root = False
        if str(args[0]).lower() in ['true', '1', 'on', 'yes']:
            resize_root = True
    else:
        resize_root = util.get_cfg_option_str(cfg, "resize_rootfs", True)

    if str(resize_root).lower() in ['false', '0']:
        return

    # we use mktemp rather than mkstemp because early in boot nothing
    # else should be able to race us for this, and we need to mknod.
    devpth = tempfile.mktemp(prefix="cloudinit.resizefs.", dir="/run")

    try:
        st_dev = os.stat("/").st_dev
        dev = os.makedev(os.major(st_dev), os.minor(st_dev))
        os.mknod(devpth, 0o400 | stat.S_IFBLK, dev)
    except Exception:
        # mknod fails in containers (no CAP_MKNOD); that's expected.
        if util.is_container():
            log.debug("inside container, ignoring mknod failure in resizefs")
            return
        log.warn("Failed to make device node to resize /")
        raise

    cmd = ['blkid', '-c', '/dev/null', '-sTYPE', '-ovalue', devpth]
    try:
        (fstype, _err) = util.subp(cmd)
    except subprocess.CalledProcessError as e:
        log.warn("Failed to get filesystem type of maj=%s, min=%s via: %s" %
                 (os.major(st_dev), os.minor(st_dev), cmd))
        log.warn("output=%s\nerror=%s\n", e.output[0], e.output[1])
        os.unlink(devpth)
        raise

    if str(fstype).startswith("ext"):
        resize_cmd = ['resize2fs', devpth]
    elif fstype == "xfs":
        resize_cmd = ['xfs_growfs', devpth]
    else:
        os.unlink(devpth)
        log.debug("not resizing unknown filesystem %s" % fstype)
        return

    if resize_root == "noblock":
        # Fork so boot is not blocked while the filesystem grows.
        fid = os.fork()
        if fid == 0:
            try:
                do_resize(resize_cmd, devpth, log)
                os._exit(0)  # pylint: disable=W0212
            except Exception as exc:
                sys.stderr.write("Failed: %s" % exc)
                os._exit(1)  # pylint: disable=W0212
    else:
        do_resize(resize_cmd, devpth, log)

    log.debug("resizing root filesystem (type=%s, maj=%i, min=%i, val=%s)" %
              (str(fstype).rstrip("\n"), os.major(st_dev),
               os.minor(st_dev), resize_root))

    return
def handle(_name, cfg, cloud, log, args):
    """Set user passwords from 'password'/'chpasswd' config, supporting
    plain, RANDOM, and pre-hashed entries; optionally expire passwords
    and delegate sshd PasswordAuthentication handling.

    @param args: command-line args; args[0], if given, is the password and
        overrides any chpasswd 'list' in cfg.
    @raises: the last accumulated error, if any occurred.
    """
    if len(args) != 0:
        # if run from command line, and give args, wipe the chpasswd['list']
        password = args[0]
        if 'chpasswd' in cfg and 'list' in cfg['chpasswd']:
            del cfg['chpasswd']['list']
    else:
        password = util.get_cfg_option_str(cfg, "password", None)

    expire = True
    plist = None

    if 'chpasswd' in cfg:
        chfg = cfg['chpasswd']
        # 'list' may be either a YAML list or a multiline string.
        if 'list' in chfg and chfg['list']:
            if isinstance(chfg['list'], list):
                log.debug("Handling input for chpasswd as list.")
                plist = util.get_cfg_option_list(chfg, 'list', plist)
            else:
                log.debug("Handling input for chpasswd as multiline string.")
                plist = util.get_cfg_option_str(chfg, 'list', plist)
                if plist:
                    plist = plist.splitlines()

        expire = util.get_cfg_option_bool(chfg, 'expire', expire)

    if not plist and password:
        # No explicit list: apply the single password to the default user.
        (users, _groups) = ug_util.normalize_users_groups(cfg, cloud.distro)
        (user, _user_config) = ug_util.extract_default(users)
        if user:
            plist = ["%s:%s" % (user, password)]
        else:
            log.warning("No default or defined user to change password for.")

    errors = []
    if plist:
        plist_in = []
        hashed_plist_in = []
        hashed_users = []
        randlist = []
        users = []
        # N.B. This regex is included in the documentation (i.e. the module
        # docstring), so any changes to it should be reflected there.
        prog = re.compile(r'\$(1|2a|2y|5|6)(\$.+){2}')
        for line in plist:
            u, p = line.split(':', 1)
            # Entries matching a crypt-style hash go to 'chpasswd -e'.
            if prog.match(p) is not None and ":" not in p:
                hashed_plist_in.append("%s:%s" % (u, p))
                hashed_users.append(u)
            else:
                if p == "R" or p == "RANDOM":
                    p = rand_user_password()
                    randlist.append("%s:%s" % (u, p))
                plist_in.append("%s:%s" % (u, p))
                users.append(u)

        ch_in = '\n'.join(plist_in) + '\n'
        if users:
            try:
                log.debug("Changing password for %s:", users)
                util.subp(['chpasswd'], ch_in)
            except Exception as e:
                errors.append(e)
                util.logexc(
                    log, "Failed to set passwords with chpasswd for %s",
                    users)

        hashed_ch_in = '\n'.join(hashed_plist_in) + '\n'
        if hashed_users:
            try:
                log.debug("Setting hashed password for %s:", hashed_users)
                util.subp(['chpasswd', '-e'], hashed_ch_in)
            except Exception as e:
                errors.append(e)
                util.logexc(
                    log, "Failed to set hashed passwords with chpasswd for %s",
                    hashed_users)

        if len(randlist):
            # Surface generated passwords on stderr so the operator sees
            # them in the console log.
            blurb = ("Set the following 'random' passwords\n",
                     '\n'.join(randlist))
            sys.stderr.write("%s\n%s\n" % blurb)

        if expire:
            expired_users = []
            for u in users:
                try:
                    util.subp(['passwd', '--expire', u])
                    expired_users.append(u)
                except Exception as e:
                    errors.append(e)
                    util.logexc(log, "Failed to set 'expire' for %s", u)
            if expired_users:
                log.debug("Expired passwords for: %s users", expired_users)

    handle_ssh_pwauth(cfg.get('ssh_pwauth'),
                      service_cmd=cloud.distro.init_cmd,
                      service_name=cloud.distro.get_option('ssh_svcname',
                                                           'ssh'))

    if len(errors):
        log.debug("%s errors occured, re-raising the last one", len(errors))
        raise errors[-1]
def handle(name, cfg, cloud, log, _args):
    """Configure and run chef (older variant): write validation key,
    render client.rb, write firstboot.json, then install/run chef-client
    per 'install_type'."""
    # If there isn't a chef key in the configuration don't do anything
    if 'chef' not in cfg:
        log.debug(("Skipping module named %s,"
                   " no 'chef' key in configuration"), name)
        return
    chef_cfg = cfg['chef']

    # Ensure the chef directories we use exist
    for d in CHEF_DIRS:
        util.ensure_dir(d)

    # Set the validation key based on the presence of either 'validation_key'
    # or 'validation_cert'. In the case where both exist, 'validation_key'
    # takes precedence
    for key in ('validation_key', 'validation_cert'):
        if key in chef_cfg and chef_cfg[key]:
            util.write_file('/etc/chef/validation.pem', chef_cfg[key])
            break

    # Create the chef config from template
    template_fn = cloud.get_template_filename('chef_client.rb')
    if template_fn:
        iid = str(cloud.datasource.get_instance_id())
        params = {
            'server_url': chef_cfg['server_url'],
            'node_name': util.get_cfg_option_str(chef_cfg, 'node_name',
                                                 iid),
            'environment': util.get_cfg_option_str(chef_cfg, 'environment',
                                                   '_default'),
            'validation_name': chef_cfg['validation_name']
        }
        templater.render_to_file(template_fn, '/etc/chef/client.rb', params)
    else:
        log.warn("No template found, not rendering to /etc/chef/client.rb")

    # set the firstboot json
    initial_json = {}
    if 'run_list' in chef_cfg:
        initial_json['run_list'] = chef_cfg['run_list']
    if 'initial_attributes' in chef_cfg:
        # Copy attributes at the top level of the firstboot json.
        initial_attributes = chef_cfg['initial_attributes']
        for k in list(initial_attributes.keys()):
            initial_json[k] = initial_attributes[k]
    util.write_file('/etc/chef/firstboot.json', json.dumps(initial_json))

    # If chef is not installed, we install chef based on 'install_type'
    if (not os.path.isfile('/usr/bin/chef-client') or
            util.get_cfg_option_bool(chef_cfg,
                                     'force_install', default=False)):
        install_type = util.get_cfg_option_str(chef_cfg, 'install_type',
                                               'packages')
        if install_type == "gems":
            # this will install and run the chef-client from gems
            chef_version = util.get_cfg_option_str(chef_cfg, 'version',
                                                   None)
            ruby_version = util.get_cfg_option_str(chef_cfg, 'ruby_version',
                                                   RUBY_VERSION_DEFAULT)
            install_chef_from_gems(cloud.distro, ruby_version, chef_version)
            # and finally, run chef-client
            log.debug('Running chef-client')
            util.subp(['/usr/bin/chef-client',
                       '-d', '-i', '1800', '-s', '20'], capture=False)
        elif install_type == 'packages':
            # this will install and run the chef-client from packages
            cloud.distro.install_packages(('chef', ))
        elif install_type == 'omnibus':
            url = util.get_cfg_option_str(chef_cfg, "omnibus_url",
                                          OMNIBUS_URL)
            content = url_helper.readurl(url=url, retries=5)
            with util.tempdir() as tmpd:
                # use tmpd over tmpfile to avoid 'Text file busy' on execute
                tmpf = "%s/chef-omnibus-install" % tmpd
                # NOTE(review): str(content) presumably yields the script
                # text from the readurl response -- verify it is not a
                # bytes repr on this url_helper version.
                util.write_file(tmpf, str(content), mode=0o700)
                util.subp([tmpf], capture=False)
        else:
            log.warn("Unknown chef install type %s", install_type)
def handle(name, cfg, cloud, log, _args):
    """Install and configure puppet (modern variant supporting 'packages'
    and 'aio' install types), optionally start the service and run the
    agent once."""
    # If there isn't a puppet key in the configuration don't do anything
    if "puppet" not in cfg:
        log.debug("Skipping module named %s, no 'puppet' configuration found",
                  name)
        return

    puppet_cfg = cfg["puppet"]
    # Start by installing the puppet package if necessary...
    install = util.get_cfg_option_bool(puppet_cfg, "install", True)
    version = util.get_cfg_option_str(puppet_cfg, "version", None)
    collection = util.get_cfg_option_str(puppet_cfg, "collection", None)
    install_type = util.get_cfg_option_str(puppet_cfg, "install_type",
                                           "packages")
    cleanup = util.get_cfg_option_bool(puppet_cfg, "cleanup", True)
    run = util.get_cfg_option_bool(puppet_cfg, "exec", default=False)
    start_puppetd = util.get_cfg_option_bool(puppet_cfg, "start_service",
                                             default=True)
    aio_install_url = util.get_cfg_option_str(puppet_cfg, "aio_install_url",
                                              default=AIO_INSTALL_URL)

    # AIO and distro packages use different paths
    if install_type == "aio":
        puppet_user = "******"
        puppet_bin = "/opt/puppetlabs/bin/puppet"
        puppet_package = "puppet-agent"
    else:  # default to 'packages'
        puppet_user = "******"
        puppet_bin = "puppet"
        puppet_package = "puppet"

    package_name = util.get_cfg_option_str(puppet_cfg, "package_name",
                                           puppet_package)
    if not install and version:
        # A pinned version only makes sense when install is enabled.
        log.warning(
            "Puppet install set to false but version supplied,"
            " doing nothing.")
    elif install:
        log.debug(
            "Attempting to install puppet %s from %s",
            version if version else "latest",
            install_type,
        )
        if install_type == "packages":
            cloud.distro.install_packages((package_name, version))
        elif install_type == "aio":
            install_puppet_aio(aio_install_url, version, collection, cleanup)
        else:
            log.warning("Unknown puppet install type '%s'", install_type)
            run = False

    # Default file locations are queried from the puppet binary itself.
    conf_file = util.get_cfg_option_str(
        puppet_cfg, "conf_file", get_config_value(puppet_bin, "config"))
    ssl_dir = util.get_cfg_option_str(
        puppet_cfg, "ssl_dir", get_config_value(puppet_bin, "ssldir"))
    csr_attributes_path = util.get_cfg_option_str(
        puppet_cfg,
        "csr_attributes_path",
        get_config_value(puppet_bin, "csr_attributes"),
    )

    p_constants = PuppetConstants(conf_file, ssl_dir, csr_attributes_path,
                                  log)

    # ... and then update the puppet configuration
    if "conf" in puppet_cfg:
        # Add all sections from the conf object to puppet.conf
        contents = util.load_file(p_constants.conf_path)
        # Create object for reading puppet.conf values
        puppet_config = helpers.DefaultingConfigParser()
        # Read puppet.conf values from original file in order to be able to
        # mix the rest up. First clean them up
        # (TODO(harlowja) is this really needed??)
        cleaned_lines = [i.lstrip() for i in contents.splitlines()]
        cleaned_contents = "\n".join(cleaned_lines)
        puppet_config.read_file(StringIO(cleaned_contents),
                                source=p_constants.conf_path)
        for (cfg_name, cfg) in puppet_cfg["conf"].items():
            # Cert configuration is a special case
            # Dump the puppetserver ca certificate in the correct place
            if cfg_name == "ca_cert":
                # Puppet ssl sub-directory isn't created yet
                # Create it with the proper permissions and ownership
                util.ensure_dir(p_constants.ssl_dir, 0o771)
                util.chownbyname(p_constants.ssl_dir, puppet_user, "root")
                util.ensure_dir(p_constants.ssl_cert_dir)
                util.chownbyname(p_constants.ssl_cert_dir, puppet_user,
                                 "root")
                util.write_file(p_constants.ssl_cert_path, cfg)
                util.chownbyname(p_constants.ssl_cert_path, puppet_user,
                                 "root")
            else:
                # Iterate through the config items, we'll use ConfigParser.set
                # to overwrite or create new items as needed
                for (o, v) in cfg.items():
                    if o == "certname":
                        # Expand %f as the fqdn
                        # TODO(harlowja) should this use the cloud fqdn??
                        v = v.replace("%f", socket.getfqdn())
                        # Expand %i as the instance id
                        v = v.replace("%i", cloud.get_instance_id())
                        # certname needs to be downcased
                        v = v.lower()
                    puppet_config.set(cfg_name, o, v)
        # We got all our config as wanted we'll rename
        # the previous puppet.conf and create our new one
        util.rename(p_constants.conf_path,
                    "%s.old" % (p_constants.conf_path))
        util.write_file(p_constants.conf_path, puppet_config.stringify())

    if "csr_attributes" in puppet_cfg:
        util.write_file(
            p_constants.csr_attributes_path,
            yaml.dump(puppet_cfg["csr_attributes"],
                      default_flow_style=False),
        )

    # Set it up so it autostarts
    if start_puppetd:
        _autostart_puppet(log)

    # Run the agent if needed
    if run:
        log.debug("Running puppet-agent")
        cmd = [puppet_bin, "agent"]
        if "exec_args" in puppet_cfg:
            cmd_args = puppet_cfg["exec_args"]
            if isinstance(cmd_args, (list, tuple)):
                cmd.extend(cmd_args)
            elif isinstance(cmd_args, str):
                cmd.extend(cmd_args.split())
            else:
                log.warning(
                    "Unknown type %s provided for puppet"
                    " 'exec_args' expected list, tuple,"
                    " or string",
                    type(cmd_args),
                )
                cmd.extend(PUPPET_AGENT_DEFAULT_ARGS)
        else:
            cmd.extend(PUPPET_AGENT_DEFAULT_ARGS)
        subp.subp(cmd, capture=False)

    if start_puppetd:
        # Start puppetd
        subp.subp(["service", "puppet", "start"], capture=False)