def _verify_keys(self): ''' Checks that the keys in the rh_subscription dict from the user-data are what we expect. ''' for k in self.rhel_cfg: if k not in self.valid_rh_keys: bad_key = "{0} is not a valid key for rh_subscription. "\ "Valid keys are: "\ "{1}".format(k, ', '.join(self.valid_rh_keys)) return False, bad_key # Check for bad auto-attach value if (self.auto_attach is not None) and \ not (util.is_true(self.auto_attach) or util.is_false(self.auto_attach)): not_bool = "The key auto-attach must be a boolean value "\ "(True/False " return False, not_bool if (self.servicelevel is not None) and ((not self.auto_attach) or (util.is_false(str(self.auto_attach)))): no_auto = ("The service-level key must be used in conjunction " "with the auto-attach key. Please re-run with " "auto-attach: True") return False, no_auto return True, None
def apply_hostname_bounce(hostname, policy, interface, command,
                          hostname_command="hostname"):
    """Set the system hostname and, per `policy`, bounce `interface`.

    The hostname is always set; the interface bounce is skipped when
    `policy` is a false value, or when the name did not change and the
    policy is not "force".
    """
    # set the hostname to 'hostname' if it is not already set to that.
    # then, if policy is not off, bounce the interface using command
    prev_hostname = util.subp(hostname_command, capture=True)[0].strip()
    util.subp([hostname_command, hostname])
    msg = ("phostname=%s hostname=%s policy=%s interface=%s" %
           (prev_hostname, hostname, policy, interface))
    if util.is_false(policy):
        LOG.debug("pubhname: policy false, skipping [%s]", msg)
        return
    if prev_hostname == hostname and policy != "force":
        LOG.debug("pubhname: no change, policy != force. skipping. [%s]", msg)
        return
    # The bounce command receives its context through the environment.
    env = os.environ.copy()
    env['interface'] = interface
    env['hostname'] = hostname
    env['old_hostname'] = prev_hostname
    if command == "builtin":
        command = BOUNCE_COMMAND
    LOG.debug("pubhname: publishing hostname [%s]", msg)
    # A plain string command runs through the shell; a list does not.
    shell = not isinstance(command, (list, tuple))
    # capture=False, see comments in bug 1202758 and bug 1206164.
    util.log_time(logfunc=LOG.debug, msg="publishing hostname",
                  get_uptime=True, func=util.subp,
                  kwargs={'args': command, 'shell': shell, 'capture': False,
                          'env': env})
def handle(name, cfg, cloud, log, args):
    """Install and configure snaps per the 'snappy' cloud-config section."""
    merged = util.mergemanydict([cfg.get('snappy') or {}, BUILTIN_CFG])

    snappy_mode = str(merged.get("system_snappy", "auto"))
    if util.is_false(snappy_mode):
        LOG.debug("%s: System is not snappy. disabling", name)
        return
    if snappy_mode.lower() == "auto" and not system_is_snappy():
        LOG.debug("%s: 'auto' mode, and system not snappy", name)
        return

    set_snappy_command()

    failures = []
    for op in get_package_ops(packages=merged['packages'],
                              configs=merged['config'],
                              fspath=merged['packages_dir']):
        try:
            render_snap_op(**op)
        except Exception as e:
            failures.append((op, e,))
            LOG.warn("'%s' failed for '%s': %s",
                     op['op'], op['name'], e)

    disable_enable_ssh(merged.get('ssh_enabled', False))

    if failures:
        raise Exception("failed to install/configure snaps")
def handle(name, cfg, cloud, log, args):
    """Handler method activated by cloud-init."""
    # Only Ubuntu systems are supported by this module.
    if not isinstance(cloud.distro, ubuntu.Distro):
        log.debug("%s: distro is '%s', not ubuntu. returning",
                  name, cloud.distro.__class__)
        return

    cfg = util.mergemanydict([cfg, DEFAULT_CONFIG])
    target = cfg['init_switch']['target']
    reboot = cfg['init_switch']['reboot']

    # Positional args (from the command line) override the config values.
    if len(args) != 0:
        target = args[0]
        if len(args) > 1:
            reboot = util.is_true(args[1])

    if not target:
        log.debug("%s: target=%s. nothing to do", name, target)
        return

    if not util.which('dpkg'):
        log.warn("%s: 'dpkg' not available. Assuming not ubuntu", name)
        return

    supported = ('upstart', 'systemd')
    if target not in supported:
        log.warn("%s: target set to %s, expected one of: %s",
                 name, target, str(supported))

    # /run/systemd/system exists only when systemd is the running init.
    if os.path.exists("/run/systemd/system"):
        current = "systemd"
    else:
        current = "upstart"

    if current == target:
        log.debug("%s: current = target = %s. nothing to do", name, target)
        return

    try:
        util.subp(['sh', '-s', target], data=SWITCH_INIT)
    except util.ProcessExecutionError as e:
        log.warn("%s: Failed to switch to init '%s'. %s", name, target, e)
        return

    if util.is_false(reboot):
        log.info("%s: switched '%s' to '%s'. reboot=false, not rebooting.",
                 name, current, target)
        return

    try:
        log.warn("%s: switched '%s' to '%s'. rebooting.",
                 name, current, target)
        logging.flushLoggers(log)
        _fire_reboot(log, wait_attempts=4, initial_sleep=4)
    except Exception as e:
        util.logexc(log, "Requested reboot did not happen!")
        raise
def handle(_name, cfg, _cloud, log, _args):
    """Resize partitions/filesystems per the 'growpart' config section."""
    # growpart is not supported on AIX.
    if _cloud.distro.name == "aix":
        return

    if 'growpart' not in cfg:
        log.debug("No 'growpart' entry in cfg. Using default: %s" %
                  DEFAULT_CONFIG)
        cfg['growpart'] = DEFAULT_CONFIG

    gp_cfg = cfg.get('growpart')
    if not isinstance(gp_cfg, dict):
        log.warn("'growpart' in config was not a dict")
        return

    mode = gp_cfg.get('mode', "auto")
    if util.is_false(mode):
        log.debug("growpart disabled: mode=%s" % mode)
        return

    # An admin may drop /etc/growroot-disabled to suppress resizing.
    if util.is_false(gp_cfg.get('ignore_growroot_disabled', False)):
        if os.path.isfile("/etc/growroot-disabled"):
            log.debug("growpart disabled: /etc/growroot-disabled exists")
            log.debug("use ignore_growroot_disabled to ignore")
            return

    devices = util.get_cfg_option_list(cfg, "devices", ["/"])
    if not devices:
        log.debug("growpart: empty device list")
        return

    try:
        resizer = resizer_factory(mode)
    except (ValueError, TypeError) as e:
        log.debug("growpart unable to find resizer for '%s': %s" % (mode, e))
        if mode != "auto":
            raise e
        return

    results = util.log_time(logfunc=log.debug, msg="resize_devices",
                            func=resize_devices, args=(resizer, devices))
    for (entry, action, message) in results:
        if action == RESIZE.CHANGED:
            log.info("'%s' resized: %s" % (entry, message))
        else:
            log.debug("'%s' %s: %s" % (entry, action, message))
def handle(name, cfg, cloud, log, _args): if util.is_false(cfg.get('apt_configure_enabled', True)): log.debug("Skipping module named %s, disabled by config.", name) return release = get_release() mirrors = find_apt_mirror_info(cloud, cfg) if not mirrors or "primary" not in mirrors: log.debug(("Skipping module named %s," " no package 'mirror' located"), name) return # backwards compatibility mirror = mirrors["primary"] mirrors["mirror"] = mirror log.debug("Mirror info: %s" % mirrors) if not util.get_cfg_option_bool(cfg, 'apt_preserve_sources_list', False): generate_sources_list(release, mirrors, cloud, log) old_mirrors = cfg.get('apt_old_mirrors', {"primary": "archive.ubuntu.com/ubuntu", "security": "security.ubuntu.com/ubuntu"}) rename_apt_lists(old_mirrors, mirrors) try: apply_apt_config(cfg, APT_PROXY_FN, APT_CONFIG_FN) except Exception as e: log.warn("failed to proxy or apt config info: %s", e) # Process 'apt_sources' if 'apt_sources' in cfg: params = mirrors params['RELEASE'] = release params['MIRROR'] = mirror matchcfg = cfg.get('add_apt_repo_match', ADD_APT_REPO_MATCH) if matchcfg: matcher = re.compile(matchcfg).search else: def matcher(x): return False errors = add_sources(cfg['apt_sources'], params, aa_repo_match=matcher) for e in errors: log.warn("Add source error: %s", ':'.join(e)) dconf_sel = util.get_cfg_option_str(cfg, 'debconf_selections', False) if dconf_sel: log.debug("Setting debconf selections per cloud config") try: util.subp(('debconf-set-selections', '-'), dconf_sel) except Exception: util.logexc(log, "Failed to run debconf-set-selections")
def handle(name, cfg, cloud, log, args):
    """Install/configure snaps and manage SSH enablement on snappy systems."""
    cfgin = cfg.get('snappy')
    if not cfgin:
        cfgin = {}
    mycfg = util.mergemanydict([cfgin, BUILTIN_CFG])

    sys_snappy = str(mycfg.get("system_snappy", "auto"))
    if util.is_false(sys_snappy):
        LOG.debug("%s: System is not snappy. disabling", name)
        return

    if sys_snappy.lower() == "auto" and not(system_is_snappy()):
        LOG.debug("%s: 'auto' mode, and system not snappy", name)
        return

    set_snappy_command()

    pkg_ops = get_package_ops(packages=mycfg['packages'],
                              configs=mycfg['config'],
                              fspath=mycfg['packages_dir'])

    fails = []
    for pkg_op in pkg_ops:
        try:
            render_snap_op(**pkg_op)
        except Exception as e:
            fails.append((pkg_op, e,))
            LOG.warn("'%s' failed for '%s': %s",
                     pkg_op['op'], pkg_op['name'], e)

    # Default to disabling SSH
    ssh_enabled = mycfg.get('ssh_enabled', "auto")

    # If the user has not explicitly enabled or disabled SSH, then enable it
    # when password SSH authentication is requested or there are SSH keys
    if ssh_enabled == "auto":
        user_ssh_keys = cloud.get_public_ssh_keys() or None
        password_auth_enabled = cfg.get('ssh_pwauth', False)
        if user_ssh_keys:
            LOG.debug("Enabling SSH, ssh keys found in datasource")
            ssh_enabled = True
        elif cfg.get('ssh_authorized_keys'):
            LOG.debug("Enabling SSH, ssh keys found in config")
            # BUG FIX: this branch previously logged "Enabling SSH" but
            # never set ssh_enabled, leaving it as the string "auto".
            ssh_enabled = True
        elif password_auth_enabled:
            LOG.debug("Enabling SSH, password authentication requested")
            ssh_enabled = True
    elif ssh_enabled not in (True, False):
        LOG.warn("Unknown value '%s' in ssh_enabled", ssh_enabled)

    disable_enable_ssh(ssh_enabled)

    if fails:
        raise Exception("failed to install/configure snaps")
def handle(name, cfg, cloud, log, args):
    """Apply the configured (or datasource-provided) system locale."""
    # A command-line argument wins over config / datasource values.
    locale = args[0] if len(args) != 0 else util.get_cfg_option_str(
        cfg, "locale", cloud.get_locale())

    if util.is_false(locale):
        log.debug("Skipping module named %s, disabled by config: %s",
                  name, locale)
        return

    log.debug("Setting locale to %s", locale)
    locale_cfgfile = util.get_cfg_option_str(cfg, "locale_configfile")
    cloud.distro.apply_locale(locale, locale_cfgfile)
def handle(name, ocfg, cloud, log, _):
    """process the config for apt_config. This can be called from
    curthooks if a global apt config was provided or via the "apt"
    standalone command."""
    # keeping code close to curtin codebase via entry handler
    target = None
    if log is not None:
        global LOG
        LOG = log
    # feed back converted config, but only work on the subset under 'apt'
    ocfg = convert_to_v3_apt_format(ocfg)
    cfg = ocfg.get('apt', {})

    if not isinstance(cfg, dict):
        # BUG FIX: the type was previously passed as a second argument to
        # ValueError (logging-style), so "%s" was never interpolated.
        raise ValueError(
            "Expected dictionary for 'apt' config, found %s" % type(cfg))

    LOG.debug("handling apt (module %s) with apt config '%s'", name, cfg)

    release = util.lsb_release(target=target)['codename']
    arch = util.get_architecture(target)
    mirrors = find_apt_mirror_info(cfg, cloud, arch=arch)
    LOG.debug("Apt Mirror info: %s", mirrors)

    apply_debconf_selections(cfg, target)

    if util.is_false(cfg.get('preserve_sources_list', False)):
        generate_sources_list(cfg, release, mirrors, cloud)
        rename_apt_lists(mirrors, target)

    try:
        apply_apt_config(cfg, APT_PROXY_FN, APT_CONFIG_FN)
    except (IOError, OSError):
        LOG.exception("Failed to apply proxy or apt config info:")

    # Process 'apt_source -> sources {dict}'
    if 'sources' in cfg:
        params = mirrors
        params['RELEASE'] = release
        params['MIRROR'] = mirrors["MIRROR"]

        matcher = None
        matchcfg = cfg.get('add_apt_repo_match', ADD_APT_REPO_MATCH)
        if matchcfg:
            matcher = re.compile(matchcfg).search

        add_apt_sources(cfg['sources'], cloud, target=target,
                        template_params=params, aa_repo_match=matcher)
def handle(name, cfg, _cloud, log, _args):
    """Seed grub-pc debconf install_devices selections for grub-dpkg."""
    mycfg = cfg.get("grub_dpkg", cfg.get("grub-dpkg", {}))
    if not mycfg:
        mycfg = {}

    enabled = mycfg.get('enabled', True)
    if util.is_false(enabled):
        log.debug("%s disabled by config grub_dpkg/enabled=%s", name, enabled)
        return

    idevs = util.get_cfg_option_str(mycfg, "grub-pc/install_devices", None)
    idevs_empty = util.get_cfg_option_str(
        mycfg, "grub-pc/install_devices_empty", None)

    # A partition (sda1/xvda1) present without its parent disk suggests a
    # partition-only view; in that case install to no device.
    if ((os.path.exists("/dev/sda1") and not os.path.exists("/dev/sda")) or
            (os.path.exists("/dev/xvda1") and
             not os.path.exists("/dev/xvda"))):
        if idevs is None:
            idevs = ""
        if idevs_empty is None:
            idevs_empty = "true"
    else:
        if idevs_empty is None:
            idevs_empty = "false"
        if idevs is None:
            # Default to /dev/sda, then prefer the first device that exists.
            idevs = "/dev/sda"
            for dev in ("/dev/sda", "/dev/vda", "/dev/xvda",
                        "/dev/sda1", "/dev/vda1", "/dev/xvda1"):
                if os.path.exists(dev):
                    idevs = dev
                    break

    # now idevs and idevs_empty are set to determined values
    # or, those set by user
    dconf_sel = (("grub-pc grub-pc/install_devices string %s\n"
                  "grub-pc grub-pc/install_devices_empty boolean %s\n") %
                 (idevs, idevs_empty))
    log.debug("Setting grub debconf-set-selections with '%s','%s'" %
              (idevs, idevs_empty))

    try:
        util.subp(['debconf-set-selections'], dconf_sel)
    except Exception:
        # BUG FIX: was a bare 'except:', which also swallows SystemExit
        # and KeyboardInterrupt.
        util.logexc(log, "Failed to run debconf-set-selections for grub-dpkg")
def temporary_hostname(temp_hostname, cfg, hostname_command='hostname'):
    """
    Set a temporary hostname, restoring the previous hostname on exit.

    Will have the value of the previous hostname when used as a context
    manager, or None if the hostname was not changed.
    """
    bounce_policy = cfg['hostname_bounce']['policy']
    original = get_hostname(hostname_command)
    # Skip entirely when hostname setting is off, the bounce policy is a
    # false value, or nothing would actually change (and not forced).
    if (not util.is_true(cfg.get('set_hostname'))
            or util.is_false(bounce_policy)
            or (original == temp_hostname and bounce_policy != 'force')):
        yield None
        return
    set_hostname(temp_hostname, hostname_command)
    try:
        yield original
    finally:
        # Always restore the original name, even if the body raised.
        set_hostname(original, hostname_command)
def apply_apt(cfg, cloud, target):
    """Apply a 'v3' apt configuration: mirrors, sources.list, proxy, sources."""
    # cfg is the 'apt' top level dictionary already in 'v3' format.
    if not cfg:
        # No config was provided; bail out unless apt clearly matters here.
        if util.system_is_snappy():
            LOG.debug("Nothing to do: No apt config and running on snappy")
            return
        if not (util.which('apt-get') or util.which('apt')):
            LOG.debug("Nothing to do: No apt config and no apt commands")
            return

    LOG.debug("handling apt config: %s", cfg)

    codename = util.lsb_release(target=target)['codename']
    arch = util.get_architecture(target)
    mirror_info = find_apt_mirror_info(cfg, cloud, arch=arch)
    LOG.debug("Apt Mirror info: %s", mirror_info)

    if util.is_false(cfg.get('preserve_sources_list', False)):
        generate_sources_list(cfg, codename, mirror_info, cloud)
        rename_apt_lists(mirror_info, target)

    try:
        apply_apt_config(cfg, APT_PROXY_FN, APT_CONFIG_FN)
    except (IOError, OSError):
        LOG.exception("Failed to apply proxy or apt config info:")

    # Process 'apt_source -> sources {dict}'
    if 'sources' in cfg:
        tmpl_params = mirror_info
        tmpl_params['RELEASE'] = codename
        tmpl_params['MIRROR'] = mirror_info["MIRROR"]

        matchcfg = cfg.get('add_apt_repo_match', ADD_APT_REPO_MATCH)
        matcher = re.compile(matchcfg).search if matchcfg else None

        add_apt_sources(cfg['sources'], cloud, target=target,
                        template_params=tmpl_params, aa_repo_match=matcher)
def apply_apt(cfg, cloud, target):
    """Apply the 'v3' apt configuration dict to the target system."""
    # cfg is the 'apt' top level dictionary already in 'v3' format.
    if not cfg:
        # Empty config: only proceed if the system still warrants apt setup.
        should_config, msg = _should_configure_on_empty_apt()
        if not should_config:
            LOG.debug("Nothing to do: No apt config and %s", msg)
            return

    LOG.debug("handling apt config: %s", cfg)

    release = util.lsb_release(target=target)["codename"]
    arch = util.get_dpkg_architecture(target)
    mirrors = find_apt_mirror_info(cfg, cloud, arch=arch)
    LOG.debug("Apt Mirror info: %s", mirrors)

    if util.is_false(cfg.get("preserve_sources_list", False)):
        add_mirror_keys(cfg, target)
        generate_sources_list(cfg, release, mirrors, cloud)
        rename_apt_lists(mirrors, target, arch)

    try:
        apply_apt_config(cfg, APT_PROXY_FN, APT_CONFIG_FN)
    except (IOError, OSError):
        LOG.exception("Failed to apply proxy or apt config info:")

    # Process 'apt_source -> sources {dict}'
    if "sources" in cfg:
        params = mirrors
        params["RELEASE"] = release
        params["MIRROR"] = mirrors["MIRROR"]

        matcher = None
        matchcfg = cfg.get("add_apt_repo_match", ADD_APT_REPO_MATCH)
        if matchcfg:
            matcher = re.compile(matchcfg).search

        add_apt_sources(
            cfg["sources"],
            cloud,
            target=target,
            template_params=params,
            aa_repo_match=matcher,
        )
def apply_hostname_bounce(hostname, policy, interface, command,
                          hostname_command="hostname"):
    """Set the hostname, then bounce `interface` unless policy forbids it."""
    # Record the current name, then set the requested one.
    old_name = util.subp(hostname_command, capture=True)[0].strip()
    util.subp([hostname_command, hostname])
    msg = ("phostname=%s hostname=%s policy=%s interface=%s" %
           (old_name, hostname, policy, interface))

    if util.is_false(policy):
        LOG.debug("pubhname: policy false, skipping [%s]", msg)
        return
    if old_name == hostname and policy != "force":
        LOG.debug("pubhname: no change, policy != force. skipping. [%s]", msg)
        return

    # The bounce command reads its parameters from the environment.
    env = os.environ.copy()
    env.update({'interface': interface,
                'hostname': hostname,
                'old_hostname': old_name})

    if command == "builtin":
        command = IB_BOUNCE_COMMAND

    LOG.debug("pubhname: publishing hostname [%s]", msg)
    shell = not isinstance(command, (list, tuple))
    # capture=False, see comments in bug 1202758 and bug 1206164.
    util.log_time(logfunc=LOG.debug, msg="publishing hostname",
                  get_uptime=True, func=util.subp,
                  kwargs={'args': command, 'shell': shell,
                          'capture': False, 'env': env})
def network_config(self):
    """Return a network config dict for rendering ENI or netplan files."""
    # Return the cached value (which may be None) if already computed.
    if self._network_config != sources.UNSET:
        return self._network_config

    # Xenial, Artful and Bionic will not provide
    # network_config by default unless configured in /etc/cloud/cloud.cfg*.
    if util.is_false(self.ds_cfg.get('apply_network_config', False)):
        self._network_config = None
        return None

    if self.network_json == sources.UNSET:
        # this would happen if get_data hadn't been called. leave as UNSET
        LOG.warning(
            'Unexpected call to network_config when network_json is None.')
        return None

    LOG.debug('network config provided via network_json')
    self._network_config = openstack.convert_net_json(
        self.network_json, known_macs=None)
    return self._network_config
def test_ntp_the_whole_package(self, m_sysd, m_select, m_subp, m_dsubp):
    """Test enabled config renders template, and restarts service"""
    cfg = {"ntp": {"enabled": True}}
    # Exercise every supported distro; service manager differs per distro.
    for distro in cc_ntp.distros:
        mycloud = self._get_cloud(distro)
        ntpconfig = self._mock_ntp_client_config(distro=distro)
        confpath = ntpconfig["confpath"]
        service_name = ntpconfig["service_name"]
        m_select.return_value = ntpconfig

        hosts = cc_ntp.generate_server_names(mycloud.distro.name)
        uses_systemd = True
        expected_service_call = [
            "systemctl",
            "reload-or-restart",
            service_name,
        ]
        expected_content = "servers []\npools {0}\n".format(hosts)

        if distro == "alpine":
            # Alpine uses OpenRC (rc-service), not systemd.
            uses_systemd = False
            expected_service_call = ["rc-service", service_name, "restart"]
            # _mock_ntp_client_config call above did not specify a client
            # value and so it defaults to "ntp" which on Alpine Linux only
            # supports servers and not pools.
            expected_content = "servers {0}\npools []\n".format(hosts)

        m_sysd.return_value = uses_systemd
        with mock.patch("cloudinit.config.cc_ntp.util") as m_util:
            # allow use of util.mergemanydict
            m_util.mergemanydict.side_effect = util.mergemanydict
            # default client is present
            m_subp.which.return_value = True
            # use the config 'enabled' value
            m_util.is_false.return_value = util.is_false(
                cfg["ntp"]["enabled"])
            cc_ntp.handle("notimportant", cfg, mycloud, None, None)
            m_dsubp.subp.assert_called_with(expected_service_call,
                                            capture=True)
        # Rendered template must match the expected servers/pools lines.
        self.assertEqual(expected_content, util.load_file(confpath))
def network_config(self):
    """Return a network config dict for rendering ENI or netplan files."""
    # Return the cached value (which may be None) if already computed.
    if self._network_config != sources.UNSET:
        return self._network_config

    # RELEASE_BLOCKER: SRU to Xenial and Artful SRU should not provide
    # network_config by default unless configured in /etc/cloud/cloud.cfg*.
    # Patch Xenial and Artful before release to default to False.
    if util.is_false(self.ds_cfg.get("apply_network_config", True)):
        self._network_config = None
        return self._network_config

    if self.network_json == sources.UNSET:
        # this would happen if get_data hadn't been called. leave as UNSET
        LOG.warning(
            "Unexpected call to network_config when network_json is None.")
        return None

    LOG.debug("network config provided via network_json")
    self._network_config = openstack.convert_net_json(self.network_json,
                                                      known_macs=None)
    return self._network_config
def network_config(self):
    """Return a network config dict for rendering ENI or netplan files."""
    # Return the cached value (which may be None) if already computed.
    if self._network_config != sources.UNSET:
        return self._network_config

    # RELEASE_BLOCKER: SRU to Xenial and Artful SRU should not provide
    # network_config by default unless configured in /etc/cloud/cloud.cfg*.
    # Patch Xenial and Artful before release to default to False.
    if util.is_false(self.ds_cfg.get('apply_network_config', True)):
        self._network_config = None
        return self._network_config

    if self.network_json == sources.UNSET:
        # this would happen if get_data hadn't been called. leave as UNSET
        LOG.warning(
            'Unexpected call to network_config when network_json is None.')
        return None

    LOG.debug('network config provided via network_json')
    self._network_config = openstack.convert_net_json(
        self.network_json, known_macs=None)
    return self._network_config
def handle(name, cfg, _cloud, log, _args):
    """Seed grub-pc debconf install_devices selections for grub-dpkg."""
    mycfg = cfg.get("grub_dpkg", cfg.get("grub-dpkg", {}))
    if not mycfg:
        mycfg = {}

    enabled = mycfg.get("enabled", True)
    if util.is_false(enabled):
        log.debug("%s disabled by config grub_dpkg/enabled=%s", name, enabled)
        return

    idevs = util.get_cfg_option_str(mycfg, "grub-pc/install_devices", None)
    if idevs is None:
        # Not configured: probe the system for the boot device(s).
        idevs = fetch_idevs(log)

    idevs_empty = mycfg.get("grub-pc/install_devices_empty")
    if idevs_empty is None:
        idevs_empty = not idevs
    elif not isinstance(idevs_empty, bool):
        log.warning(
            "DEPRECATED: grub_dpkg: grub-pc/install_devices_empty value of "
            f"'{idevs_empty}' is not boolean. Use of non-boolean values "
            "will be removed in a future version of cloud-init.")
        idevs_empty = util.translate_bool(idevs_empty)
    # debconf expects lowercase 'true'/'false' for boolean selections.
    idevs_empty = str(idevs_empty).lower()

    # now idevs and idevs_empty are set to determined values
    # or, those set by user
    dconf_sel = ("grub-pc grub-pc/install_devices string %s\n"
                 "grub-pc grub-pc/install_devices_empty boolean %s\n"
                 % (idevs, idevs_empty))
    log.debug("Setting grub debconf-set-selections with '%s','%s'" %
              (idevs, idevs_empty))

    try:
        subp.subp(["debconf-set-selections"], dconf_sel)
    except Exception:
        util.logexc(log, "Failed to run debconf-set-selections for grub-dpkg")
def handle(_name, cfg, _cloud, log, _args):
    """Grow the configured partitions/filesystems per 'growpart' config."""
    if 'growpart' not in cfg:
        log.debug("No 'growpart' entry in cfg. Using default: %s" %
                  DEFAULT_CONFIG)
        cfg['growpart'] = DEFAULT_CONFIG

    gp_cfg = cfg.get('growpart')
    if not isinstance(gp_cfg, dict):
        log.warn("'growpart' in config was not a dict")
        return

    mode = gp_cfg.get('mode', "auto")
    if util.is_false(mode):
        log.debug("growpart disabled: mode=%s" % mode)
        return

    devices = util.get_cfg_option_list(cfg, "devices", ["/"])
    if not devices:
        log.debug("growpart: empty device list")
        return

    try:
        resizer = resizer_factory(mode)
    except (ValueError, TypeError) as e:
        log.debug("growpart unable to find resizer for '%s': %s" %
                  (mode, e))
        # An explicit (non-auto) mode that cannot be satisfied is an error.
        if mode != "auto":
            raise e
        return

    results = util.log_time(logfunc=log.debug, msg="resize_devices",
                            func=resize_devices, args=(resizer, devices))
    for (entry, action, message) in results:
        if action == RESIZE.CHANGED:
            log.info("'%s' resized: %s" % (entry, message))
        else:
            log.debug("'%s' %s: %s" % (entry, action, message))
def retrieve_metadata(path, data, cloud, log):
    """Resolve `path` by trying each datum in `data` in order.

    Each datum is either a plain string (used as literal fallback data)
    or a dict naming one of EXPOSED_DICTS plus a dictpath expression.
    Returns the first successfully resolved value; returns None (after
    logging a warning) when every attempt fails.
    """
    for datum in data:
        if isinstance(datum, (str, unicode)):
            # used for fallback data
            return datum
        elif isinstance(datum, dict):
            kwargs = dict()
            if 'separator' in datum:
                kwargs['separator'] = datum['separator']
            for dataset in EXPOSED_DICTS:
                if dataset in datum:
                    if not hasattr(cloud.datasource, dataset):
                        log.warn('there is no %s dataset', dataset)
                        continue
                    try:
                        obj = getattr(cloud.datasource, dataset)
                        value = util.dictpath(obj, datum[dataset], **kwargs)
                        # if the value is an empty string, and the
                        # configuration hasn't told us that's okay, go
                        # to the next fallback
                        if not value and \
                                util.is_false(datum.get('allowempty', False)):
                            continue
                        return value
                    except Exception as exc:
                        # don't return anything, we proceed to the next datum
                        # BUG FIX: stdlib logging does not accept arbitrary
                        # keyword arguments; the mapping must be passed as
                        # the single positional argument for %(name)s
                        # interpolation to work.
                        log.warn(
                            'using path "%(path)s" against %(dataset)s '
                            'failed: %(exctype)s: %(excmsg)s',
                            {'path': datum[dataset], 'dataset': dataset,
                             'exctype': type(exc).__name__,
                             'excmsg': str(exc)})
    # if we reached this point, all attempts to get the data we want
    # failed, and there wasn't a fallback
    log.warn('all attempts to retrieve metadata for %s failed', path)
def apply_apt(cfg, cloud, target):
    """Apply a 'v3' apt configuration dict to the target."""
    # cfg is the 'apt' top level dictionary already in 'v3' format.
    if not cfg:
        # Empty config: only proceed if apt setup is warranted here.
        should_config, msg = _should_configure_on_empty_apt()
        if not should_config:
            LOG.debug("Nothing to do: No apt config and %s", msg)
            return

    LOG.debug("handling apt config: %s", cfg)

    codename = util.lsb_release(target=target)['codename']
    arch = util.get_architecture(target)
    mirror_info = find_apt_mirror_info(cfg, cloud, arch=arch)
    LOG.debug("Apt Mirror info: %s", mirror_info)

    if util.is_false(cfg.get('preserve_sources_list', False)):
        generate_sources_list(cfg, codename, mirror_info, cloud)
        rename_apt_lists(mirror_info, target)

    try:
        apply_apt_config(cfg, APT_PROXY_FN, APT_CONFIG_FN)
    except (IOError, OSError):
        LOG.exception("Failed to apply proxy or apt config info:")

    # Process 'apt_source -> sources {dict}'
    if 'sources' in cfg:
        tmpl_params = mirror_info
        tmpl_params['RELEASE'] = codename
        tmpl_params['MIRROR'] = mirror_info["MIRROR"]

        matchcfg = cfg.get('add_apt_repo_match', ADD_APT_REPO_MATCH)
        matcher = re.compile(matchcfg).search if matchcfg else None

        add_apt_sources(cfg['sources'], cloud, target=target,
                        template_params=tmpl_params, aa_repo_match=matcher)
def handle(name, cfg, cloud, log, args):
    """Apply the 'snappy' cloud-config: install snaps, toggle SSH."""
    snap_cfg = util.mergemanydict([cfg.get('snappy') or {}, BUILTIN_CFG])

    mode = str(snap_cfg.get("system_snappy", "auto"))
    if util.is_false(mode):
        LOG.debug("%s: System is not snappy. disabling", name)
        return
    if mode.lower() == "auto" and not system_is_snappy():
        LOG.debug("%s: 'auto' mode, and system not snappy", name)
        return

    set_snappy_command()

    ops = get_package_ops(packages=snap_cfg['packages'],
                          configs=snap_cfg['config'],
                          fspath=snap_cfg['packages_dir'])
    failed = []
    for op in ops:
        try:
            render_snap_op(**op)
        except Exception as e:
            failed.append((op, e,))
            LOG.warn("'%s' failed for '%s': %s", op['op'], op['name'], e)

    disable_enable_ssh(snap_cfg.get('ssh_enabled', False))

    if failed:
        raise Exception("failed to install/configure snaps")
def handle_ssh_pwauth(pw_auth, service_cmd=None, service_name="ssh"):
    """Apply sshd PasswordAuthentication changes.

    @param pw_auth: config setting from 'pw_auth'.
       Best given as True, False, or "unchanged".
    @param service_cmd: The service command list (['service'])
    @param service_name: The name of the sshd service for the system.

    @return: None"""
    cfg_name = "PasswordAuthentication"
    if service_cmd is None:
        service_cmd = ["service"]

    if util.is_true(pw_auth):
        cfg_val = 'yes'
    elif util.is_false(pw_auth):
        cfg_val = 'no'
    else:
        bmsg = "Leaving ssh config '%s' unchanged." % cfg_name
        # BUG FIX: pw_auth may be a non-string (e.g. an int from YAML);
        # calling .lower() directly on it raised AttributeError. Coerce
        # with str() so odd values fall through to the warning instead.
        if pw_auth is None or str(pw_auth).lower() == 'unchanged':
            LOG.debug("%s ssh_pwauth=%s", bmsg, pw_auth)
        else:
            LOG.warning("%s Unrecognized value: ssh_pwauth=%s", bmsg, pw_auth)
        return

    updated = update_ssh_config({cfg_name: cfg_val})
    if not updated:
        LOG.debug("No need to restart ssh service, %s not updated.", cfg_name)
        return

    # systemctl takes "restart <name>"; SysV 'service' takes "<name> restart".
    if 'systemctl' in service_cmd:
        cmd = list(service_cmd) + ["restart", service_name]
    else:
        cmd = list(service_cmd) + [service_name, "restart"]
    util.subp(cmd)
    LOG.debug("Restarted the ssh daemon.")
def handle(name, cfg, cloud, log, _args):
    """Write SSH host key fingerprints to the system console."""
    if util.is_false(cfg.get("ssh", {}).get("emit_keys_to_console", True)):
        log.debug(("Skipping module named %s, "
                   "logging of SSH host keys disabled"), name)
        return

    helper_path = _get_helper_tool_path(cloud.distro)
    if not os.path.exists(helper_path):
        log.warning(("Unable to activate module %s,"
                     " helper tool not found at %s"), name, helper_path)
        return

    # Fingerprint-type and key-type blacklists are handed to the helper
    # as comma-separated lists.
    fp_deny = util.get_cfg_option_list(cfg, "ssh_fp_console_blacklist", [])
    key_deny = util.get_cfg_option_list(cfg, "ssh_key_console_blacklist",
                                        ["ssh-dss"])

    try:
        (stdout, _stderr) = subp.subp(
            [helper_path, ','.join(fp_deny), ','.join(key_deny)])
        util.multi_log("%s\n" % (stdout.strip()), stderr=False, console=True)
    except Exception:
        log.warning("Writing keys to the system console failed!")
        raise
def read_azure_ovf(contents):
    """Parse an Azure ovf-env.xml document.

    Returns a tuple (metadata, userdata, config) extracted from the
    LinuxProvisioningConfigurationSet. Raises BrokenAzureDataSource for
    malformed documents and NonAzureDataSource when required sections
    are absent.
    """
    try:
        dom = minidom.parseString(contents)
    except Exception as e:
        raise BrokenAzureDataSource("Invalid ovf-env.xml: %s" % e)

    results = find_child(dom.documentElement,
                         lambda n: n.localName == "ProvisioningSection")

    if len(results) == 0:
        raise NonAzureDataSource("No ProvisioningSection")
    if len(results) > 1:
        raise BrokenAzureDataSource("found '%d' ProvisioningSection items" %
                                    len(results))
    provSection = results[0]

    lpcs_nodes = find_child(
        provSection,
        lambda n: n.localName == "LinuxProvisioningConfigurationSet")

    if len(lpcs_nodes) == 0:
        raise NonAzureDataSource("No LinuxProvisioningConfigurationSet")
    if len(lpcs_nodes) > 1:
        raise BrokenAzureDataSource(
            "found '%d' %ss" %
            (len(lpcs_nodes), "LinuxProvisioningConfigurationSet"))
    lpcs = lpcs_nodes[0]

    if not lpcs.hasChildNodes():
        raise BrokenAzureDataSource("no child nodes of configuration set")

    # Property names copied straight into top-level metadata.
    # NOTE(review): md_props is a string, so 'name in md_props' below is a
    # substring test; it works for the single 'seedfrom' property.
    md_props = 'seedfrom'
    md = {'azure_data': {}}
    cfg = {}
    ud = ""
    password = None
    username = None

    for child in lpcs.childNodes:
        if child.nodeType == dom.TEXT_NODE or not child.localName:
            continue

        name = child.localName.lower()

        # A node with a single text child is treated as a "simple" property.
        simple = False
        value = ""
        if (len(child.childNodes) == 1 and
                child.childNodes[0].nodeType == dom.TEXT_NODE):
            simple = True
            value = child.childNodes[0].wholeText

        attrs = dict([(k, v) for k, v in child.attributes.items()])

        # we accept either UserData or CustomData. If both are present
        # then behavior is undefined.
        if name == "userdata" or name == "customdata":
            if attrs.get('encoding') in (None, "base64"):
                ud = base64.b64decode(''.join(value.split()))
            else:
                ud = value
        elif name == "username":
            username = value
        elif name == "userpassword":
            password = value
        elif name == "hostname":
            md['local-hostname'] = value
        elif name == "dscfg":
            if attrs.get('encoding') in (None, "base64"):
                dscfg = base64.b64decode(''.join(value.split()))
            else:
                dscfg = value
            cfg['datasource'] = {DS_NAME: util.load_yaml(dscfg, default={})}
        elif name == "ssh":
            cfg['_pubkeys'] = load_azure_ovf_pubkeys(child)
        elif name == "disablesshpasswordauthentication":
            cfg['ssh_pwauth'] = util.is_false(value)
        elif simple:
            if name in md_props:
                md[name] = value
            else:
                md['azure_data'][name] = value

    # Build the default-user config from username/password, redacting the
    # placeholder password value.
    defuser = {}
    if username:
        defuser['name'] = username
    if password and DEF_PASSWD_REDACTION != password:
        defuser['passwd'] = encrypt_pass(password)
        defuser['lock_passwd'] = False

    if defuser:
        cfg['system_info'] = {'default_user': defuser}

    if 'ssh_pwauth' not in cfg and password:
        cfg['ssh_pwauth'] = True

    cfg['PreprovisionedVm'] = _extract_preprovisioned_vm_setting(dom)

    return (md, ud, cfg)
def handle_ssh_pwauth(pw_auth, distro: Distro):
    """Apply sshd PasswordAuthentication changes.

    @param pw_auth: config setting from 'pw_auth'.
       Best given as True, False, or "unchanged".
    @param distro: an instance of the distro class for the target distribution

    @return: None"""
    service = distro.get_option("ssh_svcname", "ssh")
    restart_ssh = True
    try:
        # Probe service state to decide whether a restart is possible.
        distro.manage_service("status", service)
    except subp.ProcessExecutionError as e:
        uses_systemd = distro.uses_systemd()
        if not uses_systemd:
            LOG.debug(
                "Writing config 'ssh_pwauth: %s'. SSH service '%s'"
                " will not be restarted because it is not running or not"
                " available.",
                pw_auth,
                service,
            )
            restart_ssh = False
        elif e.exit_code == 3:
            # Service is not running. Write ssh config.
            LOG.debug(
                "Writing config 'ssh_pwauth: %s'. SSH service '%s'"
                " will not be restarted because it is stopped.",
                pw_auth,
                service,
            )
            restart_ssh = False
        elif e.exit_code == 4:
            # Service status is unknown
            LOG.warning(
                "Ignoring config 'ssh_pwauth: %s'."
                " SSH service '%s' is not installed.",
                pw_auth,
                service,
            )
            return
        else:
            LOG.warning(
                "Ignoring config 'ssh_pwauth: %s'."
                " SSH service '%s' is not available. Error: %s.",
                pw_auth,
                service,
                e,
            )
            return

    cfg_name = "PasswordAuthentication"

    if isinstance(pw_auth, str):
        # String values for ssh_pwauth are deprecated in favor of booleans.
        LOG.warning(
            "DEPRECATION: The 'ssh_pwauth' config key should be set to "
            "a boolean value. The string format is deprecated and will be "
            "removed in a future version of cloud-init."
        )
    if util.is_true(pw_auth):
        cfg_val = "yes"
    elif util.is_false(pw_auth):
        cfg_val = "no"
    else:
        bmsg = "Leaving SSH config '%s' unchanged." % cfg_name
        # NOTE(review): pw_auth.lower() assumes a string here; a non-string,
        # non-boolean value would raise AttributeError — confirm upstream
        # validation guarantees this.
        if pw_auth is None or pw_auth.lower() == "unchanged":
            LOG.debug("%s ssh_pwauth=%s", bmsg, pw_auth)
        else:
            LOG.warning("%s Unrecognized value: ssh_pwauth=%s", bmsg, pw_auth)
        return

    updated = update_ssh_config({cfg_name: cfg_val})
    if not updated:
        LOG.debug("No need to restart SSH service, %s not updated.", cfg_name)
        return

    if restart_ssh:
        distro.manage_service("restart", service)
        LOG.debug("Restarted the SSH daemon.")
    else:
        LOG.debug("Not restarting SSH service: service is stopped.")
def handle(_name, cfg, cloud, log, args):
    """Set user passwords and optionally toggle sshd PasswordAuthentication.

    Reads 'password', 'chpasswd' ('list'/'expire') and 'ssh_pwauth' from
    cfg. Passwords of "R"/"RANDOM" are replaced with generated ones which
    are echoed to stderr. Errors are collected and the last one re-raised
    at the end so every requested change is attempted.
    """
    if len(args) != 0:
        # if run from command line, and give args, wipe the chpasswd['list']
        password = args[0]
        if 'chpasswd' in cfg and 'list' in cfg['chpasswd']:
            del cfg['chpasswd']['list']
    else:
        password = util.get_cfg_option_str(cfg, "password", None)

    expire = True
    plist = None

    if 'chpasswd' in cfg:
        chfg = cfg['chpasswd']
        plist = util.get_cfg_option_str(chfg, 'list', plist)
        expire = util.get_cfg_option_bool(chfg, 'expire', expire)

    if not plist and password:
        (users, _groups) = ug_util.normalize_users_groups(cfg, cloud.distro)
        (user, _user_config) = ug_util.extract_default(users)
        if user:
            plist = "%s:%s" % (user, password)
        else:
            # FIX: logger.warn is a deprecated alias of logger.warning
            log.warning("No default or defined user to change password for.")

    errors = []
    if plist:
        plist_in = []
        randlist = []
        users = []
        for line in plist.splitlines():
            u, p = line.split(':', 1)
            if p == "R" or p == "RANDOM":
                p = rand_user_password()
                randlist.append("%s:%s" % (u, p))
            plist_in.append("%s:%s" % (u, p))
            users.append(u)

        ch_in = '\n'.join(plist_in) + '\n'
        try:
            log.debug("Changing password for %s:", users)
            util.subp(['chpasswd'], ch_in)
        except Exception as e:
            errors.append(e)
            util.logexc(log,
                        "Failed to set passwords with chpasswd for %s", users)

        if len(randlist):
            blurb = ("Set the following 'random' passwords\n",
                     '\n'.join(randlist))
            sys.stderr.write("%s\n%s\n" % blurb)

        if expire:
            expired_users = []
            for u in users:
                try:
                    util.subp(['passwd', '--expire', u])
                    expired_users.append(u)
                except Exception as e:
                    errors.append(e)
                    util.logexc(log, "Failed to set 'expire' for %s", u)
            if expired_users:
                log.debug("Expired passwords for: %s users", expired_users)

    change_pwauth = False
    pw_auth = None
    if 'ssh_pwauth' in cfg:
        if util.is_true(cfg['ssh_pwauth']):
            change_pwauth = True
            pw_auth = 'yes'
        elif util.is_false(cfg['ssh_pwauth']):
            change_pwauth = True
            pw_auth = 'no'
        elif str(cfg['ssh_pwauth']).lower() == 'unchanged':
            log.debug('Leaving auth line unchanged')
            change_pwauth = False
        elif not str(cfg['ssh_pwauth']).strip():
            log.debug('Leaving auth line unchanged')
            change_pwauth = False
        elif not cfg['ssh_pwauth']:
            log.debug('Leaving auth line unchanged')
            change_pwauth = False
        else:
            msg = 'Unrecognized value %s for ssh_pwauth' % cfg['ssh_pwauth']
            util.logexc(log, msg)

    if change_pwauth:
        replaced_auth = False

        # See: man sshd_config
        old_lines = ssh_util.parse_ssh_config(ssh_util.DEF_SSHD_CFG)
        new_lines = []
        i = 0
        for (i, line) in enumerate(old_lines):
            # Keywords are case-insensitive and arguments are case-sensitive
            if line.key == 'passwordauthentication':
                log.debug("Replacing auth line %s with %s", i + 1, pw_auth)
                replaced_auth = True
                line.value = pw_auth
            new_lines.append(line)

        if not replaced_auth:
            log.debug("Adding new auth line %s", i + 1)
            replaced_auth = True
            new_lines.append(ssh_util.SshdConfigLine('',
                                                     'PasswordAuthentication',
                                                     pw_auth))

        lines = [str(e) for e in new_lines]
        util.write_file(ssh_util.DEF_SSHD_CFG, "\n".join(lines))

        try:
            # FIX: copy init_cmd before appending; appending to the
            # distro's own list mutated shared state on every call.
            cmd = list(cloud.distro.init_cmd)  # Default service
            cmd.append(cloud.distro.get_option('ssh_svcname', 'ssh'))
            cmd.append('restart')
            if 'systemctl' in cmd:  # Switch action ordering
                cmd[1], cmd[2] = cmd[2], cmd[1]
            # FIX: filter() is a lazy iterator on Python 3; subp needs a
            # concrete argv list.
            cmd = list(filter(None, cmd))  # Remove empty arguments
            util.subp(cmd)
            log.debug("Restarted the ssh daemon")
        except Exception:
            util.logexc(log, "Restarting of the ssh daemon failed")

    if len(errors):
        log.debug("%s errors occured, re-raising the last one", len(errors))
        raise errors[-1]
def handle(name, cfg, cloud, log, _args):
    """Enable and configure ntp."""
    if "ntp" not in cfg:
        LOG.debug("Skipping module named %s, not present or disabled by cfg",
                  name)
        return
    ntp_cfg = cfg["ntp"]
    if ntp_cfg is None:
        ntp_cfg = {}  # Allow empty config which will install the package

    # TODO drop this when validate_cloudconfig_schema is strict=True
    if not isinstance(ntp_cfg, (dict)):
        raise RuntimeError(
            "'ntp' key existed in config, but not a dictionary type,"
            " is a {_type} instead".format(_type=type_utils.obj_name(ntp_cfg)))

    # Allow users to explicitly enable/disable
    if util.is_false(ntp_cfg.get("enabled", True)):
        LOG.debug("Skipping module named %s, disabled by cfg", name)
        return

    # Select which client is going to be used and get the configuration
    client_cfg = select_ntp_client(ntp_cfg.get("ntp_client"), cloud.distro)

    # Allow user ntp config to override distro configurations
    client_cfg = util.mergemanydict(
        [client_cfg, ntp_cfg.get("config", {})], reverse=True)

    supplemental_schema_validation(client_cfg)
    rename_ntp_conf(confpath=client_cfg.get("confpath"))

    template_fn = None
    if not client_cfg.get("template"):
        tmpl_name = client_cfg.get("template_name").replace(
            "{distro}", cloud.distro.name)
        template_fn = cloud.get_template_filename(tmpl_name)
        if not template_fn:
            raise RuntimeError("No template found, not rendering %s"
                               % client_cfg.get("template_name"))

    write_ntp_config_template(
        cloud.distro.name,
        service_name=client_cfg.get("service_name"),
        servers=ntp_cfg.get("servers", []),
        pools=ntp_cfg.get("pools", []),
        path=client_cfg.get("confpath"),
        template_fn=template_fn,
        template=client_cfg.get("template"),
    )

    install_ntp_client(
        cloud.distro.install_packages,
        packages=client_cfg["packages"],
        check_exe=client_cfg["check_exe"],
    )
    try:
        cloud.distro.manage_service("reload", client_cfg.get("service_name"))
    except subp.ProcessExecutionError as e:
        LOG.exception("Failed to reload/start ntp service: %s", e)
        raise
def handle(_name, cfg, cloud, log, args):
    """Set user passwords and optionally toggle sshd PasswordAuthentication.

    Reads 'password', 'chpasswd' ('list'/'expire') and 'ssh_pwauth' from
    cfg. Passwords of "R"/"RANDOM" are replaced with generated ones which
    are echoed to stderr. Errors are collected and the last one re-raised
    at the end so every requested change is attempted.
    """
    if len(args) != 0:
        # if run from command line, and give args, wipe the chpasswd['list']
        password = args[0]
        if 'chpasswd' in cfg and 'list' in cfg['chpasswd']:
            del cfg['chpasswd']['list']
    else:
        password = util.get_cfg_option_str(cfg, "password", None)

    expire = True
    plist = None

    if 'chpasswd' in cfg:
        chfg = cfg['chpasswd']
        plist = util.get_cfg_option_str(chfg, 'list', plist)
        expire = util.get_cfg_option_bool(chfg, 'expire', expire)

    if not plist and password:
        (users, _groups) = ds.normalize_users_groups(cfg, cloud.distro)
        (user, _user_config) = ds.extract_default(users)
        if user:
            plist = "%s:%s" % (user, password)
        else:
            # FIX: logger.warn is a deprecated alias of logger.warning
            log.warning("No default or defined user to change password for.")

    errors = []
    if plist:
        plist_in = []
        randlist = []
        users = []
        for line in plist.splitlines():
            u, p = line.split(':', 1)
            if p == "R" or p == "RANDOM":
                p = rand_user_password()
                randlist.append("%s:%s" % (u, p))
            plist_in.append("%s:%s" % (u, p))
            users.append(u)

        ch_in = '\n'.join(plist_in) + '\n'
        try:
            log.debug("Changing password for %s:", users)
            util.subp(['chpasswd'], ch_in)
        except Exception as e:
            errors.append(e)
            util.logexc(log,
                        "Failed to set passwords with chpasswd for %s", users)

        if len(randlist):
            blurb = ("Set the following 'random' passwords\n",
                     '\n'.join(randlist))
            sys.stderr.write("%s\n%s\n" % blurb)

        if expire:
            expired_users = []
            for u in users:
                try:
                    util.subp(['passwd', '--expire', u])
                    expired_users.append(u)
                except Exception as e:
                    errors.append(e)
                    util.logexc(log, "Failed to set 'expire' for %s", u)
            if expired_users:
                log.debug("Expired passwords for: %s users", expired_users)

    change_pwauth = False
    pw_auth = None
    if 'ssh_pwauth' in cfg:
        if util.is_true(cfg['ssh_pwauth']):
            change_pwauth = True
            pw_auth = 'yes'
        elif util.is_false(cfg['ssh_pwauth']):
            change_pwauth = True
            pw_auth = 'no'
        elif str(cfg['ssh_pwauth']).lower() == 'unchanged':
            log.debug('Leaving auth line unchanged')
            change_pwauth = False
        elif not str(cfg['ssh_pwauth']).strip():
            log.debug('Leaving auth line unchanged')
            change_pwauth = False
        elif not cfg['ssh_pwauth']:
            log.debug('Leaving auth line unchanged')
            change_pwauth = False
        else:
            msg = 'Unrecognized value %s for ssh_pwauth' % cfg['ssh_pwauth']
            util.logexc(log, msg)

    if change_pwauth:
        replaced_auth = False

        # See: man sshd_config
        old_lines = ssh_util.parse_ssh_config(ssh_util.DEF_SSHD_CFG)
        new_lines = []
        i = 0
        for (i, line) in enumerate(old_lines):
            # Keywords are case-insensitive and arguments are case-sensitive
            if line.key == 'passwordauthentication':
                log.debug("Replacing auth line %s with %s", i + 1, pw_auth)
                replaced_auth = True
                line.value = pw_auth
            new_lines.append(line)

        if not replaced_auth:
            log.debug("Adding new auth line %s", i + 1)
            replaced_auth = True
            new_lines.append(ssh_util.SshdConfigLine('',
                                                     'PasswordAuthentication',
                                                     pw_auth))

        lines = [str(e) for e in new_lines]
        util.write_file(ssh_util.DEF_SSHD_CFG, "\n".join(lines))

        try:
            # FIX: copy init_cmd before appending; appending to the
            # distro's own list mutated shared state on every call.
            cmd = list(cloud.distro.init_cmd)  # Default service
            cmd.append(cloud.distro.get_option('ssh_svcname', 'ssh'))
            cmd.append('restart')
            if 'systemctl' in cmd:  # Switch action ordering
                cmd[1], cmd[2] = cmd[2], cmd[1]
            # FIX: filter() is a lazy iterator on Python 3; subp needs a
            # concrete argv list.
            cmd = list(filter(None, cmd))  # Remove empty arguments
            util.subp(cmd)
            log.debug("Restarted the ssh daemon")
        except Exception:
            # FIX: a bare 'except:' also swallowed SystemExit and
            # KeyboardInterrupt; catch Exception instead.
            util.logexc(log, "Restarting of the ssh daemon failed")

    if len(errors):
        log.debug("%s errors occured, re-raising the last one", len(errors))
        raise errors[-1]
def handle(name, cfg, cloud, log, _args):
    """Enable and configure ntp.

    Expects cfg['ntp'] to be a dict, or None to install with defaults.
    Validates the user config against the module schema, honors an
    explicit 'enabled: false', renders the client configuration (from a
    template when none is inlined), installs the client package and
    reloads the service.

    @raises RuntimeError: if cfg['ntp'] is not a dict, or no template
        can be found for the selected client.
    """
    if 'ntp' not in cfg:
        LOG.debug(
            "Skipping module named %s, not present or disabled by cfg", name)
        return
    ntp_cfg = cfg['ntp']
    if ntp_cfg is None:
        ntp_cfg = {}  # Allow empty config which will install the package

    # TODO drop this when validate_cloudconfig_schema is strict=True
    if not isinstance(ntp_cfg, (dict)):
        raise RuntimeError(
            "'ntp' key existed in config, but not a dictionary type,"
            " is a {_type} instead".format(_type=type_utils.obj_name(ntp_cfg)))

    validate_cloudconfig_schema(cfg, schema)

    # Allow users to explicitly enable/disable
    enabled = ntp_cfg.get('enabled', True)
    if util.is_false(enabled):
        LOG.debug("Skipping module named %s, disabled by cfg", name)
        return

    # Select which client is going to be used and get the configuration
    ntp_client_config = select_ntp_client(ntp_cfg.get('ntp_client'),
                                          cloud.distro)

    # Allow user ntp config to override distro configurations
    ntp_client_config = util.mergemanydict(
        [ntp_client_config, ntp_cfg.get('config', {})], reverse=True)

    supplemental_schema_validation(ntp_client_config)
    rename_ntp_conf(confpath=ntp_client_config.get('confpath'))

    # Only resolve a template file when the client config does not carry
    # an inline template; '{distro}' in the template name is substituted
    # with the distro name before lookup.
    template_fn = None
    if not ntp_client_config.get('template'):
        template_name = (
            ntp_client_config.get('template_name').replace(
                '{distro}', cloud.distro.name))
        template_fn = cloud.get_template_filename(template_name)
        if not template_fn:
            msg = ('No template found, not rendering %s' %
                   ntp_client_config.get('template_name'))
            raise RuntimeError(msg)

    write_ntp_config_template(cloud.distro.name,
                              servers=ntp_cfg.get('servers', []),
                              pools=ntp_cfg.get('pools', []),
                              path=ntp_client_config.get('confpath'),
                              template_fn=template_fn,
                              template=ntp_client_config.get('template'))

    install_ntp_client(cloud.distro.install_packages,
                       packages=ntp_client_config['packages'],
                       check_exe=ntp_client_config['check_exe'])
    try:
        reload_ntp(ntp_client_config['service_name'],
                   systemd=cloud.distro.uses_systemd())
    except util.ProcessExecutionError as e:
        # Re-raise after logging so the stage reports the failure.
        LOG.exception("Failed to reload/start ntp service: %s", e)
        raise
def read_azure_ovf(contents):
    """Parse an Azure ovf-env.xml document.

    @param contents: string content of the ovf-env.xml file.
    @return: (md, ud, cfg) tuple of metadata, userdata and config
        extracted from the LinuxProvisioningConfigurationSet.
    @raises NonAzureDataSource: if the document lacks the Azure
        provisioning sections.
    @raises BrokenAzureDataSource: if the document is malformed.
    """
    try:
        dom = minidom.parseString(contents)
    except Exception as e:
        raise BrokenAzureDataSource("invalid xml: %s" % e)

    results = find_child(dom.documentElement,
                         lambda n: n.localName == "ProvisioningSection")

    if len(results) == 0:
        raise NonAzureDataSource("No ProvisioningSection")
    if len(results) > 1:
        raise BrokenAzureDataSource("found '%d' ProvisioningSection items" %
                                    len(results))
    provSection = results[0]

    lpcs_nodes = find_child(
        provSection,
        lambda n: n.localName == "LinuxProvisioningConfigurationSet")

    # BUG FIX: these two checks previously re-tested 'results' (the
    # ProvisioningSection list, already known to have exactly one item)
    # instead of 'lpcs_nodes', so a missing or duplicated
    # LinuxProvisioningConfigurationSet was never detected.
    if len(lpcs_nodes) == 0:
        raise NonAzureDataSource("No LinuxProvisioningConfigurationSet")
    if len(lpcs_nodes) > 1:
        raise BrokenAzureDataSource("found '%d' %ss" %
                                    ("LinuxProvisioningConfigurationSet",
                                     len(lpcs_nodes)))
    lpcs = lpcs_nodes[0]

    if not lpcs.hasChildNodes():
        raise BrokenAzureDataSource("no child nodes of configuration set")

    # NOTE: md_props is a string, so 'name in md_props' below is a
    # substring test ("seedfrom" only).
    md_props = 'seedfrom'
    md = {'azure_data': {}}
    cfg = {}
    ud = ""
    password = None
    username = None

    for child in lpcs.childNodes:
        if child.nodeType == dom.TEXT_NODE or not child.localName:
            continue

        name = child.localName.lower()

        simple = False
        value = ""
        # A single text child is treated as a simple scalar property.
        if (len(child.childNodes) == 1 and
                child.childNodes[0].nodeType == dom.TEXT_NODE):
            simple = True
            value = child.childNodes[0].wholeText

        attrs = dict([(k, v) for k, v in child.attributes.items()])

        # we accept either UserData or CustomData. If both are present
        # then behavior is undefined.
        if name == "userdata" or name == "customdata":
            if attrs.get('encoding') in (None, "base64"):
                ud = base64.b64decode(''.join(value.split()))
            else:
                ud = value
        elif name == "username":
            username = value
        elif name == "userpassword":
            password = value
        elif name == "hostname":
            md['local-hostname'] = value
        elif name == "dscfg":
            if attrs.get('encoding') in (None, "base64"):
                dscfg = base64.b64decode(''.join(value.split()))
            else:
                dscfg = value
            cfg['datasource'] = {DS_NAME: util.load_yaml(dscfg, default={})}
        elif name == "ssh":
            cfg['_pubkeys'] = load_azure_ovf_pubkeys(child)
        elif name == "disablesshpasswordauthentication":
            cfg['ssh_pwauth'] = util.is_false(value)
        elif simple:
            if name in md_props:
                md[name] = value
            else:
                md['azure_data'][name] = value

    defuser = {}
    if username:
        defuser['name'] = username
    if password and DEF_PASSWD_REDACTION != password:
        defuser['passwd'] = encrypt_pass(password)
        defuser['lock_passwd'] = False

    if defuser:
        cfg['system_info'] = {'default_user': defuser}

    if 'ssh_pwauth' not in cfg and password:
        cfg['ssh_pwauth'] = True

    return (md, ud, cfg)
LOCAL_LOG_PATH = "/tmp/cloud_init_test_logs"

##################################################################
# USER SETTINGS OVERRIDES
##################################################################
# Bring in any user-file defined settings
try:
    # pylint: disable=wildcard-import,unused-wildcard-import
    from tests.integration_tests.user_settings import *  # noqa
except ImportError:
    pass

##################################################################
# ENVIRONMENT SETTINGS OVERRIDES
##################################################################
# Any of the settings in this file can be overridden with an
# environment variable of the same name prepended with CLOUD_INIT_
# E.g., CLOUD_INIT_PLATFORM
# Perhaps a bit too hacky, but it works :)
for _setting in [_v for _v in list(locals()) if _v.isupper()]:
    # Fall back to the current module-level value when no env var is set.
    _value = os.getenv("CLOUD_INIT_{}".format(_setting), globals()[_setting])
    if isinstance(_value, str):
        _value = _value.strip()
    # Coerce truthy/falsy strings (and bools) into real booleans.
    if is_true(_value):
        _value = True
    elif is_false(_value):
        _value = False
    globals()[_setting] = _value
def handle(name, cfg, cloud, log, _args):
    """Enable and configure ntp."""
    if 'ntp' not in cfg:
        LOG.debug("Skipping module named %s, not present or disabled by cfg",
                  name)
        return
    ntp_cfg = cfg['ntp']
    if ntp_cfg is None:
        # An empty body is acceptable; the package still gets installed.
        ntp_cfg = {}

    # TODO drop this when validate_cloudconfig_schema is strict=True
    if not isinstance(ntp_cfg, (dict)):
        raise RuntimeError(
            "'ntp' key existed in config, but not a dictionary type,"
            " is a {_type} instead".format(_type=type_utils.obj_name(ntp_cfg)))

    validate_cloudconfig_schema(cfg, schema)

    # Users may turn the module off explicitly.
    if util.is_false(ntp_cfg.get('enabled', True)):
        LOG.debug("Skipping module named %s, disabled by cfg", name)
        return

    # Pick the ntp client, then let user config override distro defaults.
    client_cfg = select_ntp_client(ntp_cfg.get('ntp_client'), cloud.distro)
    client_cfg = util.mergemanydict(
        [client_cfg, ntp_cfg.get('config', {})], reverse=True)

    supplemental_schema_validation(client_cfg)
    rename_ntp_conf(confpath=client_cfg.get('confpath'))

    template_fn = None
    if not client_cfg.get('template'):
        tmpl_name = client_cfg.get('template_name').replace(
            '{distro}', cloud.distro.name)
        template_fn = cloud.get_template_filename(tmpl_name)
        if not template_fn:
            raise RuntimeError('No template found, not rendering %s'
                               % client_cfg.get('template_name'))

    write_ntp_config_template(cloud.distro.name,
                              servers=ntp_cfg.get('servers', []),
                              pools=ntp_cfg.get('pools', []),
                              path=client_cfg.get('confpath'),
                              template_fn=template_fn,
                              template=client_cfg.get('template'))

    install_ntp_client(cloud.distro.install_packages,
                       packages=client_cfg['packages'],
                       check_exe=client_cfg['check_exe'])
    try:
        reload_ntp(client_cfg['service_name'],
                   systemd=cloud.distro.uses_systemd())
    except util.ProcessExecutionError as e:
        LOG.exception("Failed to reload/start ntp service: %s", e)
        raise