def install_drivers(cfg, pkg_install_func):
    if not isinstance(cfg, dict):
        raise TypeError(
            "'drivers' config expected dict, found '%s': %s"
            % (type_utils.obj_name(cfg), cfg)
        )

    cfgpath = "nvidia/license-accepted"
    # Call translate_bool to ensure that we treat string values like "yes" as
    # acceptance and _don't_ treat string values like "nah" as acceptance
    # because they're True-ish
    nv_acc = util.translate_bool(util.get_cfg_by_path(cfg, cfgpath))
    if not nv_acc:
        LOG.debug("Not installing NVIDIA drivers. %s=%s", cfgpath, nv_acc)
        return

    if not subp.which("ubuntu-drivers"):
        LOG.debug(
            "'ubuntu-drivers' command not available. "
            "Installing ubuntu-drivers-common"
        )
        pkg_install_func(["ubuntu-drivers-common"])

    driver_arg = "nvidia"
    version_cfg = util.get_cfg_by_path(cfg, "nvidia/version")
    if version_cfg:
        driver_arg += ":{}".format(version_cfg)

    LOG.debug(
        "Installing and activating NVIDIA drivers (%s=%s, version=%s)",
        cfgpath,
        nv_acc,
        version_cfg if version_cfg else "latest",
    )

    # Register and set debconf selection linux/nvidia/latelink = true
    tdir = temp_utils.mkdtemp(needs_exe=True)
    debconf_file = os.path.join(tdir, "nvidia.template")
    debconf_script = os.path.join(tdir, "nvidia-debconf.sh")
    try:
        util.write_file(debconf_file, NVIDIA_DEBCONF_CONTENT)
        util.write_file(
            debconf_script,
            util.encode_text(NVIDIA_DRIVER_LATELINK_DEBCONF_SCRIPT),
            mode=0o755,
        )
        subp.subp([debconf_script, debconf_file])
    except Exception as e:
        util.logexc(
            LOG, "Failed to register NVIDIA debconf template: %s", str(e)
        )
        raise
    finally:
        if os.path.isdir(tdir):
            util.del_dir(tdir)

    try:
        subp.subp(["ubuntu-drivers", "install", "--gpgpu", driver_arg])
    except subp.ProcessExecutionError as exc:
        if OLD_UBUNTU_DRIVERS_STDERR_NEEDLE in exc.stderr:
            LOG.warning(
                "the available version of ubuntu-drivers is"
                " too old to perform requested driver installation"
            )
        elif "No drivers found for installation." in exc.stdout:
            LOG.warning("ubuntu-drivers found no drivers for installation")
        raise
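# For context: util.get_cfg_by_path resolves a key path (an "a/b" string or a
# tuple/list of keys) against a nested config dict, returning a default when
# any key along the path is missing. A minimal, self-contained sketch of that
# behavior; _get_by_path_sketch is a hypothetical stand-in, not cloud-init's
# implementation:

def _get_by_path_sketch(cfg, keypath, default=None):
    if isinstance(keypath, str):
        keypath = keypath.split("/")
    cur = cfg
    for key in keypath:
        if not isinstance(cur, dict) or key not in cur:
            return default
        cur = cur[key]
    return cur

# Mirrors the lookups install_drivers() performs on its 'drivers' config:
assert _get_by_path_sketch({"nvidia": {"license-accepted": True}},
                           "nvidia/license-accepted") is True
assert _get_by_path_sketch({}, "nvidia/version") is None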
def match_ec2_url(uri, headers):
    path = uri.path.strip("/")
    if len(path) == 0:
        return (200, headers, "\n".join(EC2_VERSIONS))
    path = uri.path.lstrip("/")
    if path in ec2_files:
        return (200, headers, ec2_files.get(path))
    if path == "latest/meta-data/":
        buf = StringIO()
        for (k, v) in ec2_meta.items():
            if isinstance(v, (list, tuple)):
                buf.write("%s/" % (k))
            else:
                buf.write("%s" % (k))
            buf.write("\n")
        return (200, headers, buf.getvalue())
    if path.startswith("latest/meta-data/"):
        value = None
        pieces = path.split("/")
        if path.endswith("/"):
            pieces = pieces[2:-1]
            value = util.get_cfg_by_path(ec2_meta, pieces)
        else:
            pieces = pieces[2:]
            value = util.get_cfg_by_path(ec2_meta, pieces)
        if value is not None:
            return (200, headers, str(value))
    return (404, headers, "")
def _extract_cfg(self, restriction):
    # Ensure actually read
    self.read_cfg()
    # Nobody gets the real config
    ocfg = copy.deepcopy(self._cfg)
    if restriction == "restricted":
        ocfg.pop("system_info", None)
    elif restriction == "system":
        ocfg = util.get_cfg_by_path(ocfg, ("system_info",), {})
    elif restriction == "paths":
        ocfg = util.get_cfg_by_path(ocfg, ("system_info", "paths"), {})
    if not isinstance(ocfg, dict):
        ocfg = {}
    return ocfg
def handle(name, cfg, cloud, log, args):
    """Handler method activated by cloud-init."""
    verbose = util.get_cfg_by_path(cfg, ('debug', 'verbose'), default=True)
    if args:
        # if args are provided (from cmdline) then explicitly set verbose
        out_file = args[0]
        verbose = True
    else:
        out_file = util.get_cfg_by_path(cfg, ('debug', 'output'))

    if not verbose:
        log.debug(("Skipping module named %s,"
                   " verbose printing disabled"), name)
        return
    # Clean out some keys that we just don't care about showing...
    dump_cfg = copy.deepcopy(cfg)
    for k in SKIP_KEYS:
        dump_cfg.pop(k, None)
    all_keys = list(dump_cfg.keys())
    for k in all_keys:
        if k.startswith("_"):
            dump_cfg.pop(k, None)
    # Now dump it...
    to_print = StringIO()
    to_print.write(_make_header("Config"))
    to_print.write(_dumps(dump_cfg))
    to_print.write("\n")
    to_print.write(_make_header("MetaData"))
    to_print.write(_dumps(cloud.datasource.metadata))
    to_print.write("\n")
    to_print.write(_make_header("Misc"))
    to_print.write("Datasource: %s\n" %
                   (type_utils.obj_name(cloud.datasource)))
    to_print.write("Distro: %s\n" % (type_utils.obj_name(cloud.distro)))
    to_print.write("Hostname: %s\n" % (cloud.get_hostname(True)))
    to_print.write("Instance ID: %s\n" % (cloud.get_instance_id()))
    to_print.write("Locale: %s\n" % (cloud.get_locale()))
    to_print.write("Launch IDX: %s\n" % (cloud.launch_index))
    contents = to_print.getvalue()
    content_to_file = []
    for line in contents.splitlines():
        line = "ci-info: %s\n" % (line)
        content_to_file.append(line)
    if out_file:
        util.write_file(out_file, "".join(content_to_file), 0o644, "w")
    else:
        util.multi_log("".join(content_to_file), console=True, stderr=False)
def handle(name, cfg, cloud, log, args):
    """Handler method activated by cloud-init."""
    verbose = util.get_cfg_by_path(cfg, ('debug', 'verbose'), default=True)
    if args:
        # if args are provided (from cmdline) then explicitly set verbose
        out_file = args[0]
        verbose = True
    else:
        out_file = util.get_cfg_by_path(cfg, ('debug', 'output'))

    if not verbose:
        log.debug(("Skipping module named %s,"
                   " verbose printing disabled"), name)
        return
    # Clean out some keys that we just don't care about showing...
    dump_cfg = copy.deepcopy(cfg)
    for k in SKIP_KEYS:
        dump_cfg.pop(k, None)
    all_keys = list(dump_cfg)
    for k in all_keys:
        if k.startswith("_"):
            dump_cfg.pop(k, None)
    # Now dump it...
    to_print = StringIO()
    to_print.write(_make_header("Config"))
    to_print.write(_dumps(dump_cfg))
    to_print.write("\n")
    to_print.write(_make_header("MetaData"))
    to_print.write(_dumps(cloud.datasource.metadata))
    to_print.write("\n")
    to_print.write(_make_header("Misc"))
    to_print.write("Datasource: %s\n" %
                   (type_utils.obj_name(cloud.datasource)))
    to_print.write("Distro: %s\n" % (type_utils.obj_name(cloud.distro)))
    to_print.write("Hostname: %s\n" % (cloud.get_hostname(True)))
    to_print.write("Instance ID: %s\n" % (cloud.get_instance_id()))
    to_print.write("Locale: %s\n" % (cloud.get_locale()))
    to_print.write("Launch IDX: %s\n" % (cloud.launch_index))
    contents = to_print.getvalue()
    content_to_file = []
    for line in contents.splitlines():
        line = "ci-info: %s\n" % (line)
        content_to_file.append(line)
    if out_file:
        util.write_file(out_file, "".join(content_to_file), 0o644, "w")
    else:
        util.multi_log("".join(content_to_file), console=True, stderr=False)
def __init__(self, sys_cfg, distro, paths):
    sources.DataSource.__init__(self, sys_cfg, distro, paths)
    self.is_smartdc = None
    self.ds_cfg = util.mergemanydict([
        self.ds_cfg,
        util.get_cfg_by_path(sys_cfg, DS_CFG_PATH, {}),
        BUILTIN_DS_CONFIG])

    self.metadata = {}

    # SDC LX-Brand Zones lack dmidecode (no /dev/mem) but
    # report 'BrandZ virtual linux' as the kernel version
    if os.uname()[3].lower() == 'brandz virtual linux':
        LOG.debug("Host is SmartOS, guest in Zone")
        self.is_smartdc = True
        self.smartos_type = 'lx-brand'
        self.cfg = {}
        self.seed = self.ds_cfg.get("metadata_sockfile")
    else:
        self.is_smartdc = True
        self.smartos_type = 'kvm'
        self.seed = self.ds_cfg.get("serial_device")
        self.cfg = BUILTIN_CLOUD_CONFIG
        self.seed_timeout = self.ds_cfg.get("serial_timeout")
    self.smartos_no_base64 = self.ds_cfg.get('no_base64_decode')
    self.b64_keys = self.ds_cfg.get('base64_keys')
    self.b64_all = self.ds_cfg.get('base64_all')
    self.script_base_d = os.path.join(self.paths.get_cpath("scripts"))
def __init__(self, sys_cfg, *args, **kwargs):
    super(DataSourceOracle, self).__init__(sys_cfg, *args, **kwargs)
    self.ds_cfg = util.mergemanydict([
        util.get_cfg_by_path(sys_cfg, ['datasource', self.dsname], {}),
        BUILTIN_DS_CONFIG])
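# The mergemanydict([...]) pattern above layers the operator-supplied
# datasource config over built-in defaults. A shallow, self-contained sketch,
# assuming earlier list entries take precedence (which is how these call
# sites read: sys_cfg values override BUILTIN_DS_CONFIG); the real helper
# also merges nested dicts recursively:

def _merge_many_sketch(dicts):
    merged = {}
    for d in reversed(dicts):  # later entries get overridden by earlier ones
        merged.update(d)
    return merged

builtin = {"metadata_url": "http://169.254.169.254", "retries": 5}
user_cfg = {"retries": 10}
assert _merge_many_sketch([user_cfg, builtin])["retries"] == 10
assert _merge_many_sketch([user_cfg, builtin])["metadata_url"] == \
    builtin["metadata_url"]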
def __init__(self, sys_cfg, distro, paths):
    super(DataSourceScaleway, self).__init__(sys_cfg, distro, paths)
    self.ds_cfg = util.mergemanydict(
        [
            util.get_cfg_by_path(sys_cfg, ["datasource", "Scaleway"], {}),
            BUILTIN_DS_CONFIG,
        ]
    )
    self.metadata_address = self.ds_cfg.get(
        "custom_metadata_url",
        "{base_url}/conf?format=json".format(
            base_url=self.ds_cfg.get("base_url", DS_BASE_URL)
        ),
    )
    self.userdata_address = self.ds_cfg.get(
        "custom_userdata_url",
        "{base_url}/user_data/cloud-init".format(
            base_url=self.ds_cfg.get("base_url", DS_BASE_URL)
        ),
    )
    self.vendordata_address = self.ds_cfg.get(
        "custom_vendordata_url",
        "{base_url}/vendor_data/cloud-init".format(
            base_url=self.ds_cfg.get("base_url", DS_BASE_URL)
        ),
    )

    self.headers_redact = None
    self.headers = None
    # Scaleway Baremetal products use X-Metadata-Auth-Token
    authToken = self.ds_cfg.get("token", None)
    if authToken is not None:
        self.headers_redact = SCW_METADATA_AUTH_TOKEN
        self.headers = {SCW_METADATA_AUTH_TOKEN: authToken}

    self.retries = int(self.ds_cfg.get("retries", DEF_MD_RETRIES))
    self.timeout = int(self.ds_cfg.get("timeout", DEF_MD_TIMEOUT))
    self._fallback_interface = None
    self._network_config = sources.UNSET
def _load_warn_cfg(cfg, name, mode=True, sleep=None):
    # parse cfg['warnings']['name'] returning boolean, sleep
    # expected value is form of:
    #   (on|off|true|false|sleep)[,sleeptime]
    # boolean True == on, False == off
    default = (mode, sleep)
    if not cfg or not isinstance(cfg, dict):
        return default

    ncfg = util.get_cfg_by_path(cfg, ('warnings', name))
    if ncfg is None:
        return default

    if ncfg in ("on", "true", True):
        return True, None

    if ncfg in ("off", "false", False):
        return False, None

    mode, _, csleep = ncfg.partition(",")
    if mode != "sleep":
        return default

    if csleep:
        try:
            sleep = int(csleep)
        except ValueError:
            return default

    return True, sleep
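# A quick, runnable illustration of the "(on|off|true|false|sleep)[,sleeptime]"
# tail case that _load_warn_cfg handles with str.partition (the sample values
# here are hypothetical):

for raw, expected in [("sleep", (True, None)),
                      ("sleep,30", (True, 30)),
                      ("snooze,30", "default")]:
    mode, _, csleep = raw.partition(",")
    if mode != "sleep":
        result = "default"  # _load_warn_cfg would return the caller's default
    else:
        result = (True, int(csleep)) if csleep else (True, None)
    assert result == expected, (raw, result)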
def _get_data(self):
    strict_mode, _sleep = read_strict_mode(
        util.get_cfg_by_path(self.sys_cfg, STRICT_ID_PATH,
                             STRICT_ID_DEFAULT), ("warn", None))

    LOG.debug("strict_mode: %s, cloud_name=%s cloud_platform=%s",
              strict_mode, self.cloud_name, self.platform)
    if strict_mode == "true" and self.cloud_name == CloudNames.UNKNOWN:
        return False
    elif self.cloud_name == CloudNames.NO_EC2_METADATA:
        return False

    if self.perform_dhcp_setup:  # Setup networking in init-local stage.
        if util.is_FreeBSD():
            LOG.debug("FreeBSD doesn't support running dhclient with -sf")
            return False
        try:
            with EphemeralDHCPv4(self.fallback_interface):
                self._crawled_metadata = util.log_time(
                    logfunc=LOG.debug, msg='Crawl of metadata service',
                    func=self.crawl_metadata)
        except NoDHCPLeaseError:
            return False
    else:
        self._crawled_metadata = util.log_time(
            logfunc=LOG.debug, msg='Crawl of metadata service',
            func=self.crawl_metadata)
    if not self._crawled_metadata:
        return False
    self.metadata = self._crawled_metadata.get('meta-data', None)
    self.userdata_raw = self._crawled_metadata.get('user-data', None)
    self.identity = self._crawled_metadata.get(
        'dynamic', {}).get('instance-identity', {}).get('document', {})
    return True
def __init__(self, sys_cfg, distro, paths, ud_proc=None):
    self.sys_cfg = sys_cfg
    self.distro = distro
    self.paths = paths
    self.userdata = None
    self.metadata = {}
    self.userdata_raw = None
    self.vendordata = None
    self.vendordata_raw = None

    # find the datasource config name.
    # remove 'DataSource' from classname on front, and remove 'Net' on end.
    # Both Foo and FooNet sources expect config in cfg['sources']['Foo']
    name = type_utils.obj_name(self)
    if name.startswith(DS_PREFIX):
        name = name[len(DS_PREFIX):]
    if name.endswith('Net'):
        name = name[0:-3]

    self.ds_cfg = util.get_cfg_by_path(self.sys_cfg,
                                       ("datasource", name), {})
    if not self.ds_cfg:
        self.ds_cfg = {}

    if not ud_proc:
        self.ud_proc = ud.UserDataProcessor(self.paths)
    else:
        self.ud_proc = ud_proc
def __init__(self, sys_cfg, distro, paths):
    sources.DataSource.__init__(self, sys_cfg, distro, paths)
    self.seed_dir = os.path.join(paths.seed_dir, "azure")
    self.cfg = {}
    self.seed = None
    self.ds_cfg = util.mergemanydict([
        util.get_cfg_by_path(sys_cfg, DS_CFG_PATH, {}),
        BUILTIN_DS_CONFIG])
    self.dhclient_lease_file = self.ds_cfg.get("dhclient_lease_file")
def __init__(self, sys_cfg, distro, paths):
    sources.DataSource.__init__(self, sys_cfg, distro, paths)
    self.metadata = dict()
    self.ds_cfg = util.mergemanydict([
        util.get_cfg_by_path(sys_cfg, ["datasource", "GCE"], {}),
        BUILTIN_DS_CONFIG])
    self.metadata_address = self.ds_cfg['metadata_url']
def __init__(self, sys_cfg, distro, paths, ud_proc=None):
    self.sys_cfg = sys_cfg
    self.distro = distro
    self.paths = paths
    self.userdata = None
    self.metadata = None
    self.userdata_raw = None
    self.vendordata = None
    self.vendordata_raw = None

    # find the datasource config name.
    # remove 'DataSource' from classname on front, and remove 'Net' on end.
    # Both Foo and FooNet sources expect config in cfg['sources']['Foo']
    name = type_utils.obj_name(self)
    if name.startswith(DS_PREFIX):
        name = name[len(DS_PREFIX):]
    if name.endswith('Net'):
        name = name[0:-3]

    self.ds_cfg = util.get_cfg_by_path(self.sys_cfg,
                                       ("datasource", name), {})
    if not ud_proc:
        self.ud_proc = ud.UserDataProcessor(self.paths)
    else:
        self.ud_proc = ud_proc
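# The name derivation above (strip the DS_PREFIX "DataSource" from the front,
# then a trailing "Net") is why Foo and FooNet datasources share one config
# section. A standalone illustration of just that string manipulation:

DS_PREFIX = 'DataSource'

def _cfg_section_name(cls_name):
    if cls_name.startswith(DS_PREFIX):
        cls_name = cls_name[len(DS_PREFIX):]
    if cls_name.endswith('Net'):
        cls_name = cls_name[0:-3]
    return cls_name

assert _cfg_section_name('DataSourceNoCloud') == 'NoCloud'
assert _cfg_section_name('DataSourceNoCloudNet') == 'NoCloud'  # same section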
def _get_data(self):
    """Crawl and process datasource metadata caching metadata as attrs.

    @return: True on success, False on error, invalid or disabled
        datasource.
    """
    if not self._is_platform_viable():
        return False
    try:
        crawled_data = util.log_time(
            logfunc=LOG.debug, msg='Crawl of metadata service',
            func=self.crawl_metadata)
    except sources.InvalidMetaDataException as e:
        LOG.warning('Could not crawl Azure metadata: %s', e)
        return False
    if self.distro and self.distro.name == 'ubuntu':
        maybe_remove_ubuntu_network_config_scripts()

    # Process crawled data and augment with various config defaults
    self.cfg = util.mergemanydict(
        [crawled_data['cfg'], BUILTIN_CLOUD_CONFIG])
    self._metadata_imds = crawled_data['metadata']['imds']
    self.metadata = util.mergemanydict(
        [crawled_data['metadata'], DEFAULT_METADATA])
    self.userdata_raw = crawled_data['userdata_raw']

    user_ds_cfg = util.get_cfg_by_path(self.cfg, DS_CFG_PATH, {})
    self.ds_cfg = util.mergemanydict([user_ds_cfg, self.ds_cfg])

    # walinux agent writes files world readable, but expects
    # the directory to be protected.
    write_files(
        self.ds_cfg['data_dir'], crawled_data['files'], dirmode=0o700)
    return True
def get_data(self):
    seed_ret = {}
    if util.read_optional_seed(seed_ret, base=(self.seed_dir + "/")):
        self.userdata_raw = seed_ret['user-data']
        self.metadata = seed_ret['meta-data']
        LOG.debug("Using seeded ec2 data from %s", self.seed_dir)
        self._cloud_platform = Platforms.SEEDED
        return True

    strict_mode, _sleep = read_strict_mode(
        util.get_cfg_by_path(self.sys_cfg, STRICT_ID_PATH,
                             STRICT_ID_DEFAULT), ("warn", None))

    LOG.debug("strict_mode: %s, cloud_platform=%s",
              strict_mode, self.cloud_platform)
    if strict_mode == "true" and self.cloud_platform == Platforms.UNKNOWN:
        return False

    try:
        if not self.wait_for_metadata_service():
            return False
        start_time = time.time()
        self.userdata_raw = \
            ec2.get_instance_userdata(self.api_ver, self.metadata_address)
        self.metadata = ec2.get_instance_metadata(self.api_ver,
                                                  self.metadata_address)
        LOG.debug("Crawl of metadata service took %.3f seconds",
                  time.time() - start_time)
        return True
    except Exception:
        util.logexc(LOG, "Failed reading from metadata address %s",
                    self.metadata_address)
        return False
def _get_data(self):
    seed_ret = {}
    if util.read_optional_seed(seed_ret, base=(self.seed_dir + "/")):
        self.userdata_raw = seed_ret['user-data']
        self.metadata = seed_ret['meta-data']
        LOG.debug("Using seeded ec2 data from %s", self.seed_dir)
        self._cloud_platform = Platforms.SEEDED
        return True

    strict_mode, _sleep = read_strict_mode(
        util.get_cfg_by_path(self.sys_cfg, STRICT_ID_PATH,
                             STRICT_ID_DEFAULT), ("warn", None))

    LOG.debug("strict_mode: %s, cloud_platform=%s",
              strict_mode, self.cloud_platform)
    if strict_mode == "true" and self.cloud_platform == Platforms.UNKNOWN:
        return False
    elif self.cloud_platform == Platforms.NO_EC2_METADATA:
        return False

    if self.perform_dhcp_setup:  # Setup networking in init-local stage.
        if util.is_FreeBSD():
            LOG.debug("FreeBSD doesn't support running dhclient with -sf")
            return False
        try:
            with EphemeralDHCPv4(self.fallback_interface):
                return util.log_time(
                    logfunc=LOG.debug, msg='Crawl of metadata service',
                    func=self._crawl_metadata)
        except NoDHCPLeaseError:
            return False
    else:
        return self._crawl_metadata()
def activate(self, cfg, is_new_instance):
    if not is_new_instance:
        return
    if self.cloud_name == CloudNames.UNKNOWN:
        warn_if_necessary(
            util.get_cfg_by_path(cfg, STRICT_ID_PATH, STRICT_ID_DEFAULT),
            cfg)
def __init__(self, sys_cfg, distro, paths):
    sources.DataSource.__init__(self, sys_cfg, distro, paths)
    self.seed_dir = os.path.join(paths.seed_dir, 'azure')
    self.cfg = {}
    self.seed = None
    self.ds_cfg = util.mergemanydict([
        util.get_cfg_by_path(sys_cfg, DS_CFG_PATH, {}),
        BUILTIN_DS_CONFIG])
def __init__(self, sys_cfg, distro, paths):
    sources.DataSource.__init__(self, sys_cfg, distro, paths)
    self.seed_dir = os.path.join(paths.seed_dir, 'azure')
    self.cfg = {}
    self.seed = None
    self.ds_cfg = util.mergemanydict([
        util.get_cfg_by_path(sys_cfg, DS_CFG_PATH, {}),
        BUILTIN_DS_CONFIG])
    self.cloud_data_dir = paths.get_cpath('data')
def __init__(self, sys_cfg, *args, **kwargs):
    super(DataSourceOracle, self).__init__(sys_cfg, *args, **kwargs)
    self._vnics_data = None
    self.ds_cfg = util.mergemanydict([
        util.get_cfg_by_path(sys_cfg, ["datasource", self.dsname], {}),
        BUILTIN_DS_CONFIG,
    ])
    self._network_config_source = KlibcOracleNetworkConfigSource()
def _write_network_state(self, network_state):
    priority = util.get_cfg_by_path(
        self._cfg, ('network', 'renderers'), None)
    name, render_cls = renderers.select(priority=priority)
    LOG.debug("Selected renderer '%s' from priority list: %s",
              name, priority)
    renderer = render_cls(config=self.renderer_configs.get(name))
    renderer.render_network_state(network_state)
def check_instance_id(self, sys_cfg):
    """locally check if the current system is the same instance.

    MAAS doesn't provide a real instance-id, and if it did, it is
    still only available over the network.  We need to check based
    only on local resources.  So compute a hash based on Oauth tokens."""
    if self.id_hash is None:
        return False
    ncfg = util.get_cfg_by_path(sys_cfg, ("datasource", self.dsname), {})
    return self.id_hash == get_id_from_ds_cfg(ncfg)
def _supported_write_network_config(self, network_config):
    priority = util.get_cfg_by_path(
        self._cfg, ('network', 'renderers'), None)
    name, render_cls = renderers.select(priority=priority)
    LOG.debug("Selected renderer '%s' from priority list: %s",
              name, priority)
    renderer = render_cls(config=self.renderer_configs.get(name))
    renderer.render_network_config(network_config)
    return []
def __init__(self, sys_cfg, distro, paths):
    sources.DataSource.__init__(self, sys_cfg, distro, paths)
    self.seed_dir = os.path.join(paths.seed_dir, 'azure')
    self.cfg = {}
    self.seed = None
    self.ds_cfg = util.mergemanydict([
        util.get_cfg_by_path(sys_cfg, DS_CFG_PATH, {}),
        BUILTIN_DS_CONFIG])
    self.dhclient_lease_file = self.ds_cfg.get('dhclient_lease_file')
    self._network_config = None
def __init__(self, sys_cfg, distro, paths):
    sources.DataSource.__init__(self, sys_cfg, distro, paths)
    self.metadata = dict()
    self.ds_cfg = util.mergemanydict([
        util.get_cfg_by_path(sys_cfg, ["datasource", "DigitalOcean"], {}),
        BUILTIN_DS_CONFIG])
    self.metadata_address = self.ds_cfg['metadata_url']
    self.retries = self.ds_cfg.get('retries', MD_RETRIES)
    self.timeout = self.ds_cfg.get('timeout', MD_TIMEOUT)
    self.wait_retry = self.ds_cfg.get('wait_retry', MD_WAIT_RETRY)
def __init__(self, sys_cfg, distro, paths):
    sources.DataSource.__init__(self, sys_cfg, distro, paths)
    self.default_user = None
    if distro:
        (users, _groups) = ug_util.normalize_users_groups(sys_cfg, distro)
        (self.default_user, _user_config) = ug_util.extract_default(users)
    self.metadata = dict()
    self.ds_cfg = util.mergemanydict([
        util.get_cfg_by_path(sys_cfg, ["datasource", "GCE"], {}),
        BUILTIN_DS_CONFIG])
    self.metadata_address = self.ds_cfg['metadata_url']
def __init__(self, sys_cfg=None):
    if not self.cfgname:
        name = str(self.__class__).split(".")[-1]
        if name.startswith("DataSource"):
            name = name[len("DataSource"):]
        self.cfgname = name
    if sys_cfg:
        self.sys_cfg = sys_cfg
        self.ds_cfg = util.get_cfg_by_path(self.sys_cfg,
                                           ("datasource", self.cfgname),
                                           self.ds_cfg)
def __init__(self, sys_cfg, distro, paths):
    sources.DataSource.__init__(self, sys_cfg, distro, paths)
    self.seed_dir = os.path.join(paths.seed_dir, 'azure')
    self.cfg = {}
    self.seed = None
    self.ds_cfg = util.mergemanydict([
        util.get_cfg_by_path(sys_cfg, DS_CFG_PATH, {}),
        BUILTIN_DS_CONFIG])
    self.dhclient_lease_file = self.ds_cfg.get('dhclient_lease_file')
    self._network_config = None
    # Regenerate network config new_instance boot and every boot
    self.update_events['network'].add(EventType.BOOT)
def __init__(self, sys_cfg, distro, paths):
    sources.DataSource.__init__(self, sys_cfg, distro, paths)
    self.distro = distro
    self.metadata = dict()
    self.ds_cfg = util.mergemanydict([
        util.get_cfg_by_path(sys_cfg, ["datasource", "UpCloud"], {}),
        BUILTIN_DS_CONFIG,
    ])
    self.metadata_address = self.ds_cfg["metadata_url"]
    self.retries = self.ds_cfg.get("retries", MD_RETRIES)
    self.timeout = self.ds_cfg.get("timeout", MD_TIMEOUT)
    self.wait_retry = self.ds_cfg.get("wait_retry", MD_WAIT_RETRY)
    self._network_config = None
def __init__(self, sys_cfg, distro, paths):
    sources.DataSource.__init__(self, sys_cfg, distro, paths)
    self.distro = distro
    self.metadata = dict()
    self.ds_cfg = util.mergemanydict([
        util.get_cfg_by_path(sys_cfg, ["datasource", "DigitalOcean"], {}),
        BUILTIN_DS_CONFIG])
    self.metadata_address = self.ds_cfg['metadata_url']
    self.retries = self.ds_cfg.get('retries', MD_RETRIES)
    self.timeout = self.ds_cfg.get('timeout', MD_TIMEOUT)
    self.use_ip4LL = self.ds_cfg.get('use_ip4LL', MD_USE_IPV4LL)
    self.wait_retry = self.ds_cfg.get('wait_retry', MD_WAIT_RETRY)
    self._network_config = None
def __init__(self, sys_cfg, distro, paths):
    sources.DataSource.__init__(self, sys_cfg, distro, paths)
    self.distro = distro
    self.metadata = dict()
    self.ds_cfg = util.mergemanydict([
        util.get_cfg_by_path(sys_cfg, ["datasource", "Vultr"], {}),
        BUILTIN_DS_CONFIG])
    self.metadata_address = self.ds_cfg['metadata_url']
    self.retries = self.ds_cfg['retries']
    self.timeout = self.ds_cfg['timeout']
    self.wait_retry = self.ds_cfg['wait_retry']
    self.dns_servers = self.ds_cfg['dns_servers']
    self._network_config = None
def handle(name, cfg, cloud, log, _args):
    # This is written to by the vendor data handlers
    # any vendor data shell scripts get placed in runparts_path
    runparts_path = os.path.join(cloud.get_ipath_cur(), 'scripts',
                                 SCRIPT_SUBDIR)

    prefix = util.get_cfg_by_path(cfg, ('vendor_data', 'prefix'), [])

    try:
        util.runparts(runparts_path, exe_prefix=prefix)
    except Exception:
        log.warn("Failed to run module %s (%s in %s)",
                 name, SCRIPT_SUBDIR, runparts_path)
        raise
def __init__(self, sys_cfg, distro, paths):
    sources.DataSource.__init__(self, sys_cfg, distro, paths)
    self.ds_cfg = util.mergemanydict([
        self.ds_cfg,
        util.get_cfg_by_path(sys_cfg, DS_CFG_PATH, {}),
        BUILTIN_DS_CONFIG])
    self.metadata = {}
    self.network_data = None
    self._network_config = None
    self.script_base_d = os.path.join(self.paths.get_cpath("scripts"))
    self._init()
def __init__(self, sys_cfg, distro, paths):
    super(DataSourceScaleway, self).__init__(sys_cfg, distro, paths)
    self.ds_cfg = util.mergemanydict([
        util.get_cfg_by_path(sys_cfg, ["datasource", "Scaleway"], {}),
        BUILTIN_DS_CONFIG])
    self.metadata_address = self.ds_cfg['metadata_url']
    self.userdata_address = self.ds_cfg['userdata_url']
    self.vendordata_address = self.ds_cfg['vendordata_url']
    self.retries = int(self.ds_cfg.get('retries', DEF_MD_RETRIES))
    self.timeout = int(self.ds_cfg.get('timeout', DEF_MD_TIMEOUT))
def handle(name, cfg, cloud, log, _args):
    # This is written to by the vendor data handlers
    # any vendor data shell scripts get placed in runparts_path
    runparts_path = os.path.join(cloud.get_ipath_cur(), 'scripts',
                                 SCRIPT_SUBDIR)

    prefix = util.get_cfg_by_path(cfg, ('vendor_data', 'prefix'), [])

    try:
        subp.runparts(runparts_path, exe_prefix=prefix)
    except Exception:
        log.warning("Failed to run module %s (%s in %s)",
                    name, SCRIPT_SUBDIR, runparts_path)
        raise
def __init__(self, sys_cfg, distro, paths):
    sources.DataSource.__init__(self, sys_cfg, distro, paths)
    self.distro = distro
    self.metadata = dict()
    self.ds_cfg = util.mergemanydict([
        util.get_cfg_by_path(sys_cfg, ["datasource", "Hetzner"], {}),
        BUILTIN_DS_CONFIG])
    self.metadata_address = self.ds_cfg['metadata_url']
    self.userdata_address = self.ds_cfg['userdata_url']
    self.retries = self.ds_cfg.get('retries', MD_RETRIES)
    self.timeout = self.ds_cfg.get('timeout', MD_TIMEOUT)
    self.wait_retry = self.ds_cfg.get('wait_retry', MD_WAIT_RETRY)
    self._network_config = None
    self.dsmode = sources.DSMODE_NETWORK
def __init__(self, sys_cfg, distro, paths):
    LOG.debug('Init scw')
    sources.DataSource.__init__(self, sys_cfg, distro, paths)
    self.metadata = {}
    self.ds_cfg = util.mergemanydict([
        util.get_cfg_by_path(sys_cfg, ["datasource", "Scaleway"], {}),
        BUILTIN_DS_CONFIG])
    self.metadata_address = self.ds_cfg['metadata_url']
    self.userdata_address = self.ds_cfg['userdata_url']
    self.retries = self.ds_cfg.get('retries', DEF_MD_RETRIES)
    self.timeout = self.ds_cfg.get('timeout', DEF_MD_TIMEOUT)
def __init__(self, sys_cfg, distro, paths, ud_proc=None):
    self.sys_cfg = sys_cfg
    self.distro = distro
    self.paths = paths
    self.userdata = None
    self.metadata = None
    self.userdata_raw = None

    name = type_utils.obj_name(self)
    if name.startswith(DS_PREFIX):
        name = name[len(DS_PREFIX):]

    self.ds_cfg = util.get_cfg_by_path(self.sys_cfg,
                                       ("datasource", name), {})
    if not ud_proc:
        self.ud_proc = ud.UserDataProcessor(self.paths)
    else:
        self.ud_proc = ud_proc
def __init__(self, sys_cfg, distro, paths):
    super(DataSourceScaleway, self).__init__(sys_cfg, distro, paths)
    self.ds_cfg = util.mergemanydict([
        util.get_cfg_by_path(sys_cfg, ["datasource", "Scaleway"], {}),
        BUILTIN_DS_CONFIG])
    self.metadata_address = self.ds_cfg['metadata_url']
    self.userdata_address = self.ds_cfg['userdata_url']
    self.vendordata_address = self.ds_cfg['vendordata_url']
    self.retries = int(self.ds_cfg.get('retries', DEF_MD_RETRIES))
    self.timeout = int(self.ds_cfg.get('timeout', DEF_MD_TIMEOUT))
    self._fallback_interface = None
    self._network_config = None
def __init__(self, sys_cfg, distro, paths):
    sources.DataSource.__init__(self, sys_cfg, distro, paths)
    self.metadata = dict()
    self.ds_cfg = util.mergemanydict([
        util.get_cfg_by_path(sys_cfg, ["datasource", "DigitalOcean"], {}),
        BUILTIN_DS_CONFIG])
    self.metadata_address = self.ds_cfg['metadata_url']

    if self.ds_cfg.get('retries'):
        self.retries = self.ds_cfg['retries']
    else:
        self.retries = MD_RETRIES

    if self.ds_cfg.get('timeout'):
        self.timeout = self.ds_cfg['timeout']
    else:
        self.timeout = MD_TIMEOUT
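# Note the subtle difference between the if/else fallback above and the
# ds_cfg.get(key, default) form used by the other DigitalOcean variants:
# .get() preserves falsy configured values (0, ""), while the if/else
# falls back to the default for them. A self-contained demonstration:

ds_cfg = {'retries': 0}
MD_RETRIES = 30
via_get = ds_cfg.get('retries', MD_RETRIES)
via_if = ds_cfg['retries'] if ds_cfg.get('retries') else MD_RETRIES
assert (via_get, via_if) == (0, 30)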
def __init__(self, sys_cfg, distro, paths):
    sources.DataSource.__init__(self, sys_cfg, distro, paths)
    self.is_smartdc = None
    self.ds_cfg = util.mergemanydict([
        self.ds_cfg,
        util.get_cfg_by_path(sys_cfg, DS_CFG_PATH, {}),
        BUILTIN_DS_CONFIG])
    self.metadata = {}
    self.cfg = BUILTIN_CLOUD_CONFIG
    self.seed = self.ds_cfg.get("serial_device")
    self.seed_timeout = self.ds_cfg.get("serial_timeout")
    self.smartos_no_base64 = self.ds_cfg.get('no_base64_decode')
    self.b64_keys = self.ds_cfg.get('base64_keys')
    self.b64_all = self.ds_cfg.get('base64_all')
def get_data(self):
    # azure removes/ejects the cdrom containing the ovf-env.xml
    # file on reboot.  So, in order to successfully reboot we
    # need to look in the datadir and consider that valid
    ddir = self.ds_cfg['data_dir']

    candidates = [self.seed_dir]
    candidates.extend(list_possible_azure_ds_devs())
    if ddir:
        candidates.append(ddir)

    found = None

    for cdev in candidates:
        try:
            if cdev.startswith("/dev/"):
                ret = util.mount_cb(cdev, load_azure_ds_dir)
            else:
                ret = load_azure_ds_dir(cdev)
        except NonAzureDataSource:
            continue
        except BrokenAzureDataSource as exc:
            raise exc
        except util.MountFailedError:
            LOG.warn("%s was not mountable", cdev)
            continue

        (md, self.userdata_raw, cfg, files) = ret
        self.seed = cdev
        self.metadata = util.mergemanydict([md, DEFAULT_METADATA])
        self.cfg = util.mergemanydict([cfg, BUILTIN_CLOUD_CONFIG])
        found = cdev

        LOG.debug("found datasource in %s", cdev)
        break

    if not found:
        return False

    if found == ddir:
        LOG.debug("using files cached in %s", ddir)

    # azure / hyper-v provides random data here
    seed = util.load_file("/sys/firmware/acpi/tables/OEM0",
                          quiet=True, decode=False)
    if seed:
        self.metadata['random_seed'] = seed

    # now update ds_cfg to reflect contents passed in config
    user_ds_cfg = util.get_cfg_by_path(self.cfg, DS_CFG_PATH, {})
    self.ds_cfg = util.mergemanydict([user_ds_cfg, self.ds_cfg])

    # walinux agent writes files world readable, but expects
    # the directory to be protected.
    write_files(ddir, files, dirmode=0o700)

    if self.ds_cfg['agent_command'] == '__builtin__':
        metadata_func = get_metadata_from_fabric
    else:
        metadata_func = self.get_metadata_from_agent
    try:
        fabric_data = metadata_func()
    except Exception:
        LOG.info("Error communicating with Azure fabric; assume we aren't"
                 " on Azure.", exc_info=True)
        return False

    self.metadata['instance-id'] = util.read_dmi_data('system-uuid')
    self.metadata.update(fabric_data)

    found_ephemeral = find_fabric_formatted_ephemeral_disk()
    if found_ephemeral:
        self.ds_cfg['disk_aliases']['ephemeral0'] = found_ephemeral
        LOG.debug("using detected ephemeral0 of %s", found_ephemeral)

    cc_modules_override = support_new_ephemeral(self.sys_cfg)
    if cc_modules_override:
        self.cfg['cloud_config_modules'] = cc_modules_override

    return True
def get_data(self):
    # azure removes/ejects the cdrom containing the ovf-env.xml
    # file on reboot.  So, in order to successfully reboot we
    # need to look in the datadir and consider that valid
    ddir = self.ds_cfg["data_dir"]

    candidates = [self.seed_dir]
    candidates.extend(list_possible_azure_ds_devs())
    if ddir:
        candidates.append(ddir)

    found = None

    for cdev in candidates:
        try:
            if cdev.startswith("/dev/"):
                ret = util.mount_cb(cdev, load_azure_ds_dir)
            else:
                ret = load_azure_ds_dir(cdev)
        except NonAzureDataSource:
            continue
        except BrokenAzureDataSource as exc:
            raise exc
        except util.MountFailedError:
            LOG.warn("%s was not mountable", cdev)
            continue

        (md, self.userdata_raw, cfg, files) = ret
        self.seed = cdev
        self.metadata = util.mergemanydict([md, DEFAULT_METADATA])
        self.cfg = util.mergemanydict([cfg, BUILTIN_CLOUD_CONFIG])
        found = cdev

        LOG.debug("found datasource in %s", cdev)
        break

    if not found:
        return False

    if found == ddir:
        LOG.debug("using files cached in %s", ddir)

    # azure / hyper-v provides random data here
    seed = util.load_file("/sys/firmware/acpi/tables/OEM0",
                          quiet=True, decode=False)
    if seed:
        self.metadata["random_seed"] = seed

    # now update ds_cfg to reflect contents passed in config
    user_ds_cfg = util.get_cfg_by_path(self.cfg, DS_CFG_PATH, {})
    self.ds_cfg = util.mergemanydict([user_ds_cfg, self.ds_cfg])

    if found != ddir:
        cached_ovfenv = util.load_file(
            os.path.join(ddir, "ovf-env.xml"), quiet=True, decode=False)
        if cached_ovfenv != files["ovf-env.xml"]:
            # source was not walinux-agent's datadir, so we have to clean
            # up so 'wait_for_files' doesn't return early due to stale data
            cleaned = []
            for f in [os.path.join(ddir, f) for f in DATA_DIR_CLEAN_LIST]:
                if os.path.exists(f):
                    util.del_file(f)
                    cleaned.append(f)
            if cleaned:
                LOG.info("removed stale file(s) in '%s': %s",
                         ddir, str(cleaned))

    # walinux agent writes files world readable, but expects
    # the directory to be protected.
    write_files(ddir, files, dirmode=0o700)

    if self.ds_cfg["agent_command"] == "__builtin__":
        metadata_func = get_metadata_from_fabric
    else:
        metadata_func = self.get_metadata_from_agent
    try:
        fabric_data = metadata_func()
    except Exception:
        LOG.info("Error communicating with Azure fabric; assume we aren't"
                 " on Azure.", exc_info=True)
        return False

    self.metadata.update(fabric_data)

    found_ephemeral = find_fabric_formatted_ephemeral_disk()
    if found_ephemeral:
        self.ds_cfg["disk_aliases"]["ephemeral0"] = found_ephemeral
        LOG.debug("using detected ephemeral0 of %s", found_ephemeral)

    cc_modules_override = support_new_ephemeral(self.sys_cfg)
    if cc_modules_override:
        self.cfg["cloud_config_modules"] = cc_modules_override

    return True
def get_data(self):
    # azure removes/ejects the cdrom containing the ovf-env.xml
    # file on reboot.  So, in order to successfully reboot we
    # need to look in the datadir and consider that valid
    ddir = self.ds_cfg['data_dir']

    candidates = [self.seed_dir]
    candidates.extend(list_possible_azure_ds_devs())
    if ddir:
        candidates.append(ddir)

    found = None

    for cdev in candidates:
        try:
            if cdev.startswith("/dev/"):
                ret = util.mount_cb(cdev, load_azure_ds_dir)
            else:
                ret = load_azure_ds_dir(cdev)
        except NonAzureDataSource:
            continue
        except BrokenAzureDataSource as exc:
            raise exc
        except util.MountFailedError:
            LOG.warn("%s was not mountable", cdev)
            continue

        (md, self.userdata_raw, cfg, files) = ret
        self.seed = cdev
        self.metadata = util.mergemanydict([md, DEFAULT_METADATA])
        self.cfg = util.mergemanydict([cfg, BUILTIN_CLOUD_CONFIG])
        found = cdev

        LOG.debug("found datasource in %s", cdev)
        break

    if not found:
        return False

    if found == ddir:
        LOG.debug("using files cached in %s", ddir)

    # azure / hyper-v provides random data here
    seed = util.load_file("/sys/firmware/acpi/tables/OEM0", quiet=True)
    if seed:
        self.metadata['random_seed'] = seed

    # now update ds_cfg to reflect contents passed in config
    user_ds_cfg = util.get_cfg_by_path(self.cfg, DS_CFG_PATH, {})
    self.ds_cfg = util.mergemanydict([user_ds_cfg, self.ds_cfg])
    mycfg = self.ds_cfg

    # walinux agent writes files world readable, but expects
    # the directory to be protected.
    write_files(mycfg['data_dir'], files, dirmode=0o700)

    # handle the hostname 'publishing'
    try:
        handle_set_hostname(mycfg.get('set_hostname'),
                            self.metadata.get('local-hostname'),
                            mycfg['hostname_bounce'])
    except Exception as e:
        LOG.warn("Failed publishing hostname: %s", e)
        util.logexc(LOG, "handling set_hostname failed")

    try:
        invoke_agent(mycfg['agent_command'])
    except util.ProcessExecutionError:
        # claim the datasource even if the command failed
        util.logexc(LOG, "agent command '%s' failed.",
                    mycfg['agent_command'])

    shcfgxml = os.path.join(mycfg['data_dir'], "SharedConfig.xml")
    wait_for = [shcfgxml]

    fp_files = []
    for pk in self.cfg.get('_pubkeys', []):
        bname = str(pk['fingerprint'] + ".crt")
        fp_files += [os.path.join(mycfg['data_dir'], bname)]

    missing = util.log_time(logfunc=LOG.debug, msg="waiting for files",
                            func=wait_for_files,
                            args=(wait_for + fp_files,))
    if len(missing):
        LOG.warn("Did not find files, but going on: %s", missing)

    if shcfgxml in missing:
        LOG.warn("SharedConfig.xml missing, using static instance-id")
    else:
        try:
            self.metadata['instance-id'] = iid_from_shared_config(shcfgxml)
        except ValueError as e:
            LOG.warn("failed to get instance id in %s: %s", shcfgxml, e)

    pubkeys = pubkeys_from_crt_files(fp_files)
    self.metadata['public-keys'] = pubkeys

    return True