def get_data(self):
    """Populate metadata/userdata from an OVF environment.

    Checks the seed directory for an OVF env file first, then falls back
    to the iso9660 and vmware-guestd transports. Optionally pulls extra
    seed data from a 'seedfrom' URL before merging in defaults.

    @return: True when a usable OVF transport was found, else False.
    """
    found = []
    md = {}
    ud = ""
    defaults = {
        "instance-id": "iid-dsovf",
    }

    (seedfile, contents) = get_ovf_env(self.paths.seed_dir)
    if seedfile:
        # Found a seed dir
        seed = os.path.join(self.paths.seed_dir, seedfile)
        (md, ud, cfg) = read_ovf_environment(contents)
        self.environment = contents
        found.append(seed)
    else:
        np = {'iso': transport_iso9660,
              'vmware-guestd': transport_vmware_guestd, }
        name = None
        # FIX: dict.iteritems() is Python 2 only and raises AttributeError
        # on Python 3; use items().
        for (name, transfunc) in np.items():
            (contents, _dev, _fname) = transfunc()
            if contents:
                break
        if contents:
            (md, ud, cfg) = read_ovf_environment(contents)
            self.environment = contents
            found.append(name)

    # There was no OVF transports found
    if len(found) == 0:
        return False

    if 'seedfrom' in md and md['seedfrom']:
        seedfrom = md['seedfrom']
        seedfound = False
        for proto in self.supported_seed_starts:
            if seedfrom.startswith(proto):
                seedfound = proto
                break
        if not seedfound:
            LOG.debug("Seed from %s not supported by %s", seedfrom, self)
            return False

        (md_seed, ud) = util.read_seeded(seedfrom, timeout=None)
        LOG.debug("Using seeded cache data from %s", seedfrom)

        md = util.mergemanydict([md, md_seed])
        found.append(seedfrom)

    # Now that we have exhausted any other places merge in the defaults
    md = util.mergemanydict([md, defaults])

    self.seed = ",".join(found)
    self.metadata = md
    self.userdata_raw = ud
    self.cfg = cfg
    return True
def __init__(self, sys_cfg, distro, paths):
    """GCE datasource init: layer system config over builtin defaults."""
    sources.DataSource.__init__(self, sys_cfg, distro, paths)
    self.metadata = {}
    # System-level datasource settings take precedence over the builtins.
    sys_ds_cfg = util.get_cfg_by_path(sys_cfg, ["datasource", "GCE"], {})
    self.ds_cfg = util.mergemanydict([sys_ds_cfg, BUILTIN_DS_CONFIG])
    self.metadata_address = self.ds_cfg['metadata_url']
def test_compat_merges_str(self):
    """Old and new merge helpers agree when merging string values."""
    a = {'b': "hi"}
    b = {'b': "howdy"}
    c = {'b': "hallo"}
    e = _old_mergemanydict(a, b, c)
    f = util.mergemanydict([a, b, c])
    # FIX: assertEquals is a deprecated alias removed in Python 3.12.
    self.assertEqual(e, f)
def __init__(self, sys_cfg, distro, paths):
    """SmartOS datasource init: detect zone vs KVM, pick metadata channel."""
    sources.DataSource.__init__(self, sys_cfg, distro, paths)
    self.is_smartdc = None
    self.ds_cfg = util.mergemanydict([
        self.ds_cfg,
        util.get_cfg_by_path(sys_cfg, DS_CFG_PATH, {}),
        BUILTIN_DS_CONFIG])
    self.metadata = {}

    # SDC LX-Brand Zones lack dmidecode (no /dev/mem) but
    # report 'BrandZ virtual linux' as the kernel version
    in_lx_zone = os.uname()[3].lower() == 'brandz virtual linux'
    self.is_smartdc = True
    if in_lx_zone:
        LOG.debug("Host is SmartOS, guest in Zone")
        self.smartos_type = 'lx-brand'
        self.cfg = {}
        self.seed = self.ds_cfg.get("metadata_sockfile")
    else:
        self.smartos_type = 'kvm'
        self.seed = self.ds_cfg.get("serial_device")
        self.cfg = BUILTIN_CLOUD_CONFIG
        self.seed_timeout = self.ds_cfg.get("serial_timeout")

    self.smartos_no_base64 = self.ds_cfg.get('no_base64_decode')
    self.b64_keys = self.ds_cfg.get('base64_keys')
    self.b64_all = self.ds_cfg.get('base64_all')
    self.script_base_d = os.path.join(self.paths.get_cpath("scripts"))
def handle(name, cfg, cloud, log, args):
    """cloud-init handler: install/configure snappy packages.

    Merges the 'snappy' cfg section over BUILTIN_CFG, skips non-snappy
    systems, applies each package operation, then toggles SSH per config.
    Raises Exception when any snap operation failed.
    """
    cfgin = cfg.get('snappy')
    if not cfgin:
        cfgin = {}
    mycfg = util.mergemanydict([cfgin, BUILTIN_CFG])

    sys_snappy = str(mycfg.get("system_snappy", "auto"))
    if util.is_false(sys_snappy):
        LOG.debug("%s: System is not snappy. disabling", name)
        return

    if sys_snappy.lower() == "auto" and not(system_is_snappy()):
        LOG.debug("%s: 'auto' mode, and system not snappy", name)
        return

    set_snappy_command()

    pkg_ops = get_package_ops(packages=mycfg['packages'],
                              configs=mycfg['config'],
                              fspath=mycfg['packages_dir'])
    fails = []
    for pkg_op in pkg_ops:
        try:
            render_snap_op(**pkg_op)
        except Exception as e:
            fails.append((pkg_op, e,))
            # FIX: logger.warn is a deprecated alias of logger.warning.
            LOG.warning("'%s' failed for '%s': %s",
                        pkg_op['op'], pkg_op['name'], e)

    disable_enable_ssh(mycfg.get('ssh_enabled', False))

    if fails:
        raise Exception("failed to install/configure snaps")
def __init__(self, sys_cfg, distro, paths):
    """Azure datasource init: seed dir, ds config, dhclient lease path."""
    sources.DataSource.__init__(self, sys_cfg, distro, paths)
    self.seed_dir = os.path.join(paths.seed_dir, "azure")
    self.cfg = {}
    self.seed = None
    overrides = util.get_cfg_by_path(sys_cfg, DS_CFG_PATH, {})
    self.ds_cfg = util.mergemanydict([overrides, BUILTIN_DS_CONFIG])
    self.dhclient_lease_file = self.ds_cfg.get("dhclient_lease_file")
def test_compat_merges_list(self):
    """Old and new merge helpers agree when merging list values."""
    a = {'b': [1, 2, 3]}
    b = {'b': [4, 5]}
    c = {'b': [6, 7]}
    e = _old_mergemanydict(a, b, c)
    f = util.mergemanydict([a, b, c])
    # FIX: assertEquals is a deprecated alias removed in Python 3.12.
    self.assertEqual(e, f)
def _merge_new_seed(cur, seeded):
    """Return a copy of `cur` with the fetched seed merged in."""
    ret = cur.copy()
    seeded_md = util.load_yaml(seeded['meta-data'])
    ret['meta-data'] = util.mergemanydict([cur['meta-data'], seeded_md])
    ret['user-data'] = seeded['user-data']
    if 'vendor-data' in seeded:
        ret['vendor-data'] = seeded['vendor-data']
    return ret
def handle(name, cfg, cloud, log, args):
    """Switch an Ubuntu system between upstart and systemd init.

    Target and reboot flag come from cfg['init_switch'] (overridable via
    positional args). No-op on non-Ubuntu distros or when already on the
    target init system; optionally reboots after a successful switch.
    """
    if not isinstance(cloud.distro, ubuntu.Distro):
        log.debug("%s: distro is '%s', not ubuntu. returning",
                  name, cloud.distro.__class__)
        return

    cfg = util.mergemanydict([cfg, DEFAULT_CONFIG])
    target = cfg['init_switch']['target']
    reboot = cfg['init_switch']['reboot']

    if len(args) != 0:
        target = args[0]
        if len(args) > 1:
            reboot = util.is_true(args[1])

    if not target:
        log.debug("%s: target=%s. nothing to do", name, target)
        return

    if not util.which('dpkg'):
        # FIX: logger.warn is a deprecated alias of logger.warning
        # (applies to every warn call in this handler).
        log.warning("%s: 'dpkg' not available. Assuming not ubuntu", name)
        return

    supported = ('upstart', 'systemd')
    if target not in supported:
        log.warning("%s: target set to %s, expected one of: %s",
                    name, target, str(supported))

    if os.path.exists("/run/systemd/system"):
        current = "systemd"
    else:
        current = "upstart"

    if current == target:
        log.debug("%s: current = target = %s. nothing to do", name, target)
        return

    try:
        util.subp(['sh', '-s', target], data=SWITCH_INIT)
    except util.ProcessExecutionError as e:
        log.warning("%s: Failed to switch to init '%s'. %s", name, target, e)
        return

    if util.is_false(reboot):
        log.info("%s: switched '%s' to '%s'. reboot=false, not rebooting.",
                 name, current, target)
        return

    try:
        log.warning("%s: switched '%s' to '%s'. rebooting.",
                    name, current, target)
        logging.flushLoggers(log)
        _fire_reboot(log, wait_attempts=4, initial_sleep=4)
    except Exception:
        # FIX: the bound exception variable was unused; logexc captures it.
        util.logexc(log, "Requested reboot did not happen!")
        raise
def __init__(self, sys_cfg, distro, paths):
    """Azure datasource init; records the dhclient lease file path."""
    sources.DataSource.__init__(self, sys_cfg, distro, paths)
    self.seed_dir = os.path.join(paths.seed_dir, 'azure')
    self.cfg = {}
    self.seed = None
    sys_ds_cfg = util.get_cfg_by_path(sys_cfg, DS_CFG_PATH, {})
    self.ds_cfg = util.mergemanydict([sys_ds_cfg, BUILTIN_DS_CONFIG])
    self.dhclient_lease_file = self.ds_cfg.get('dhclient_lease_file')
def __init__(self, sys_cfg, distro, paths):
    """Azure datasource init; records the instance cloud-data directory."""
    sources.DataSource.__init__(self, sys_cfg, distro, paths)
    self.seed_dir = os.path.join(paths.seed_dir, 'azure')
    self.cfg = {}
    self.seed = None
    sys_ds_cfg = util.get_cfg_by_path(sys_cfg, DS_CFG_PATH, {})
    self.ds_cfg = util.mergemanydict([sys_ds_cfg, BUILTIN_DS_CONFIG])
    self.cloud_data_dir = paths.get_cpath('data')
def _merge_new_seed(cur, seeded):
    """Return a copy of `cur` updated with a freshly fetched seed."""
    ret = cur.copy()
    fetched_md = util.load_yaml(seeded['meta-data'])
    ret['meta-data'] = util.mergemanydict([cur['meta-data'], fetched_md])
    ret['user-data'] = seeded['user-data']
    if 'vendor-data' in seeded:
        ret['vendor-data'] = seeded['vendor-data']
    return ret
def __init__(self, sys_cfg, *args, **kwargs):
    """Oracle datasource init: merged ds config + klibc net-config source."""
    super(DataSourceOracle, self).__init__(sys_cfg, *args, **kwargs)
    self._vnics_data = None
    overrides = util.get_cfg_by_path(
        sys_cfg, ["datasource", self.dsname], {})
    self.ds_cfg = util.mergemanydict([overrides, BUILTIN_DS_CONFIG])
    self._network_config_source = KlibcOracleNetworkConfigSource()
def _get_data(self):
    """Crawl metadata, parse and persist that data for this instance.

    @return: True when metadata discovered indicates OpenStack datasource.
        False when unable to contact metadata service or when metadata
        format is invalid or disabled.
    """
    # FIX: sys_cfg.get('datasource_list') may return None, making the
    # membership test raise TypeError; default to an empty list.
    oracle_considered = 'Oracle' in (
        self.sys_cfg.get('datasource_list') or [])
    if not detect_openstack(accept_oracle=not oracle_considered):
        return False

    if self.perform_dhcp_setup:  # Setup networking in init-local stage.
        try:
            with EphemeralDHCPv4(self.fallback_interface):
                results = util.log_time(
                    logfunc=LOG.debug, msg='Crawl of metadata service',
                    func=self._crawl_metadata)
        except (NoDHCPLeaseError, sources.InvalidMetaDataException) as e:
            util.logexc(LOG, str(e))
            return False
    else:
        try:
            results = self._crawl_metadata()
        except sources.InvalidMetaDataException as e:
            util.logexc(LOG, str(e))
            return False

    self.dsmode = self._determine_dsmode([results.get('dsmode')])
    if self.dsmode == sources.DSMODE_DISABLED:
        return False

    md = results.get('metadata', {})
    md = util.mergemanydict([md, DEFAULT_METADATA])
    self.metadata = md
    self.ec2_metadata = results.get('ec2-metadata')
    self.network_json = results.get('networkdata')
    self.userdata_raw = results.get('userdata')
    self.version = results['version']
    self.files.update(results.get('files', {}))

    vd = results.get('vendordata')
    self.vendordata_pure = vd
    try:
        self.vendordata_raw = sources.convert_vendordata(vd)
    except ValueError as e:
        LOG.warning("Invalid content in vendor-data: %s", e)
        self.vendordata_raw = None

    vd2 = results.get('vendordata2')
    self.vendordata2_pure = vd2
    try:
        self.vendordata2_raw = sources.convert_vendordata(vd2)
    except ValueError as e:
        LOG.warning("Invalid content in vendor-data2: %s", e)
        self.vendordata2_raw = None

    return True
def normalize_users_groups(cfg, distro):
    """Normalize the 'user', 'users' and 'groups' config entries.

    @param cfg: cloud-config dict (may be falsy).
    @param distro: distro object providing get_default_user().
    @return: tuple (users, groups) of normalized structures.
    """
    if not cfg:
        cfg = {}

    # Handle the previous style of doing this where the first user
    # overrides the concept of the default user if provided in the
    # user: XYZ format.
    old_user = {}
    if 'user' in cfg and cfg['user']:
        old_user = cfg['user']
        # Translate it into a format that will be more useful going forward
        if isinstance(old_user, str):
            old_user = {'name': old_user}
        elif not isinstance(old_user, dict):
            LOG.warning(("Format for 'user' key must be a string or "
                         "dictionary and not %s"),
                        type_utils.obj_name(old_user))
            old_user = {}

    # If no old user format, then assume the distro provides what the
    # 'default' user maps to, but notice that if this is provided, we won't
    # automatically inject a 'default' user into the users list, while if
    # an old user format is provided we will.
    distro_user_config = {}
    try:
        distro_user_config = distro.get_default_user()
    except NotImplementedError:
        LOG.warning(('Distro has not implemented default user access. No '
                     'distribution provided default user will be '
                     'normalized.'))

    # Merge the old user (which may just be an empty dict when not present)
    # with the distro provided default user configuration so that the old
    # user style picks up all the distribution specific attributes (if any)
    default_user_config = util.mergemanydict([old_user, distro_user_config])

    base_users = cfg.get('users', [])
    if not isinstance(base_users, (list, dict, str)):
        LOG.warning(("Format for 'users' key must be a comma separated "
                     "string or a dictionary or a list but found %s"),
                    type_utils.obj_name(base_users))
        base_users = []

    if old_user:
        # FIX: this branch was garbled/masked in the source; restored the
        # upstream logic. When 'user:' is provided it should become the
        # default user, so ensure a 'default' entry exists in base_users.
        if isinstance(base_users, list):
            base_users.append({'name': 'default'})
        elif isinstance(base_users, dict):
            base_users['default'] = dict(base_users).get('default', True)
        elif isinstance(base_users, str):
            base_users += ',default'

    groups = {}
    if 'groups' in cfg:
        groups = _normalize_groups(cfg['groups'])

    users = _normalize_users(base_users, default_user_config)
    return (users, groups)
def handle(name, cfg, cloud, log, args):
    """Install/configure snappy packages and manage the SSH service.

    ssh_enabled supports True/False/"auto"; in auto mode SSH is enabled
    when SSH keys are present or password authentication is requested.
    Raises Exception when any snap operation failed.
    """
    cfgin = cfg.get('snappy')
    if not cfgin:
        cfgin = {}
    mycfg = util.mergemanydict([cfgin, BUILTIN_CFG])

    sys_snappy = str(mycfg.get("system_snappy", "auto"))
    if util.is_false(sys_snappy):
        LOG.debug("%s: System is not snappy. disabling", name)
        return

    if sys_snappy.lower() == "auto" and not (system_is_snappy()):
        LOG.debug("%s: 'auto' mode, and system not snappy", name)
        return

    set_snappy_command()

    pkg_ops = get_package_ops(packages=mycfg['packages'],
                              configs=mycfg['config'],
                              fspath=mycfg['packages_dir'])
    fails = []
    for pkg_op in pkg_ops:
        try:
            render_snap_op(**pkg_op)
        except Exception as e:
            fails.append((pkg_op, e,))
            # FIX: logger.warn is a deprecated alias of logger.warning.
            LOG.warning("'%s' failed for '%s': %s",
                        pkg_op['op'], pkg_op['name'], e)

    # Default to disabling SSH
    ssh_enabled = mycfg.get('ssh_enabled', "auto")

    # If the user has not explicitly enabled or disabled SSH, then enable it
    # when password SSH authentication is requested or there are SSH keys
    if ssh_enabled == "auto":
        user_ssh_keys = cloud.get_public_ssh_keys() or None
        password_auth_enabled = cfg.get('ssh_pwauth', False)
        if user_ssh_keys:
            LOG.debug("Enabling SSH, ssh keys found in datasource")
            ssh_enabled = True
        elif cfg.get('ssh_authorized_keys'):
            LOG.debug("Enabling SSH, ssh keys found in config")
            # FIX: this branch logged that SSH would be enabled but never
            # set the flag, leaving ssh_enabled == "auto".
            ssh_enabled = True
        elif password_auth_enabled:
            LOG.debug("Enabling SSH, password authentication requested")
            ssh_enabled = True
    elif ssh_enabled not in (True, False):
        LOG.warning("Unknown value '%s' in ssh_enabled", ssh_enabled)

    disable_enable_ssh(ssh_enabled)

    if fails:
        raise Exception("failed to install/configure snaps")
def fetch_base_config():
    """Assemble the base config; later entries win (reverse merge)."""
    cfgs = [
        # builtin config
        util.get_builtin_cfg(),
        # Anything in your conf.d or 'default' cloud.cfg location.
        util.read_conf_with_confd(CLOUD_CONFIG),
        # Kernel/cmdline parameters override system config
        util.read_conf_from_cmdline(),
    ]
    return util.mergemanydict(cfgs, reverse=True)
def merge_dicts(a, b):
    """Merge dict `b` into `a`, selecting the strategy from the environment.

    Falls back to the stdlib strategy when deep merge is not requested or
    when deep merging raises.
    """
    merge_strategy = os.getenv(_MERGE_STRATEGY_ENV_VAR)
    if merge_strategy == _MERGE_STRATEGY_DEEPMERGE:
        try:
            LOG.info('merging dictionaries with deepmerge strategy')
            return merge_dicts_with_deep_merge(a, b)
        except Exception as err:
            # FIX: use lazy logging args instead of eager % interpolation.
            LOG.error("deep merge failed: %s", err)
    LOG.info('merging dictionaries with stdlib strategy')
    return util.mergemanydict([a, b])
def test_compat_merges_dict(self):
    """Old and new merge helpers agree on overlapping dict keys."""
    a = {
        '1': '2',
        'b': 'c',
    }
    b = {
        'b': 'e',
    }
    c = _old_mergedict(a, b)
    d = util.mergemanydict([a, b])
    # FIX: assertEquals is a deprecated alias removed in Python 3.12.
    self.assertEqual(c, d)
def test_compat_merges_dict(self):
    """Both merge helpers resolve a shared key the same way."""
    first = {"1": "2", "b": "c"}
    second = {"b": "e"}
    expected = _old_mergedict(first, second)
    actual = util.mergemanydict([first, second])
    self.assertEqual(expected, actual)
def __init__(self, sys_cfg, distro, paths):
    """DigitalOcean datasource init with tunable retry/timeout settings."""
    sources.DataSource.__init__(self, sys_cfg, distro, paths)
    self.metadata = {}
    overrides = util.get_cfg_by_path(
        sys_cfg, ["datasource", "DigitalOcean"], {})
    self.ds_cfg = util.mergemanydict([overrides, BUILTIN_DS_CONFIG])
    self.metadata_address = self.ds_cfg['metadata_url']
    self.retries = self.ds_cfg.get('retries', MD_RETRIES)
    self.timeout = self.ds_cfg.get('timeout', MD_TIMEOUT)
    self.wait_retry = self.ds_cfg.get('wait_retry', MD_WAIT_RETRY)
def test_seed_runs(self):
    """Old and new merge helpers agree across many generated dict sets."""
    for i in range(1, 10):
        batch = [make_dict(5, i * j) for j in range(1, 10)]
        self.assertEqual(_old_mergemanydict(*batch),
                         util.mergemanydict(batch))
def __init__(self, sys_cfg, distro, paths):
    """GCE datasource init; resolves the distro default user when possible."""
    sources.DataSource.__init__(self, sys_cfg, distro, paths)
    self.default_user = None
    if distro:
        (users, _groups) = ug_util.normalize_users_groups(sys_cfg, distro)
        (self.default_user, _user_config) = ug_util.extract_default(users)
    self.metadata = {}
    sys_ds_cfg = util.get_cfg_by_path(sys_cfg, ["datasource", "GCE"], {})
    self.ds_cfg = util.mergemanydict([sys_ds_cfg, BUILTIN_DS_CONFIG])
    self.metadata_address = self.ds_cfg['metadata_url']
def test_seed_runs(self):
    """Old and new merge helpers agree across many generated dict sets."""
    test_dicts = []
    for i in range(1, 50):
        base_dicts = []
        for j in range(1, 50):
            base_dicts.append(make_dict(5, i * j))
        test_dicts.append(base_dicts)
    for test in test_dicts:
        c = _old_mergemanydict(*test)
        d = util.mergemanydict(test)
        # FIX: assertEquals is a deprecated alias removed in Python 3.12.
        self.assertEqual(c, d)
def test_compat_merges_dict(self):
    """A later dict's value wins for a shared key, old and new alike."""
    base = {'1': '2', 'b': 'c'}
    update = {'b': 'e'}
    legacy = _old_mergedict(base, update)
    current = util.mergemanydict([base, update])
    self.assertEqual(legacy, current)
def distro_ntp_client_configs(distro):
    """Construct a distro-specific ntp client config dictionary by merging
    distro specific changes into base config.

    @param distro: String providing the distro class name.
    @returns: Dict of distro configurations for ntp clients.
    """
    cfg = copy.copy(NTP_CLIENT_CONFIG)
    if distro in DISTRO_CLIENT_CONFIG:
        # reverse=True so the distro overrides win over the base values
        cfg = util.mergemanydict(
            [cfg, DISTRO_CLIENT_CONFIG[distro]], reverse=True)
    return cfg
def fetch_base_config():
    """Build the base configuration; later sources override earlier ones."""
    cfgs = [
        # builtin config
        util.get_builtin_cfg(),
        # Anything in your conf.d or 'default' cloud.cfg location.
        util.read_conf_with_confd(CLOUD_CONFIG),
        # runtime config
        read_runtime_config(),
        # Kernel/cmdline parameters override system config
        util.read_conf_from_cmdline(),
    ]
    return util.mergemanydict(cfgs, reverse=True)
def handle(name, cfg, cloud, log, args):
    """Install/configure snappy packages and manage the SSH service.

    ssh_enabled supports True/False/"auto"; in auto mode SSH is enabled
    when SSH keys are present or password authentication is requested.
    Raises Exception when any snap operation failed.
    """
    cfgin = cfg.get('snappy')
    if not cfgin:
        cfgin = {}
    mycfg = util.mergemanydict([cfgin, BUILTIN_CFG])

    sys_snappy = str(mycfg.get("system_snappy", "auto"))
    if util.is_false(sys_snappy):
        LOG.debug("%s: System is not snappy. disabling", name)
        return

    if sys_snappy.lower() == "auto" and not(system_is_snappy()):
        LOG.debug("%s: 'auto' mode, and system not snappy", name)
        return

    set_snappy_command()

    pkg_ops = get_package_ops(packages=mycfg['packages'],
                              configs=mycfg['config'],
                              fspath=mycfg['packages_dir'])
    fails = []
    for pkg_op in pkg_ops:
        try:
            render_snap_op(**pkg_op)
        except Exception as e:
            fails.append((pkg_op, e,))
            # FIX: logger.warn is a deprecated alias of logger.warning.
            LOG.warning("'%s' failed for '%s': %s",
                        pkg_op['op'], pkg_op['name'], e)

    # Default to disabling SSH
    ssh_enabled = mycfg.get('ssh_enabled', "auto")

    # If the user has not explicitly enabled or disabled SSH, then enable it
    # when password SSH authentication is requested or there are SSH keys
    if ssh_enabled == "auto":
        user_ssh_keys = cloud.get_public_ssh_keys() or None
        password_auth_enabled = cfg.get('ssh_pwauth', False)
        if user_ssh_keys:
            LOG.debug("Enabling SSH, ssh keys found in datasource")
            ssh_enabled = True
        elif cfg.get('ssh_authorized_keys'):
            LOG.debug("Enabling SSH, ssh keys found in config")
            # FIX: this branch logged that SSH would be enabled but never
            # set the flag, leaving ssh_enabled == "auto".
            ssh_enabled = True
        elif password_auth_enabled:
            LOG.debug("Enabling SSH, password authentication requested")
            ssh_enabled = True
    elif ssh_enabled not in (True, False):
        LOG.warning("Unknown value '%s' in ssh_enabled", ssh_enabled)

    disable_enable_ssh(ssh_enabled)

    if fails:
        raise Exception("failed to install/configure snaps")
def __init__(self, sys_cfg, distro, paths):
    """Azure datasource init; subscribes to network regen on every boot."""
    sources.DataSource.__init__(self, sys_cfg, distro, paths)
    self.seed_dir = os.path.join(paths.seed_dir, 'azure')
    self.cfg = {}
    self.seed = None
    overrides = util.get_cfg_by_path(sys_cfg, DS_CFG_PATH, {})
    self.ds_cfg = util.mergemanydict([overrides, BUILTIN_DS_CONFIG])
    self.dhclient_lease_file = self.ds_cfg.get('dhclient_lease_file')
    self._network_config = None
    # Regenerate network config new_instance boot and every boot
    self.update_events['network'].add(EventType.BOOT)
def update_event_enabled(
    datasource: sources.DataSource,
    cfg: dict,
    event_source_type: EventType,
    scope: EventScope = None,
) -> bool:
    """Report whether `event_source_type` is enabled.

    Userdata `updates` settings shadow the datasource's
    `default_update_events`; `scope` optionally narrows the search to a
    single EventScope. Note that on first boot userdata may not be
    available yet, so only the datasource defaults apply then.
    """
    default_events: Dict[EventScope, Set[EventType]] = (
        datasource.default_update_events
    )
    user_events: Dict[EventScope, Set[EventType]] = userdata_to_events(
        cfg.get("updates", {})
    )
    # A value in the first will override a value in the second
    allowed = util.mergemanydict(
        [copy.deepcopy(user_events), copy.deepcopy(default_events)]
    )
    LOG.debug("Allowed events: %s", allowed)

    scopes = [scope] if scope else allowed.keys()
    scope_values = [s.value for s in scopes]

    for evt_scope in scopes:
        if event_source_type in allowed.get(evt_scope, []):
            LOG.debug(
                "Event Allowed: scope=%s EventType=%s",
                evt_scope.value,
                event_source_type,
            )
            return True

    LOG.debug(
        "Event Denied: scopes=%s EventType=%s", scope_values, event_source_type
    )
    return False
def _get_data(self):
    """Locate an OpenNebula context source and populate instance data."""
    defaults = {"instance-id": DEFAULT_IID}
    results = None
    seed = None

    # decide parseuser for context.sh shell reader
    parseuser = DEFAULT_PARSEUSER
    if 'parseuser' in self.ds_cfg:
        parseuser = self.ds_cfg.get('parseuser')

    candidates = [self.seed_dir]
    candidates.extend(find_candidate_devs())

    for cdev in candidates:
        try:
            if os.path.isdir(self.seed_dir):
                results = read_context_disk_dir(cdev, asuser=parseuser)
            elif cdev.startswith("/dev"):
                results = util.mount_cb(cdev, read_context_disk_dir,
                                        data=parseuser)
        except NonContextDiskDir:
            # not a context disk; try the next candidate
            continue
        except BrokenContextDiskDir as exc:
            raise exc
        except util.MountFailedError:
            LOG.warning("%s was not mountable", cdev)

        if results:
            seed = cdev
            LOG.debug("found datasource in %s", cdev)
            break

    if not seed:
        return False

    # merge fetched metadata with datasource defaults
    md = util.mergemanydict([results['metadata'], defaults])

    # check for valid user specified dsmode
    self.dsmode = self._determine_dsmode(
        [results.get('DSMODE'), self.ds_cfg.get('dsmode')])
    if self.dsmode == sources.DSMODE_DISABLED:
        return False

    self.seed = seed
    self.network_eni = results.get('network-interfaces')
    self.metadata = md
    self.userdata_raw = results.get('userdata')
    return True
def __init__(self, sys_cfg, distro, paths):
    """UpCloud datasource init with tunable retry/timeout settings."""
    sources.DataSource.__init__(self, sys_cfg, distro, paths)
    self.distro = distro
    self.metadata = {}
    overrides = util.get_cfg_by_path(
        sys_cfg, ["datasource", "UpCloud"], {})
    self.ds_cfg = util.mergemanydict([overrides, BUILTIN_DS_CONFIG])
    self.metadata_address = self.ds_cfg["metadata_url"]
    self.retries = self.ds_cfg.get("retries", MD_RETRIES)
    self.timeout = self.ds_cfg.get("timeout", MD_TIMEOUT)
    self.wait_retry = self.ds_cfg.get("wait_retry", MD_WAIT_RETRY)
    self._network_config = None
def __init__(self, sys_cfg, distro, paths):
    """Vultr datasource init; all tunables are required keys in ds_cfg."""
    sources.DataSource.__init__(self, sys_cfg, distro, paths)
    self.distro = distro
    self.metadata = {}
    overrides = util.get_cfg_by_path(sys_cfg, ["datasource", "Vultr"], {})
    self.ds_cfg = util.mergemanydict([overrides, BUILTIN_DS_CONFIG])
    self.metadata_address = self.ds_cfg['metadata_url']
    self.retries = self.ds_cfg['retries']
    self.timeout = self.ds_cfg['timeout']
    self.wait_retry = self.ds_cfg['wait_retry']
    self.dns_servers = self.ds_cfg['dns_servers']
    self._network_config = None
def __init__(self, sys_cfg, distro, paths):
    """DigitalOcean datasource init (with optional IPv4 link-local use)."""
    sources.DataSource.__init__(self, sys_cfg, distro, paths)
    self.distro = distro
    self.metadata = {}
    overrides = util.get_cfg_by_path(
        sys_cfg, ["datasource", "DigitalOcean"], {})
    self.ds_cfg = util.mergemanydict([overrides, BUILTIN_DS_CONFIG])
    self.metadata_address = self.ds_cfg['metadata_url']
    self.retries = self.ds_cfg.get('retries', MD_RETRIES)
    self.timeout = self.ds_cfg.get('timeout', MD_TIMEOUT)
    self.use_ip4LL = self.ds_cfg.get('use_ip4LL', MD_USE_IPV4LL)
    self.wait_retry = self.ds_cfg.get('wait_retry', MD_WAIT_RETRY)
    self._network_config = None
def test_compat_merges_dict2(self):
    """A list value in the later dict replaces a scalar, both helpers."""
    first = {'Blah': 1, 'Blah2': 2, 'Blah3': 3}
    second = {'Blah': 1, 'Blah2': 2, 'Blah3': [1]}
    self.assertEqual(_old_mergedict(first, second),
                     util.mergemanydict([first, second]))
def __init__(self, sys_cfg, distro, paths):
    """SmartOS datasource init; defers platform detection to _init()."""
    sources.DataSource.__init__(self, sys_cfg, distro, paths)
    layered = [self.ds_cfg,
               util.get_cfg_by_path(sys_cfg, DS_CFG_PATH, {}),
               BUILTIN_DS_CONFIG]
    self.ds_cfg = util.mergemanydict(layered)
    self.metadata = {}
    self.network_data = None
    self._network_config = None
    self.script_base_d = os.path.join(self.paths.get_cpath("scripts"))
    self._init()
def fetch_base_config():
    """Build the base configuration; later sources override earlier ones."""
    cfgs = [
        # builtin config, hardcoded in settings.py.
        util.get_builtin_cfg(),
        # Anything in your conf.d or 'default' cloud.cfg location.
        util.read_conf_with_confd(CLOUD_CONFIG),
        # runtime config. I.e., /run/cloud-init/cloud.cfg
        read_runtime_config(),
        # Kernel/cmdline parameters override system config
        util.read_conf_from_cmdline(),
    ]
    return util.mergemanydict(cfgs, reverse=True)
def test_compat_merges_dict2(self):
    """Old and new merge helpers agree when a value changes type."""
    a = {
        'Blah': 1,
        'Blah2': 2,
        'Blah3': 3,
    }
    b = {
        'Blah': 1,
        'Blah2': 2,
        'Blah3': [1],
    }
    c = _old_mergedict(a, b)
    d = util.mergemanydict([a, b])
    # FIX: assertEquals is a deprecated alias removed in Python 3.12.
    self.assertEqual(c, d)
def __init__(self, sys_cfg, distro, paths):
    """Scaleway datasource init: endpoints plus int-coerced retry/timeout."""
    super(DataSourceScaleway, self).__init__(sys_cfg, distro, paths)
    overrides = util.get_cfg_by_path(
        sys_cfg, ["datasource", "Scaleway"], {})
    self.ds_cfg = util.mergemanydict([overrides, BUILTIN_DS_CONFIG])
    self.metadata_address = self.ds_cfg['metadata_url']
    self.userdata_address = self.ds_cfg['userdata_url']
    self.vendordata_address = self.ds_cfg['vendordata_url']
    self.retries = int(self.ds_cfg.get('retries', DEF_MD_RETRIES))
    self.timeout = int(self.ds_cfg.get('timeout', DEF_MD_TIMEOUT))
def __init__(self, sys_cfg, distro, paths):
    """Hetzner datasource init; always runs in network dsmode."""
    sources.DataSource.__init__(self, sys_cfg, distro, paths)
    self.distro = distro
    self.metadata = {}
    overrides = util.get_cfg_by_path(
        sys_cfg, ["datasource", "Hetzner"], {})
    self.ds_cfg = util.mergemanydict([overrides, BUILTIN_DS_CONFIG])
    self.metadata_address = self.ds_cfg['metadata_url']
    self.userdata_address = self.ds_cfg['userdata_url']
    self.retries = self.ds_cfg.get('retries', MD_RETRIES)
    self.timeout = self.ds_cfg.get('timeout', MD_TIMEOUT)
    self.wait_retry = self.ds_cfg.get('wait_retry', MD_WAIT_RETRY)
    self._network_config = None
    self.dsmode = sources.DSMODE_NETWORK
def test_compat_merges_dict2(self):
    """Type-changing value (int -> list) merges identically in both."""
    first = {"Blah": 1, "Blah2": 2, "Blah3": 3}
    second = {"Blah": 1, "Blah2": 2, "Blah3": [1]}
    legacy = _old_mergedict(first, second)
    current = util.mergemanydict([first, second])
    self.assertEqual(legacy, current)
def get_data(self):
    """Locate an OpenNebula context source and populate instance data.

    @return: True when a context disk/dir was found and parsed, else False.
    """
    defaults = {"instance-id": DEFAULT_IID}
    results = None
    seed = None

    # decide parseuser for context.sh shell reader
    parseuser = DEFAULT_PARSEUSER
    if 'parseuser' in self.ds_cfg:
        parseuser = self.ds_cfg.get('parseuser')

    candidates = [self.seed_dir]
    candidates.extend(find_candidate_devs())

    for cdev in candidates:
        try:
            if os.path.isdir(self.seed_dir):
                results = read_context_disk_dir(cdev, asuser=parseuser)
            elif cdev.startswith("/dev"):
                results = util.mount_cb(cdev, read_context_disk_dir,
                                        data=parseuser)
        except NonContextDiskDir:
            continue
        except BrokenContextDiskDir as exc:
            raise exc
        except util.MountFailedError:
            # FIX: logger.warn is deprecated and the message was eagerly
            # %-interpolated; use warning() with lazy args.
            LOG.warning("%s was not mountable", cdev)

        if results:
            seed = cdev
            LOG.debug("found datasource in %s", cdev)
            break

    if not seed:
        return False

    # merge fetched metadata with datasource defaults
    md = results['metadata']
    md = util.mergemanydict([md, defaults])

    # check for valid user specified dsmode
    self.dsmode = self._determine_dsmode(
        [results.get('DSMODE'), self.ds_cfg.get('dsmode')])
    if self.dsmode == sources.DSMODE_DISABLED:
        return False

    self.seed = seed
    self.network_eni = results.get("network_config")
    self.metadata = md
    self.userdata_raw = results.get('userdata')
    return True
def test_compat_merge_sub_list(self):
    """An empty nested list merges the same way in old and new helpers."""
    first = {'1': '2', 'b': {'f': ['1']}}
    second = {'b': {'f': []}}
    self.assertEqual(_old_mergedict(first, second),
                     util.mergemanydict([first, second]))
def __init__(self, sys_cfg, distro, paths):
    """Scaleway datasource init (metadata/userdata endpoints + retries)."""
    LOG.debug('Init scw')
    sources.DataSource.__init__(self, sys_cfg, distro, paths)
    self.metadata = {}
    overrides = util.get_cfg_by_path(
        sys_cfg, ["datasource", "Scaleway"], {})
    self.ds_cfg = util.mergemanydict([overrides, BUILTIN_DS_CONFIG])
    self.metadata_address = self.ds_cfg['metadata_url']
    self.userdata_address = self.ds_cfg['userdata_url']
    self.retries = self.ds_cfg.get('retries', DEF_MD_RETRIES)
    self.timeout = self.ds_cfg.get('timeout', DEF_MD_TIMEOUT)
def test_compat_merge_sub_list(self):
    """Nested list replacement by an empty list matches across helpers."""
    first = {"1": "2", "b": {"f": ["1"]}}
    second = {"b": {"f": []}}
    legacy = _old_mergedict(first, second)
    current = util.mergemanydict([first, second])
    self.assertEqual(legacy, current)
def test_compat_merge_sub_dict2(self):
    """Disjoint keys in nested dicts merge keywise in both helpers."""
    first = {"1": "2", "b": {"f": "g"}}
    second = {"b": {"e": "c"}}
    self.assertEqual(_old_mergedict(first, second),
                     util.mergemanydict([first, second]))
def test_compat_merge_sub_dict2(self):
    """Nested dicts with disjoint keys merge identically in both helpers."""
    base = {'1': '2', 'b': {'f': 'g'}}
    update = {'b': {'e': 'c'}}
    legacy = _old_mergedict(base, update)
    current = util.mergemanydict([base, update])
    self.assertEqual(legacy, current)
def test_compat_merge_sub_dict2(self):
    """Old and new merge helpers agree on nested-dict merges."""
    a = {
        '1': '2',
        'b': {
            'f': 'g',
        }
    }
    b = {
        'b': {
            'e': 'c',
        }
    }
    c = _old_mergedict(a, b)
    d = util.mergemanydict([a, b])
    # FIX: assertEquals is a deprecated alias removed in Python 3.12.
    self.assertEqual(c, d)
def test_compat_merge_sub_list(self):
    """Old and new merge helpers agree on nested-list merges."""
    a = {
        '1': '2',
        'b': {
            'f': ['1'],
        }
    }
    b = {
        'b': {
            'f': [],
        }
    }
    c = _old_mergedict(a, b)
    d = util.mergemanydict([a, b])
    # FIX: assertEquals is a deprecated alias removed in Python 3.12.
    self.assertEqual(c, d)
def _get_data(self):
    """Crawl and process datasource metadata caching metadata as attrs.

    @return: True on success, False on error, invalid or disabled
        datasource.
    """
    if not self._is_platform_viable():
        return False
    try:
        crawled_data = util.log_time(
            logfunc=LOG.debug, msg='Crawl of metadata service',
            func=self.crawl_metadata)
    except sources.InvalidMetaDataException as e:
        LOG.warning('Could not crawl Azure metadata: %s', e)
        return False

    on_ubuntu = self.distro and self.distro.name == 'ubuntu'
    if on_ubuntu and self.ds_cfg.get('apply_network_config'):
        maybe_remove_ubuntu_network_config_scripts()

    # Process crawled data and augment with various config defaults
    self.cfg = util.mergemanydict(
        [crawled_data['cfg'], BUILTIN_CLOUD_CONFIG])
    self._metadata_imds = crawled_data['metadata']['imds']
    self.metadata = util.mergemanydict(
        [crawled_data['metadata'], DEFAULT_METADATA])
    self.userdata_raw = crawled_data['userdata_raw']

    user_ds_cfg = util.get_cfg_by_path(self.cfg, DS_CFG_PATH, {})
    self.ds_cfg = util.mergemanydict([user_ds_cfg, self.ds_cfg])

    # walinux agent writes files world readable, but expects
    # the directory to be protected.
    write_files(
        self.ds_cfg['data_dir'], crawled_data['files'], dirmode=0o700)
    return True
def _get_data(self):
    """Crawl metadata, parse and persist that data for this instance.

    @return: True when metadata discovered indicates OpenStack datasource.
        False when unable to contact metadata service or when metadata
        format is invalid or disabled.
    """
    # FIX: sys_cfg.get('datasource_list') may return None, making the
    # membership test raise TypeError; default to an empty list.
    oracle_considered = 'Oracle' in (
        self.sys_cfg.get('datasource_list') or [])
    if not detect_openstack(accept_oracle=not oracle_considered):
        return False

    if self.perform_dhcp_setup:  # Setup networking in init-local stage.
        try:
            with EphemeralDHCPv4(self.fallback_interface):
                results = util.log_time(
                    logfunc=LOG.debug, msg='Crawl of metadata service',
                    func=self._crawl_metadata)
        except (NoDHCPLeaseError, sources.InvalidMetaDataException) as e:
            util.logexc(LOG, str(e))
            return False
    else:
        try:
            results = self._crawl_metadata()
        except sources.InvalidMetaDataException as e:
            util.logexc(LOG, str(e))
            return False

    self.dsmode = self._determine_dsmode([results.get('dsmode')])
    if self.dsmode == sources.DSMODE_DISABLED:
        return False

    md = results.get('metadata', {})
    md = util.mergemanydict([md, DEFAULT_METADATA])
    self.metadata = md
    self.ec2_metadata = results.get('ec2-metadata')
    self.network_json = results.get('networkdata')
    self.userdata_raw = results.get('userdata')
    self.version = results['version']
    self.files.update(results.get('files', {}))

    vd = results.get('vendordata')
    self.vendordata_pure = vd
    try:
        self.vendordata_raw = sources.convert_vendordata(vd)
    except ValueError as e:
        LOG.warning("Invalid content in vendor-data: %s", e)
        self.vendordata_raw = None

    return True
def _merge_new_seed(cur, seeded):
    """Merge a newly fetched seed into the current seed dict.

    meta-data may arrive as a dict or as YAML text; network-config,
    user-data and vendor-data are carried over when present.
    """
    ret = cur.copy()

    newmd = seeded.get('meta-data', {})
    # FIX: test the value we already fetched. Indexing seeded['meta-data']
    # a second time raised KeyError when the key was absent, defeating the
    # .get() default above.
    if not isinstance(newmd, dict):
        newmd = util.load_yaml(newmd)
    ret['meta-data'] = util.mergemanydict([cur['meta-data'], newmd])

    if seeded.get('network-config'):
        ret['network-config'] = util.load_yaml(seeded['network-config'])

    if 'user-data' in seeded:
        ret['user-data'] = seeded['user-data']
    if 'vendor-data' in seeded:
        ret['vendor-data'] = seeded['vendor-data']
    return ret
def __init__(self, sys_cfg, distro, paths):
    """Scaleway datasource init: endpoints, retries, and net-config cache."""
    super(DataSourceScaleway, self).__init__(sys_cfg, distro, paths)
    overrides = util.get_cfg_by_path(
        sys_cfg, ["datasource", "Scaleway"], {})
    self.ds_cfg = util.mergemanydict([overrides, BUILTIN_DS_CONFIG])
    self.metadata_address = self.ds_cfg['metadata_url']
    self.userdata_address = self.ds_cfg['userdata_url']
    self.vendordata_address = self.ds_cfg['vendordata_url']
    self.retries = int(self.ds_cfg.get('retries', DEF_MD_RETRIES))
    self.timeout = int(self.ds_cfg.get('timeout', DEF_MD_TIMEOUT))
    self._fallback_interface = None
    self._network_config = None
def __init__(self, sys_cfg, distro, paths):
    """DigitalOcean datasource init with retry/timeout fallbacks."""
    sources.DataSource.__init__(self, sys_cfg, distro, paths)
    self.metadata = {}
    overrides = util.get_cfg_by_path(
        sys_cfg, ["datasource", "DigitalOcean"], {})
    self.ds_cfg = util.mergemanydict([overrides, BUILTIN_DS_CONFIG])
    self.metadata_address = self.ds_cfg['metadata_url']
    # Falsy (absent/0/None) configured values fall back to the module
    # defaults, preserving the original truthiness-based checks.
    self.retries = self.ds_cfg.get('retries') or MD_RETRIES
    self.timeout = self.ds_cfg.get('timeout') or MD_TIMEOUT
def __init__(self, sys_cfg, distro, paths):
    """SmartOS datasource init (serial-device metadata channel)."""
    sources.DataSource.__init__(self, sys_cfg, distro, paths)
    self.is_smartdc = None
    layered = [self.ds_cfg,
               util.get_cfg_by_path(sys_cfg, DS_CFG_PATH, {}),
               BUILTIN_DS_CONFIG]
    self.ds_cfg = util.mergemanydict(layered)
    self.metadata = {}
    self.cfg = BUILTIN_CLOUD_CONFIG
    self.seed = self.ds_cfg.get("serial_device")
    self.seed_timeout = self.ds_cfg.get("serial_timeout")
    self.smartos_no_base64 = self.ds_cfg.get('no_base64_decode')
    self.b64_keys = self.ds_cfg.get('base64_keys')
    self.b64_all = self.ds_cfg.get('base64_all')
def get_data(self):
    """Crawl the OpenStack metadata service and populate instance data.

    @return: True when metadata was readable and the dsmode is enabled,
        else False.
    """
    try:
        if not self.wait_for_metadata_service():
            return False
    except IOError:
        return False

    try:
        results = util.log_time(LOG.debug,
                                'Crawl of openstack metadata service',
                                read_metadata_service,
                                args=[self.metadata_address],
                                kwargs={'ssl_details': self.ssl_details,
                                        'version': openstack.OS_LATEST})
    except openstack.NonReadable:
        return False
    except (openstack.BrokenMetadata, IOError):
        util.logexc(LOG, "Broken metadata address %s",
                    self.metadata_address)
        return False

    user_dsmode = results.get('dsmode', None)
    if user_dsmode not in VALID_DSMODES + (None,):
        # FIX: logger.warn is a deprecated alias of logger.warning.
        LOG.warning("User specified invalid mode: %s", user_dsmode)
        user_dsmode = None
    if user_dsmode == 'disabled':
        return False

    md = results.get('metadata', {})
    md = util.mergemanydict([md, DEFAULT_METADATA])
    self.metadata = md
    self.ec2_metadata = results.get('ec2-metadata')
    self.userdata_raw = results.get('userdata')
    self.version = results['version']
    self.files.update(results.get('files', {}))

    # if vendordata includes 'cloud-init', then read that explicitly
    # for cloud-init (for namespacing).
    vd = results.get('vendordata')
    if isinstance(vd, dict) and 'cloud-init' in vd:
        self.vendordata_raw = vd['cloud-init']
    else:
        self.vendordata_raw = vd

    return True