def unmounter(umount):
    """Generator-based unmount helper: yield the mount point, unmount on exit.

    Yields *umount* unchanged; when the generator is closed/finalized,
    runs /usr/sbin/umount on it if it is truthy.
    """
    try:
        yield umount
    finally:
        if umount:
            util.subp(["/usr/sbin/umount", umount])
def _write_hostname(self, hostname, out_fn):
    """Persist *hostname* on AIX.

    Records the hostname against the inet0 device in the ODM, then sets
    the uname node name to the short hostname (truncated to 32 chars).
    """
    # Permanently change the hostname for inet0 device in the ODM.
    odm_cmd = ['/usr/sbin/chdev', '-l', 'inet0', '-a',
               'hostname=' + str(hostname)]
    util.subp(odm_cmd)
    # uname -S takes the short (unqualified) name, max 32 characters.
    short = str(hostname.split('.')[0])[0:32]
    util.subp(['/usr/bin/uname', '-S', short])
def handle(name, cfg, cloud, log, _args):
    """Render user 'bootcmd' entries into a shell script and execute it."""
    if "bootcmd" not in cfg:
        log.debug(("Skipping module named %s,"
                   " no 'bootcmd' key in configuration"), name)
        return

    validate_cloudconfig_schema(cfg, schema)
    with temp_utils.ExtendedTemporaryFile(suffix=".sh") as script:
        try:
            script.write(util.encode_text(util.shellify(cfg["bootcmd"])))
            script.flush()
        except Exception as e:
            util.logexc(log, "Failed to shellify bootcmd: %s", str(e))
            raise

        try:
            run_env = os.environ.copy()
            instance_id = cloud.get_instance_id()
            if instance_id:
                # Expose the instance id to the commands being run.
                run_env['INSTANCE_ID'] = str(instance_id)
            util.subp(['/bin/sh', script.name], env=run_env, capture=False)
        except Exception:
            util.logexc(log, "Failed to run bootcmd module %s", name)
            raise
def generate_resolv_conf(cloud, log, params):
    """Render /etc/resolv.conf from the distro's template.

    Boolean entries under params['options'] are hoisted: true values go
    into a 'flags' list for the template, and all boolean keys are
    removed from 'options' before rendering.
    """
    template_fn = cloud.get_template_filename('resolv.conf')
    if not template_fn:
        log.warning("No template found, not rendering /etc/resolv.conf")
        return

    flags = []
    false_flags = []
    if 'options' in params:
        # BUG FIX: dict.iteritems() is Python 2 only; items() works on both.
        for key, val in params['options'].items():
            if isinstance(val, bool):
                if val:
                    flags.append(key)
                else:
                    false_flags.append(key)

        for flag in flags + false_flags:
            del params['options'][flag]

    params['flags'] = flags
    log.debug("Writing resolv.conf from template %s", template_fn)
    if cloud.distro.name == "aix":
        templater.render_to_file(template_fn, '/etc/resolv.conf', params)
    else:
        # Network Manager likes to overwrite the resolv.conf file, so make
        # sure it is immutable after write
        util.subp(['chattr', '-i', '/etc/resolv.conf'])
        templater.render_to_file(template_fn, '/etc/resolv.conf', params)
        util.subp(['chattr', '+i', '/etc/resolv.conf'])
def add_assertions(assertions=None):
    """Import a list of snap assertions via `snap ack`.

    Joins the assertions with newlines, writes them to ASSERTIONS_FILE
    and acks that file in one invocation. Raises ValueError for a
    non-list argument or an empty assertion set.
    """
    assertions = assertions or []
    if not isinstance(assertions, list):
        raise ValueError(
            'assertion parameter was not a list: {assertions}'.format(
                assertions=assertions))

    snap_cmd = [SNAPPY_CMD, 'ack']
    combined = "\n".join(assertions)
    if len(combined) == 0:
        raise ValueError("Assertion list is empty")

    for assertion in assertions:
        # Only log the header lines of each assertion.
        LOG.debug('Acking: %s', assertion.split('\n')[0:2])

    util.write_file(ASSERTIONS_FILE, combined.encode('utf-8'))
    util.subp(snap_cmd + [ASSERTIONS_FILE], capture=True)
def run_commands(commands):
    """Run user-provided snap commands, collecting failures.

    @param commands: list of commands, or dict whose sorted keys define
        execution order. String commands run through a shell; lists do not.
    @raises TypeError: when commands is neither a list nor a dict.
    @raises RuntimeError: when one or more commands failed.
    """
    if not commands:
        return
    LOG.debug('Running user-provided snap commands')

    if isinstance(commands, dict):
        # Dict keys only impose ordering; the values are the commands.
        commands = [commands[k] for k in sorted(commands)]
    elif not isinstance(commands, list):
        raise TypeError(
            'commands parameter was not a list or dict: {commands}'.format(
                commands=commands))

    failures = []
    for command in prepend_base_command('snap', commands):
        try:
            util.subp(command, shell=isinstance(command, str),
                      status_cb=sys.stderr.write)
        except util.ProcessExecutionError as e:
            failures.append(str(e))

    if failures:
        msg = 'Failures running snap commands:\n{cmd_failures}'.format(
            cmd_failures=failures)
        util.logexc(LOG, msg)
        raise RuntimeError(msg)
def execute(self):
    """
    This method executes post-customization script before or after reboot
    based on the presence of rc local.
    """
    self.prepare_script()
    self.install_agent()
    if not self.postreboot:
        # Run the uploaded script right now, in-line.
        LOG.warning("Executing post-customization script inline")
        util.subp(["/bin/sh", self.scriptpath, "postcustomization"])
    else:
        LOG.debug("Scheduling custom script to run post reboot")
        if not os.path.isdir(CustomScriptConstant.POST_CUST_TMP_DIR):
            os.mkdir(CustomScriptConstant.POST_CUST_TMP_DIR)
        # Script "post-customize-guest.sh" and the user-uploaded script
        # are present in the same directory and need to be copied to a
        # temp directory to be executed post reboot. The user-uploaded
        # script is saved as customize.sh in the temp directory.
        # post-customize-guest.sh executes customize.sh after reboot.
        LOG.debug("Copying post-customization script")
        util.copy(self.scriptpath,
                  CustomScriptConstant.POST_CUST_TMP_DIR + "/customize.sh")
        LOG.debug("Copying script to run post-customization script")
        util.copy(
            os.path.join(self.directory,
                         CustomScriptConstant.POST_CUST_RUN_SCRIPT_NAME),
            CustomScriptConstant.POST_CUST_RUN_SCRIPT)
        # Marker file checked on next boot to trigger the deferred run.
        LOG.info("Creating post-reboot pending marker")
        util.ensure_file(CustomScriptConstant.POST_REBOOT_PENDING_MARKER)
def handle_part(self, data, ctype, filename, payload, frequency):
    """Write an upstart job from a user-data part and reload upstart."""
    # Begin/end signal parts carry no job content.
    if ctype in handlers.CONTENT_SIGNALS:
        return
    # See: https://bugs.launchpad.net/bugs/819507
    if frequency != PER_INSTANCE:
        return
    if not self.upstart_dir:
        return

    job_name = util.clean_filename(filename)
    # Upstart only picks up *.conf jobs.
    if os.path.splitext(job_name)[1].lower() != ".conf":
        job_name += ".conf"

    target = os.path.join(self.upstart_dir, job_name)
    util.write_file(target, util.dos2unix(payload), 0o644)

    if SUITABLE_UPSTART:
        util.subp(["initctl", "reload-configuration"], capture=False)
def package_command(self, command, args=None, pkgs=None):
    """Build and run a package-manager command, preferring dnf over yum."""
    if pkgs is None:
        pkgs = []

    if util.which('dnf'):
        LOG.debug('Using DNF for package management')
        cmd = ['dnf']
    else:
        LOG.debug('Using YUM for package management')
        # '-t' makes yum tolerant of errors on the command line with
        # regard to packages; e.g. listing an already-installed package
        # alongside new ones will not abort the transaction.
        cmd = ['yum', '-t']
    # Never prompt for confirmation of critical actions.
    cmd.append("-y")

    if args:
        if isinstance(args, str):
            cmd.append(args)
        elif isinstance(args, list):
            cmd.extend(args)

    cmd.append(command)
    cmd.extend(util.expand_package_list('%s-%s', pkgs))
    # Let the tool's output flow to the console rather than capturing it.
    util.subp(cmd, capture=False)
def add_sources(srclist, template_params=None, aa_repo_match=None):
    """
    add entries in /etc/apt/sources.list.d for each abbreviated
    sources.list entry in 'srclist'.  When rendering template, also
    include the values in dictionary searchList.

    Returns a list of [source, error-string] pairs for entries that
    could not be processed; an empty list means total success.
    """
    if template_params is None:
        template_params = {}

    if aa_repo_match is None:
        # Default: treat nothing as an add-apt-repository spec.
        def aa_repo_match(f):
            return False

    errorlist = []
    for ent in srclist:
        if 'source' not in ent:
            errorlist.append(["", "missing source"])
            continue

        source = templater.render_string(ent['source'], template_params)

        if aa_repo_match(source):
            try:
                util.subp(["add-apt-repository", source])
            except util.ProcessExecutionError as e:
                errorlist.append([source,
                                  ("add-apt-repository failed. " + str(e))])
            continue

        if 'filename' not in ent:
            ent['filename'] = 'cloud_config_sources.list'
        if not ent['filename'].startswith("/"):
            ent['filename'] = os.path.join("/etc/apt/sources.list.d/",
                                           ent['filename'])

        if ('keyid' in ent and 'key' not in ent):
            ks = ent.get('keyserver', "keyserver.ubuntu.com")
            try:
                ent['key'] = getkeybyid(ent['keyid'], ks)
            # BUG FIX: was a bare 'except:', which also swallowed
            # SystemExit/KeyboardInterrupt.
            except Exception:
                errorlist.append([source, "failed to get key from %s" % ks])
                continue

        if 'key' in ent:
            try:
                util.subp(('apt-key', 'add', '-'), ent['key'])
            except Exception:
                errorlist.append([source, "failed add key"])

        try:
            contents = "%s\n" % (source)
            util.write_file(ent['filename'], contents, omode="ab")
        except Exception:
            errorlist.append([source,
                              "failed write to file %s" % ent['filename']])

    return errorlist
def handle(name, cfg, _cloud, log, _args):
    """Install a blackhole route for the EC2 metadata IP when disabled."""
    if util.get_cfg_option_bool(cfg, "disable_ec2_metadata", False):
        util.subp(REJECT_CMD, capture=False)
    else:
        log.debug(("Skipping module named %s,"
                   " disabling the ec2 route not enabled"), name)
def handle(_name, cfg, cloud, log, _args):
    """
    Basically turn a top level 'landscape' entry with a 'client' dict
    and render it to ConfigObj format under '[client]' section in
    /etc/landscape/client.conf
    """
    ls_cloudcfg = cfg.get("landscape", {})

    if not isinstance(ls_cloudcfg, (dict)):
        # BUG FIX: message and args were previously passed as two separate
        # RuntimeError arguments and never interpolated.
        raise RuntimeError(
            "'landscape' key existed in config, but not a dictionary type,"
            " is a %s instead" % type_utils.obj_name(ls_cloudcfg))
    if not ls_cloudcfg:
        return

    cloud.distro.install_packages(('landscape-client',))

    # Later entries override earlier ones.
    merge_data = [
        LSC_BUILTIN_CFG,
        LSC_CLIENT_CFG_FILE,
        ls_cloudcfg,
    ]
    merged = merge_together(merge_data)
    contents = StringIO()
    merged.write(contents)

    util.ensure_dir(os.path.dirname(LSC_CLIENT_CFG_FILE))
    util.write_file(LSC_CLIENT_CFG_FILE, contents.getvalue())
    log.debug("Wrote landscape config file to %s", LSC_CLIENT_CFG_FILE)

    util.write_file(LS_DEFAULT_FILE, "RUN=1\n")
    util.subp(["service", "landscape-client", "restart"])
def recycle_srcmstr_process(log):
    """Kill the srcmstr daemon and wait for init to respawn it.

    @raises Exception: when srcmstr does not come back within
        SRCMSTR_TIMEOUT_SECONDS.
    """
    try:
        out = util.subp([PIDOF, SRCMSTR])[0]
    except util.ProcessExecutionError:
        util.logexc(log, 'Failed to get PID of srcmstr process.')
        raise
    srcmstr_pid_before = int(out)
    log.debug('Recycling srcmstr process with PID of %d.',
              srcmstr_pid_before)
    try:
        os.kill(srcmstr_pid_before, 9)
    # BUG FIX: narrowed from a bare 'except:' so SystemExit and
    # KeyboardInterrupt are not swallowed.
    except Exception:
        util.logexc(log, 'Failed to kill the srcmstr process.')
        raise

    # wait for srcmstr to come back up
    start_time = time.time()
    while True:
        time.sleep(0.5)
        if time.time() - start_time >= SRCMSTR_TIMEOUT_SECONDS:
            msg = ('srcmstr process failed to come back up within %d '
                   'seconds.' % SRCMSTR_TIMEOUT_SECONDS)
            log.error(msg)
            raise Exception(msg)
        try:
            new_srcmstr_pid = int(util.subp([PIDOF, SRCMSTR])[0])
            log.debug('srcmstr process came back up with PID of %d.',
                      new_srcmstr_pid)
            break
        except util.ProcessExecutionError:
            log.debug('Still waiting for srcmstr process to come '
                      'back up...')
            continue
def handle(name, _cfg, _cloud, log, _args):
    """Reset the RSCT/RMC subsystem so the node generates a fresh node ID."""
    required_tools = [RMCCTRL, RECFGCT]
    for tool in required_tools:
        if not os.path.isfile(tool):
            log.debug('%s is not found but is required, therefore not '
                      'attempting to reset RMC.', tool)
            return

    log.debug('Attempting to reset RMC.')

    system_info = util.system_info()
    node_id_before = get_node_id(log)
    log.debug('Node ID at beginning of module: %s', node_id_before)

    # Stop the RMC subsystem and all resource managers so that we can make
    # some changes to it
    try:
        util.subp([RMCCTRL, '-z'])
    # BUG FIX: narrowed from a bare 'except:' so SystemExit and
    # KeyboardInterrupt are not swallowed.
    except Exception:
        util.logexc(log, 'Failed to stop the RMC subsystem.')
        raise

    if 'linux' in system_info['platform'].lower():
        recycle_srcmstr_process(log)

    reconfigure_rsct_subsystems(log)

    node_id_after = get_node_id(log)
    log.debug('Node ID at end of module: %s', node_id_after)

    if node_id_after == node_id_before:
        msg = 'New node ID did not get generated.'
        log.error(msg)
        raise Exception(msg)
def add_assertions(assertions):
    """Import user-provided snap assertions via `snap ack`.

    @param assertions: list of assertion strings, or dict whose values
        are assertion strings. Falsy input is a no-op.
    @raises TypeError: when assertions is neither a list nor a dict.
    """
    if not assertions:
        return
    LOG.debug('Importing user-provided snap assertions')

    if isinstance(assertions, dict):
        assertions = assertions.values()
    elif not isinstance(assertions, list):
        raise TypeError(
            'assertion parameter was not a list or dict: {assertions}'.format(
                assertions=assertions))

    combined = "\n".join(assertions)
    for assertion in assertions:
        # Only the header lines of each assertion are worth logging.
        LOG.debug('Snap acking: %s', assertion.split('\n')[0:2])

    util.write_file(ASSERTIONS_FILE, combined.encode('utf-8'))
    util.subp([SNAP_CMD, 'ack', ASSERTIONS_FILE], capture=True)
def generate_certificate(self):
    """Create a self-signed transport certificate, caching the result.

    No-op when self.certificate is already set. The key/cert pair is
    written inside self.tmpdir; the PEM payload (without the BEGIN/END
    CERTIFICATE marker lines) is stored on self.certificate.
    """
    LOG.debug("Generating certificate for communication with fabric...")
    if self.certificate is not None:
        LOG.debug("Certificate already generated.")
        return
    with cd(self.tmpdir):
        util.subp([
            "openssl", "req", "-x509", "-nodes", "-subj",
            "/CN=LinuxTransport", "-days", "32768", "-newkey",
            "rsa:2048", "-keyout", self.certificate_names["private_key"],
            "-out", self.certificate_names["certificate"],
        ])
        # Keep only the base64 body; drop BEGIN/END CERTIFICATE markers.
        body_parts = []
        for line in open(self.certificate_names["certificate"]):
            if "CERTIFICATE" not in line:
                body_parts.append(line.rstrip())
        self.certificate = "".join(body_parts)
    LOG.debug("New certificate generated.")
def apply_hostname_bounce(hostname, policy, interface, command,
                          hostname_command="hostname"):
    """Set the system hostname and, per *policy*, bounce *interface*.

    The hostname is set unconditionally; the interface bounce runs only
    when policy is truthy and either the hostname changed or policy is
    'force'. *command* may be the literal "builtin" to use
    BOUNCE_COMMAND, a list/tuple (exec'd directly), or a string (run
    through a shell).
    """
    # set the hostname to 'hostname' if it is not already set to that.
    # then, if policy is not off, bounce the interface using command
    prev_hostname = util.subp(hostname_command, capture=True)[0].strip()
    util.subp([hostname_command, hostname])
    msg = ("phostname=%s hostname=%s policy=%s interface=%s" %
           (prev_hostname, hostname, policy, interface))
    if util.is_false(policy):
        LOG.debug("pubhname: policy false, skipping [%s]", msg)
        return
    if prev_hostname == hostname and policy != "force":
        LOG.debug("pubhname: no change, policy != force. skipping. [%s]", msg)
        return
    # The bounce command gets context via environment variables.
    env = os.environ.copy()
    env['interface'] = interface
    env['hostname'] = hostname
    env['old_hostname'] = prev_hostname
    if command == "builtin":
        command = BOUNCE_COMMAND
    LOG.debug("pubhname: publishing hostname [%s]", msg)
    # A plain string command is run through a shell; a list is not.
    shell = not isinstance(command, (list, tuple))
    # capture=False, see comments in bug 1202758 and bug 1206164.
    util.log_time(logfunc=LOG.debug, msg="publishing hostname",
                  get_uptime=True, func=util.subp,
                  kwargs={'args': command, 'shell': shell, 'capture': False,
                          'env': env})
def package_command(self, command, args=None, pkgs=None):
    """Run a yum subcommand non-interactively."""
    if pkgs is None:
        pkgs = []

    # -t: tolerate command-line errors regarding packages; e.g. asking
    #     to install an already-installed package will not abort the run.
    # -y: never prompt for confirmation of critical actions.
    cmd = ['yum', '-t', '-y']

    if args:
        if isinstance(args, str):
            cmd.append(args)
        elif isinstance(args, list):
            cmd.extend(args)

    cmd.append(command)
    cmd.extend(util.expand_package_list('%s-%s', pkgs))
    # Let yum's output flow to the console rather than capturing it.
    util.subp(cmd, capture=False)
def resize(self, diskdev, partnum, partdev):
    """
    GPT disks store metadata at the beginning (primary) and at the
    end (secondary) of the disk. When launching an image with a
    larger disk compared to the original image, the secondary copy
    is lost. Thus, the metadata will be marked CORRUPT, and need to
    be recovered.
    """
    try:
        util.subp(["gpart", "recover", diskdev])
    except util.ProcessExecutionError as e:
        if e.exit_code != 0:
            util.logexc(LOG, "Failed: gpart recover %s", diskdev)
            raise ResizeFailedException(e)

    size_before = get_size(partdev)
    try:
        util.subp(["gpart", "resize", "-i", partnum, diskdev])
    except util.ProcessExecutionError as e:
        util.logexc(LOG, "Failed: gpart resize -i %s %s", partnum, diskdev)
        raise ResizeFailedException(e)

    # Since growing the FS requires a reboot, make sure we reboot
    # first when this module has finished.
    open('/var/run/reboot-required', 'a').close()

    return (size_before, get_size(partdev))
def _bringup_device(self):
    """Perform the ip commands to fully setup the device.

    Adds the ephemeral address, brings the link up, and registers the
    matching teardown commands on self.cleanup_cmds. If the address is
    already present, setup (and cleanup registration) is skipped.
    """
    cidr = '{0}/{1}'.format(self.ip, self.prefix)
    LOG.debug(
        'Attempting setup of ephemeral network on %s with %s brd %s',
        self.interface, cidr, self.broadcast)
    try:
        # LANG=C so the "File exists" check below matches reliably.
        util.subp(
            ['ip', '-family', 'inet', 'addr', 'add', cidr, 'broadcast',
             self.broadcast, 'dev', self.interface],
            capture=True, update_env={'LANG': 'C'})
    except util.ProcessExecutionError as e:
        if "File exists" not in e.stderr:
            raise
        LOG.debug(
            'Skip ephemeral network setup, %s already has address %s',
            self.interface, self.ip)
    else:
        # Address creation success, bring up device and queue cleanup
        util.subp(
            ['ip', '-family', 'inet', 'link', 'set', 'dev',
             self.interface, 'up'], capture=True)
        self.cleanup_cmds.append(
            ['ip', '-family', 'inet', 'link', 'set', 'dev',
             self.interface, 'down'])
        self.cleanup_cmds.append(
            ['ip', '-family', 'inet', 'addr', 'del', cidr, 'dev',
             self.interface])
def create_group(self, name, members=None):
    """Create group *name* (if absent) and add *members* to it.

    Members that do not exist as users are skipped with a warning.
    """
    if not members:
        members = []

    # Check if group exists, and then add if it doesn't
    if util.is_group(name):
        # LOG.warn is deprecated; use warning() with lazy % args.
        LOG.warning("Skipping creation of existing group '%s'", name)
    else:
        try:
            util.subp(['groupadd', name])
            LOG.info("Created new group %s", name)
        except Exception:
            util.logexc(LOG, "Failed to create group %s", name)

    # Add members to the group, if so defined
    for member in members:
        if not util.is_user(member):
            LOG.warning("Unable to add group member '%s' to group '%s'"
                        "; user does not exist.", member, name)
            continue

        util.subp(['usermod', '-a', '-G', name, member])
        LOG.info("Added user '%s' to group '%s'", member, name)
def apply_locale(self, locale, out_fn=None):
    """Set the default login-class language in login.conf.

    Rewrites the 'default:' capability line to carry lang=<locale>,
    rebuilds the capability database, and restores a backup copy if
    cap_mkdb rejects the new file.
    """
    # Build the new login.conf contents in memory first.
    updated = StringIO()
    for line in util.load_file(self.login_conf_fn).splitlines():
        updated.write(re.sub(r'^default:',
                             r'default:lang=%s:' % locale, line))
        updated.write("\n")

    # Keep a backup so a rejected file can be rolled back.
    util.copy(self.login_conf_fn, self.login_conf_fn_bak)
    util.write_file(self.login_conf_fn, updated.getvalue())

    try:
        LOG.debug("Running cap_mkdb for %s", locale)
        util.subp(['cap_mkdb', self.login_conf_fn])
    except util.ProcessExecutionError:
        # cap_mkdb failed, so restore the backup.
        util.logexc(LOG, "Failed to apply locale %s", locale)
        try:
            util.copy(self.login_conf_fn_bak, self.login_conf_fn)
        except IOError:
            util.logexc(LOG, "Failed to restore %s backup",
                        self.login_conf_fn)
def install_chef(cloud, chef_cfg, log):
    """Install chef-client per 'install_type' (gems, packages, omnibus).

    Returns whether the chef client should subsequently be executed.
    """
    # If chef is not installed, we install chef based on 'install_type'
    install_type = util.get_cfg_option_str(chef_cfg, 'install_type',
                                           'packages')
    run = util.get_cfg_option_bool(chef_cfg, 'exec', default=False)
    if install_type == "gems":
        # This will install and run the chef-client from gems
        chef_version = util.get_cfg_option_str(chef_cfg, 'version', None)
        ruby_version = util.get_cfg_option_str(chef_cfg, 'ruby_version',
                                               RUBY_VERSION_DEFAULT)
        install_chef_from_gems(cloud.distro, ruby_version, chef_version)
        # Retain backwards compat, by preferring True instead of False
        # when not provided/overriden...
        run = util.get_cfg_option_bool(chef_cfg, 'exec', default=True)
    elif install_type == 'packages':
        # This will install and run the chef-client from packages
        cloud.distro.install_packages(('chef',))
    elif install_type == 'omnibus':
        # This will install as a omnibus unified package
        url = util.get_cfg_option_str(chef_cfg, "omnibus_url", OMNIBUS_URL)
        retries = max(0, util.get_cfg_option_int(chef_cfg,
                                                 "omnibus_url_retries",
                                                 default=OMNIBUS_URL_RETRIES))
        content = url_helper.readurl(url=url, retries=retries)
        with util.tempdir() as tmpd:
            # Use tmpdir over tmpfile to avoid 'text file busy' on execute
            tmpf = "%s/chef-omnibus-install" % tmpd
            # BUG FIX: 'mode=0700' is a Python 2 octal literal and a
            # SyntaxError on Python 3; 0o700 is the portable spelling.
            util.write_file(tmpf, str(content), mode=0o700)
            util.subp([tmpf], capture=False)
    else:
        log.warn("Unknown chef install type '%s'", install_type)
        run = False
    return run
def delete_key(key):
    """Delete the specified key from the local gpg ring"""
    cmd = ["gpg", "--batch", "--yes", "--delete-keys", key]
    try:
        util.subp(cmd, capture=True)
    except util.ProcessExecutionError as error:
        # Deletion failures are non-fatal; just note them.
        LOG.warn('Failed delete key "%s": %s', key, error)
def update_locale_conf(locale, sys_path, keyname='LANG'):
    """Update system locale config"""
    LOG.debug('Updating %s with locale setting %s=%s',
              sys_path, keyname, locale)
    cmd = [
        'update-locale',
        '--locale-file=' + sys_path,
        '%s=%s' % (keyname, locale),
    ]
    util.subp(cmd, capture=False)
def invoke_agent(cmd):
    # this is a function itself to simplify patching it for test
    if not cmd:
        LOG.debug("not invoking agent")
        return
    LOG.debug("invoking agent: %s", cmd)
    # A plain string runs through a shell; a list is exec'd directly.
    util.subp(cmd, shell=(not isinstance(cmd, list)))
def package_command(self, command, args=None, pkgs=None):
    """Run apt-get non-interactively for the given subcommand."""
    if pkgs is None:
        pkgs = []

    e = os.environ.copy()
    # See: http://tiny.cc/kg91fw
    # Or: http://tiny.cc/mh91fw
    e['DEBIAN_FRONTEND'] = 'noninteractive'

    cmd = list(self.get_option("apt_get_command", APT_GET_COMMAND))

    if args:
        if isinstance(args, str):
            cmd.append(args)
        elif isinstance(args, list):
            cmd.extend(args)

    # 'upgrade' is mapped (by default) to the safer dist-upgrade.
    subcmd = command
    if command == "upgrade":
        subcmd = self.get_option("apt_get_upgrade_subcommand",
                                 "dist-upgrade")
    cmd.append(subcmd)

    cmd.extend(util.expand_package_list('%s=%s', pkgs))
    # Allow the output of this to flow outwards (ie not be captured)
    util.subp(cmd, env=e, capture=False)
def resize(self, diskdev, partnum, partdev):
    """Grow partition *partnum* on *diskdev* via parted resizepart.

    Returns (size_before, size_after) of *partdev*.
    @raises ResizeFailedException: when parted fails.
    """
    before = get_size(partdev)
    try:
        # parted wants string arguments; partnum may arrive as an int.
        util.subp(["parted", diskdev, "resizepart", str(partnum)])
    except util.ProcessExecutionError as e:
        # Log the failure before surfacing it, matching the gpart resizer.
        util.logexc(LOG, "Failed: parted resizepart %s %s",
                    partnum, diskdev)
        raise ResizeFailedException(e)
    return (before, get_size(partdev))
def get_hdd_size(device):
    """Return the size of *device* in sectors (total bytes / sector size)."""
    try:
        size_in_bytes, _ = util.subp([BLKDEV_CMD, '--getsize64', device])
        sector_size, _ = util.subp([BLKDEV_CMD, '--getss', device])
    except Exception as e:
        raise Exception("Failed to get %s size\n%s" % (device, e))

    # BUG FIX: use floor division; '/' yields a float sector count on
    # Python 3, which downstream partitioning math does not expect.
    return int(size_in_bytes) // int(sector_size)
def _write_hostname(self, hostname, out_fn):
    """Persist the hostname via hostnamectl or sysconfig, as appropriate."""
    if self._dist_uses_systemd():
        # systemd hosts: let hostnamectl own the change.
        util.subp(['hostnamectl', 'set-hostname', str(hostname)])
    else:
        # Legacy path: write HOSTNAME= into the sysconfig network file.
        rhel_util.update_sysconfig_file(out_fn, {'HOSTNAME': hostname})
def add_user(self, name, **kwargs):
    """
    Add a user to the system using standard GNU tools
    """
    # XXX need to make add_user idempotent somehow as we
    # still want to add groups or modify ssh keys on pre-existing
    # users in the image.
    if util.is_user(name):
        LOG.info("User %s already exists, skipping.", name)
        return

    if 'create_groups' in kwargs:
        create_groups = kwargs.pop('create_groups')
    else:
        create_groups = True

    useradd_cmd = ['useradd', name]
    log_useradd_cmd = ['useradd', name]
    if util.system_is_snappy():
        useradd_cmd.append('--extrausers')
        log_useradd_cmd.append('--extrausers')

    # Since we are creating users, we want to carefully validate the
    # inputs. If something goes wrong, we can end up with a system
    # that nobody can login to.
    useradd_opts = {
        "gecos": '--comment',
        "homedir": '--home',
        "primary_group": '--gid',
        "uid": '--uid',
        "groups": '--groups',
        "passwd": '--password',
        "shell": '--shell',
        "expiredate": '--expiredate',
        "inactive": '--inactive',
        "selinux_user": '******',
    }

    useradd_flags = {
        "no_user_group": '--no-user-group',
        "system": '--system',
        "no_log_init": '--no-log-init',
    }

    # Fields whose values must never reach the logs.
    redact_opts = ['passwd']

    # support kwargs having groups=[list] or groups="g1,g2"
    groups = kwargs.get('groups')
    if groups:
        if isinstance(groups, six.string_types):
            groups = groups.split(",")

        # remove any white spaces in group names, most likely
        # that came in as a string like: groups: group1, group2
        groups = [g.strip() for g in groups]

        # kwargs.items loop below wants a comma delimeted string
        # that can go right through to the command.
        kwargs['groups'] = ",".join(groups)
    else:
        # BUG FIX: previously the primary_group handling below was nested
        # inside 'if groups:', so a primary_group supplied without
        # 'groups' was never pre-created by the create_groups loop.
        groups = []

    primary_group = kwargs.get('primary_group')
    if primary_group:
        groups.append(primary_group)

    if create_groups and groups:
        for group in groups:
            if not util.is_group(group):
                self.create_group(group)
                LOG.debug("created group '%s' for user '%s'", group, name)

    # Check the values and create the command
    for key, val in sorted(kwargs.items()):
        if key in useradd_opts and val and isinstance(val, str):
            useradd_cmd.extend([useradd_opts[key], val])
            # Redact certain fields from the logs
            if key in redact_opts:
                log_useradd_cmd.extend([useradd_opts[key], 'REDACTED'])
            else:
                log_useradd_cmd.extend([useradd_opts[key], val])
        elif key in useradd_flags and val:
            useradd_cmd.append(useradd_flags[key])
            log_useradd_cmd.append(useradd_flags[key])

    # Don't create the home directory if directed so or if the user is a
    # system user
    if kwargs.get('no_create_home') or kwargs.get('system'):
        useradd_cmd.append('-M')
        log_useradd_cmd.append('-M')
    else:
        useradd_cmd.append('-m')
        log_useradd_cmd.append('-m')

    # Run the command
    LOG.debug("Adding user %s", name)
    try:
        util.subp(useradd_cmd, logstring=log_useradd_cmd)
    except Exception as e:
        util.logexc(LOG, "Failed to create user %s", name)
        raise e
def _netdev_route_info_iproute(iproute_data):
    """
    Get network route dicts from ip route info.

    @param iproute_data: Output string from ip route command.

    @returns: A dict containing ipv4 and ipv6 route entries as lists. Each
       item in the list is a route dictionary representing destination,
       gateway, flags, genmask and interface information.
    """

    routes = {}
    routes['ipv4'] = []
    routes['ipv6'] = []
    entries = iproute_data.splitlines()
    default_route_entry = {
        'destination': '', 'flags': '', 'gateway': '', 'genmask': '',
        'iface': '', 'metric': ''}
    for line in entries:
        entry = copy(default_route_entry)
        if not line:
            continue
        toks = line.split()
        # 'U' (route is up) applies to every parsed route.
        flags = ['U']
        if toks[0] == "default":
            entry['destination'] = "0.0.0.0"
            entry['genmask'] = "0.0.0.0"
        else:
            if '/' in toks[0]:
                (addr, cidr) = toks[0].split("/")
            else:
                # No prefix given: a host route ('H' flag, /32 mask).
                addr = toks[0]
                cidr = '32'
                flags.append("H")
                entry['genmask'] = net_prefix_to_ipv4_mask(cidr)
            entry['destination'] = addr
            entry['genmask'] = net_prefix_to_ipv4_mask(cidr)
            entry['gateway'] = "0.0.0.0"
        # Scan keyword/value token pairs emitted by 'ip route'.
        for i in range(len(toks)):
            if toks[i] == "via":
                entry['gateway'] = toks[i + 1]
                flags.insert(1, "G")
            if toks[i] == "dev":
                entry["iface"] = toks[i + 1]
            if toks[i] == "metric":
                entry['metric'] = toks[i + 1]
        entry['flags'] = ''.join(flags)
        routes['ipv4'].append(entry)
    try:
        # rc 1 is acceptable: no IPv6 routes present.
        (iproute_data6, _err6) = util.subp(
            ["ip", "--oneline", "-6", "route", "list", "table", "all"],
            rcs=[0, 1])
    except util.ProcessExecutionError:
        pass
    else:
        entries6 = iproute_data6.splitlines()
        for line in entries6:
            entry = {}
            if not line:
                continue
            toks = line.split()
            if toks[0] == "default":
                entry['destination'] = "::/0"
                entry['flags'] = "UG"
            else:
                entry['destination'] = toks[0]
                entry['gateway'] = "::"
                entry['flags'] = "U"
            for i in range(len(toks)):
                if toks[i] == "via":
                    entry['gateway'] = toks[i + 1]
                    entry['flags'] = "UG"
                if toks[i] == "dev":
                    entry["iface"] = toks[i + 1]
                if toks[i] == "metric":
                    entry['metric'] = toks[i + 1]
                if toks[i] == "expires":
                    # 'e' marks a route with an expiry timer.
                    entry['flags'] = entry['flags'] + 'e'
            routes['ipv6'].append(entry)
    return routes
def parse_shell_config(content, keylist=None, bash=None, asuser=None,
                       switch_user_cb=None):
    """Run *content* through bash and report the variables it set.

    Executes the script, diffing the shell's variables before and after,
    and returns a dict of variables the content changed (or that appear
    in *keylist*).
    """
    if isinstance(bash, str):
        bash = [bash]
    elif bash is None:
        bash = ['bash', '-e']

    if switch_user_cb is None:
        switch_user_cb = switch_user_cmd

    # allvars expands to all existing variables by using '${!x*}' notation
    # where x is lower or upper case letters or '_'
    # BUG FIX: string.letters is Python 2 only; ascii_letters is the
    # portable spelling.
    allvars = ["${!%s*}" % x for x in string.ascii_letters + "_"]

    keylist_in = keylist
    if keylist is None:
        keylist = allvars
        keylist_in = []

    setup = '\n'.join((
        '__v="";',
        '',
    ))

    def varprinter(vlist):
        # output '\0'.join(['_start_', key=value NULL for vars in vlist]
        return '\n'.join((
            'printf "%s\\0" _start_',
            'for __v in %s; do' % ' '.join(vlist),
            ' printf "%s=%s\\0" "$__v" "${!__v}";',
            'done',
            '',
        ))

    # the rendered 'bcmd' is bash syntax that does
    # setup: declare variables we use (so they show up in 'all')
    # varprinter(allvars): print all variables known at beginning
    # content: execute the provided content
    # varprinter(keylist): print all variables known after content
    #
    # output is then a null terminated array of:
    #   literal '_start_'
    #   key=value (for each preset variable)
    #   literal '_start_'
    #   key=value (for each post set variable)
    bcmd = ('unset IFS\n' +
            setup +
            varprinter(allvars) +
            '{\n%s\n\n:\n} > /dev/null\n' % content +
            'unset IFS\n' +
            varprinter(keylist) + "\n")

    cmd = []
    if asuser is not None:
        cmd = switch_user_cb(asuser)
    cmd.extend(bash)

    (output, _error) = util.subp(cmd, data=bcmd)

    # exclude vars in bash that change on their own or that we used
    excluded = ("RANDOM", "LINENO", "SECONDS", "_", "__v")
    preset = {}
    ret = {}
    target = None
    output = output[0:-1]  # remove trailing null

    # go through output. First _start_ is for 'preset', second for 'target'.
    # Add to target only things that were changed and not volatile.
    for line in output.split("\x00"):
        try:
            (key, val) = line.split("=", 1)
            if target is preset:
                target[key] = val
            elif (key not in excluded and
                  (key in keylist_in or preset.get(key) != val)):
                ret[key] = val
        except ValueError:
            if line != "_start_":
                raise
            if target is None:
                target = preset
            elif target is preset:
                target = ret

    return ret
def clear_dhcp(self):
    """Kill running dhclient instances and remove stale DHCP lease files."""
    import glob  # local import keeps this fix self-contained

    logger.info('Clearing DHCP leases')

    # Ignore the return code 1 (no matching process).
    util.subp(["pkill", "dhclient"], rcs=[0, 1])
    # BUG FIX: the previous "rm -f /var/lib/dhcp/*" relied on shell
    # globbing, but util.subp does not run through a shell, so the
    # literal '*' matched nothing. Expand the glob ourselves.
    leases = glob.glob("/var/lib/dhcp/*")
    if leases:
        util.subp(["rm", "-f"] + leases)
def get_hostname(hostname_command='hostname'):
    """Return the current hostname as reported by *hostname_command*."""
    if isinstance(hostname_command, (list, tuple)):
        cmd = hostname_command
    else:
        cmd = (hostname_command,)
    (out, _err) = util.subp(cmd, capture=True)
    return out.strip()
def set_hostname(hostname, hostname_command='hostname'):
    """Set the system hostname by invoking *hostname_command*."""
    cmd = [hostname_command, hostname]
    util.subp(cmd)
def update_ca_certs():
    """
    Updates the CA certificate cache on the current machine.
    """
    # Let the tool's output flow to the console rather than capturing it.
    cmd = ["update-ca-certificates"]
    util.subp(cmd, capture=False)
def call(self, rootd=None, mocks=None, func="main", args=None, files=None,
         policy_dmi=DI_DEFAULT_POLICY,
         policy_no_dmi=DI_DEFAULT_POLICY_NO_DMI,
         ec2_strict_id=DI_EC2_STRICT_ID_DEFAULT):
    """Build a shell wrapper around ds-identify with mocks, run it,
    and return (rc, stdout, stderr, parsed cloud.cfg, tree snapshot)."""
    if args is None:
        args = []
    if mocks is None:
        mocks = []
    if files is None:
        files = {}
    if rootd is None:
        rootd = self.tmp_dir()
    # Sentinel written into mock templates for "value not provided".
    unset = '_unset'
    wrap = self.tmp_path(path="_shwrap", dir=rootd)
    populate_dir(rootd, files)

    # DI_DEFAULT_POLICY* are declared always as to not rely
    # on the default in the code. This is because SRU releases change
    # the value in the code, and thus tests would fail there.
    head = [
        "DI_MAIN=noop",
        "DEBUG_LEVEL=2",
        "DI_LOG=stderr",
        "PATH_ROOT='%s'" % rootd,
        ". " + self.dsid_path,
        'DI_DEFAULT_POLICY="%s"' % policy_dmi,
        'DI_DEFAULT_POLICY_NO_DMI="%s"' % policy_no_dmi,
        'DI_EC2_STRICT_ID_DEFAULT="%s"' % ec2_strict_id,
        ""
    ]

    def write_mock(data):
        # Render one shell mock function from SHELL_MOCK_TMPL, replacing
        # missing fields with the 'unset' sentinel.
        ddata = {'out': None, 'err': None, 'ret': 0, 'RET': None}
        ddata.update(data)
        for k in ddata:
            if ddata[k] is None:
                ddata[k] = unset
        return SHELL_MOCK_TMPL % ddata

    mocklines = []
    # Defaults apply only for commands the caller did not already mock.
    defaults = [
        {'name': 'detect_virt', 'RET': 'none', 'ret': 1},
        {'name': 'uname', 'out': UNAME_MYSYS},
        {'name': 'blkid', 'out': BLKID_EFI_ROOT},
        {'name': 'ovf_vmware_transport_guestinfo',
         'out': 'No value found', 'ret': 1},
    ]

    written = [d['name'] for d in mocks]
    for data in mocks:
        mocklines.append(write_mock(data))
    for d in defaults:
        if d['name'] not in written:
            mocklines.append(write_mock(d))

    endlines = [
        func + ' ' + ' '.join(['"%s"' % s for s in args])
    ]

    with open(wrap, "w") as fp:
        fp.write('\n'.join(head + mocklines + endlines) + "\n")

    rc = 0
    try:
        out, err = util.subp(['sh', '-c', '. %s' % wrap], capture=True)
    except util.ProcessExecutionError as e:
        rc = e.exit_code
        out = e.stdout
        err = e.stderr

    cfg = None
    cfg_out = os.path.join(rootd, 'run/cloud-init/cloud.cfg')
    if os.path.exists(cfg_out):
        contents = util.load_file(cfg_out)
        try:
            cfg = safeyaml.load(contents)
        except Exception as e:
            # Surface unparseable YAML to the test instead of hiding it.
            cfg = {"_INVALID_YAML": contents, "_EXCEPTION": str(e)}

    return CallReturn(rc, out, err, cfg, dir2dict(rootd))
def run(cmd, msg):
    """Run *cmd*, returning its (out, err) tuple, or False on failure.

    @param msg: short description of the operation, used in the warning.
    """
    try:
        return util.subp(cmd, capture=True)
    except util.ProcessExecutionError as e:
        # BUG FIX: the original referenced an undefined name 'service'
        # here (NameError at failure time) and never used 'msg'.
        LOG.warning("failed: %s (%s): %s", msg, cmd, e)
        return False
def disable_autoconf6():
    """Disable IPv6 stateless autoconfiguration via chrctcp."""
    util.subp(['/usr/sbin/chrctcp', '-d', 'autoconf6'], capture=False)
def rename(cur, new):
    """Rename network interface *cur* to *new* using iproute2."""
    cmd = ["ip", "link", "set", cur, "name", new]
    util.subp(cmd, capture=True)
def __exit__(self, excp_type, excp_value, excp_traceback):
    """Teardown anything we set up."""
    # Run each queued cleanup command in registration order.
    for cleanup in self.cleanup_cmds:
        util.subp(cleanup, capture=True)
def start_autoconf6(device_name):
    """Run autoconf6 for *device_name* ('any' means every interface)."""
    # '-A' configures all interfaces; '-i <dev>' targets a single one.
    flags = ['-A'] if device_name == "any" else ['-i', device_name]
    util.subp(['/usr/sbin/autoconf6'] + flags, capture=False)
def _get_gpart_output(part):
    """Return stdout of `gpart show <part>`."""
    out, _err = util.subp(['gpart', 'show', part])
    return out
def _get_dumpfs_output(mount_point):
    """Return stdout of `dumpfs -m <mount_point>`."""
    out, _err = util.subp(['dumpfs', '-m', mount_point])
    return out
def handle(name, cfg, cloud, log, _args):
    """cc_puppet entry point: install puppet if requested, write its
    configuration and csr_attributes files, then enable and start it.

    :param name: module name, used only for log messages.
    :param cfg: full cloud-config dict; only the 'puppet' key is consumed.
    :param cloud: cloud object providing distro and instance-id access.
    :param log: logger for this module.
    """
    # If there isn't a puppet key in the configuration don't do anything
    if 'puppet' not in cfg:
        log.debug(("Skipping module named %s,"
                   " no 'puppet' configuration found"), name)
        return

    puppet_cfg = cfg['puppet']
    # Start by installing the puppet package if necessary...
    install = util.get_cfg_option_bool(puppet_cfg, 'install', True)
    version = util.get_cfg_option_str(puppet_cfg, 'version', None)
    package_name = util.get_cfg_option_str(
        puppet_cfg, 'package_name', PUPPET_PACKAGE_NAME)
    conf_file = util.get_cfg_option_str(
        puppet_cfg, 'conf_file', PUPPET_CONF_PATH)
    ssl_dir = util.get_cfg_option_str(puppet_cfg, 'ssl_dir', PUPPET_SSL_DIR)
    csr_attributes_path = util.get_cfg_option_str(
        puppet_cfg, 'csr_attributes_path', PUPPET_CSR_ATTRIBUTES_PATH)

    p_constants = PuppetConstants(conf_file, ssl_dir, csr_attributes_path,
                                  log)
    if not install and version:
        log.warning(("Puppet install set false but version supplied,"
                     " doing nothing."))
    elif install:
        log.debug(("Attempting to install puppet %s,"),
                  version if version else 'latest')
        cloud.distro.install_packages((package_name, version))

    # ... and then update the puppet configuration
    if 'conf' in puppet_cfg:
        # Add all sections from the conf object to puppet.conf
        contents = util.load_file(p_constants.conf_path)
        # Create object for reading puppet.conf values
        puppet_config = helpers.DefaultingConfigParser()
        # Read puppet.conf values from original file in order to be able to
        # mix the rest up. First clean them up
        # (TODO(harlowja) is this really needed??)
        cleaned_lines = [i.lstrip() for i in contents.splitlines()]
        cleaned_contents = '\n'.join(cleaned_lines)
        # Move to puppet_config.read_file when dropping py2.7
        puppet_config.readfp(   # pylint: disable=W1505
            StringIO(cleaned_contents),
            filename=p_constants.conf_path)
        # BUG FIX: the loop variable was named 'cfg', shadowing the 'cfg'
        # function parameter; renamed to 'cfg_values' for hygiene.
        for (cfg_name, cfg_values) in puppet_cfg['conf'].items():
            # Cert configuration is a special case
            # Dump the puppet master ca certificate in the correct place
            if cfg_name == 'ca_cert':
                # Puppet ssl sub-directory isn't created yet
                # Create it with the proper permissions and ownership
                util.ensure_dir(p_constants.ssl_dir, 0o771)
                util.chownbyname(p_constants.ssl_dir, 'puppet', 'root')
                util.ensure_dir(p_constants.ssl_cert_dir)
                util.chownbyname(p_constants.ssl_cert_dir, 'puppet', 'root')
                util.write_file(p_constants.ssl_cert_path, cfg_values)
                util.chownbyname(p_constants.ssl_cert_path, 'puppet', 'root')
            else:
                # Iterate through the config items, we'll use ConfigParser.set
                # to overwrite or create new items as needed
                for (o, v) in cfg_values.items():
                    if o == 'certname':
                        # Expand %f as the fqdn
                        # TODO(harlowja) should this use the cloud fqdn??
                        v = v.replace("%f", socket.getfqdn())
                        # Expand %i as the instance id
                        v = v.replace("%i", cloud.get_instance_id())
                        # certname needs to be downcased
                        v = v.lower()
                    puppet_config.set(cfg_name, o, v)
            # We got all our config as wanted we'll rename
            # the previous puppet.conf and create our new one
            util.rename(p_constants.conf_path,
                        "%s.old" % (p_constants.conf_path))
            util.write_file(p_constants.conf_path, puppet_config.stringify())

    if 'csr_attributes' in puppet_cfg:
        util.write_file(
            p_constants.csr_attributes_path,
            yaml.dump(puppet_cfg['csr_attributes'], default_flow_style=False))

    # Set it up so it autostarts
    _autostart_puppet(log)

    # Start puppetd
    util.subp(['service', 'puppet', 'start'], capture=False)
def do_resize(resize_cmd, log): try: util.subp(resize_cmd) except util.ProcessExecutionError: util.logexc(log, "Failed to resize filesystem (cmd=%s)", resize_cmd) raise
def test_subp_handles_utf8(self): # The given bytes contain utf-8 accented characters as seen in e.g. # the "deja dup" package in Ubuntu. cmd = self.printf_cmd(self.utf8_valid_2) (out, _err) = util.subp(cmd, capture=True) self.assertEqual(out, self.utf8_valid_2.decode('utf-8'))
def test_subp_decode_ignore(self): # this executes a string that writes invalid utf-8 to stdout (out, _err) = util.subp(self.printf_cmd('abc\\xaadef'), capture=True, decode='ignore') self.assertEqual(out, 'abcdef')
def disable_dhcpcd(): cmd = ['/usr/sbin/chrctcp', '-S', '-d', 'dhcpcd'] util.subp(cmd, capture=False)
def execute_or_debug(cmd, fail_ret=None): try: return util.subp(cmd)[0] except util.ProcessExecutionError: LOG.debug("Failed to execute: %s", ' '.join(cmd)) return fail_ret
def disable_ndpd_host(): cmd = ['/usr/sbin/chrctcp', '-S', '-d', 'ndpd-host'] util.subp(cmd, capture=False)
def read_context_disk_dir(source_dir, asuser=None):
    """
    read_context_disk_dir(source_dir):
    read source_dir and return a tuple with metadata dict and user-data
    string populated.  If not a valid dir, raise a NonContextDiskDir
    """
    # Locate any of the expected context files in source_dir.
    found = {}
    for af in CONTEXT_DISK_FILES:
        fn = os.path.join(source_dir, af)
        if os.path.isfile(fn):
            found[af] = fn

    if not found:
        raise NonContextDiskDir("%s: %s" % (source_dir, "no files found"))

    context = {}
    results = {'userdata': None, 'metadata': {}}

    if "context.sh" in found:
        if asuser is not None:
            # Verify the configured parse user exists before using it.
            try:
                pwd.getpwnam(asuser)
            except KeyError:
                # BUG FIX: previously the format string and argument were
                # passed as two separate exception args, so '%s' was never
                # interpolated into the message; format it explicitly.
                raise BrokenContextDiskDir(
                    "configured user '%s' does not exist" % asuser)
        try:
            with open(os.path.join(source_dir, 'context.sh'), 'r') as f:
                content = f.read().strip()

            context = parse_shell_config(content, asuser=asuser)
        except util.ProcessExecutionError as e:
            raise BrokenContextDiskDir("Error processing context.sh: %s" % (e))
        except IOError as e:
            raise NonContextDiskDir("Error reading context.sh: %s" % (e))
    else:
        raise NonContextDiskDir("Missing context.sh")

    if not context:
        return results

    results['metadata'] = context

    # process single or multiple SSH keys
    ssh_key_var = None
    if "SSH_KEY" in context:
        ssh_key_var = "SSH_KEY"
    elif "SSH_PUBLIC_KEY" in context:
        ssh_key_var = "SSH_PUBLIC_KEY"

    if ssh_key_var:
        lines = context.get(ssh_key_var).splitlines()
        results['metadata']['public-keys'] = [
            line for line in lines
            if len(line) and not line.startswith("#")]

    # custom hostname -- try hostname or leave cloud-init
    # itself create hostname from IP address later
    for k in ('HOSTNAME', 'PUBLIC_IP', 'IP_PUBLIC', 'ETH0_IP'):
        if k in context:
            results['metadata']['local-hostname'] = context[k]
            break

    # raw user data
    if "USER_DATA" in context:
        results['userdata'] = context["USER_DATA"]
    elif "USERDATA" in context:
        results['userdata'] = context["USERDATA"]

    # b64decode user data if necessary (default)
    if 'userdata' in results:
        encoding = context.get('USERDATA_ENCODING',
                               context.get('USER_DATA_ENCODING'))
        if encoding == "base64":
            try:
                results['userdata'] = base64.b64decode(results['userdata'])
            except TypeError:
                LOG.warn("Failed base64 decoding of userdata")

    # generate static /etc/network/interfaces
    # only if there are any required context variables
    # http://opennebula.org/documentation:rel3.8:cong#network_configuration
    for k in context.keys():
        if re.match(r'^ETH\d+_IP$', k):
            (out, _) = util.subp(['/sbin/ip', 'link'])
            net = OpenNebulaNetwork(out, context)
            results['network-interfaces'] = net.gen_conf()
            break

    return results
def get_primary_arch(self): (arch, _err) = util.subp(['dpkg', '--print-architecture']) return str(arch).strip()
def _netdev_route_info_netstat(route_data): routes = {} routes['ipv4'] = [] routes['ipv6'] = [] entries = route_data.splitlines() for line in entries: if not line: continue toks = line.split() # FreeBSD shows 6 items in the routing table: # Destination Gateway Flags Refs Use Netif Expire # default 10.65.0.1 UGS 0 34920 vtnet0 # # Linux netstat shows 2 more: # Destination Gateway Genmask Flags Metric Ref Use Iface # 0.0.0.0 10.65.0.1 0.0.0.0 UG 0 0 0 eth0 if (len(toks) < 6 or toks[0] == "Kernel" or toks[0] == "Destination" or toks[0] == "Internet" or toks[0] == "Internet6" or toks[0] == "Routing"): continue if len(toks) < 8: toks.append("-") toks.append("-") toks[7] = toks[5] toks[5] = "-" entry = { 'destination': toks[0], 'gateway': toks[1], 'genmask': toks[2], 'flags': toks[3], 'metric': toks[4], 'ref': toks[5], 'use': toks[6], 'iface': toks[7], } routes['ipv4'].append(entry) try: (route_data6, _err6) = util.subp(["netstat", "-A", "inet6", "--route", "--numeric"], rcs=[0, 1]) except util.ProcessExecutionError: pass else: entries6 = route_data6.splitlines() for line in entries6: if not line: continue toks = line.split() if (len(toks) < 7 or toks[0] == "Kernel" or toks[0] == "Destination" or toks[0] == "Internet" or toks[0] == "Proto" or toks[0] == "Active"): continue entry = { 'destination': toks[0], 'gateway': toks[1], 'flags': toks[2], 'metric': toks[3], 'ref': toks[4], 'use': toks[5], 'iface': toks[6], } # skip lo interface on ipv6 if entry['iface'] == "lo": continue # strip /128 from address if it's included if entry['destination'].endswith('/128'): entry['destination'] = re.sub(r'\/128$', '', entry['destination']) routes['ipv6'].append(entry) return routes
def handle(_name, cfg, cloud, log, args):
    """cc_set_passwords entry point: set user passwords and optionally
    toggle ssh password authentication.

    :param cfg: cloud-config dict; consumes 'password', 'chpasswd' and
        'ssh_pwauth' keys.
    :param cloud: cloud object providing datasource and distro access.
    :param log: logger for this module.
    :param args: command-line args; args[0] (if given) overrides the
        configured password and wipes any chpasswd list.
    :raises: re-raises the last accumulated error, if any occurred.
    """
    if len(args) != 0:
        # if run from command line, and give args, wipe the chpasswd['list']
        password = args[0]
        if 'chpasswd' in cfg and 'list' in cfg['chpasswd']:
            del cfg['chpasswd']['list']
    else:
        password = util.get_cfg_option_str(cfg, "password", None)
        # use admin_pass key from metadata if no password was configured
        if not password:
            metadata = cloud.datasource.metadata
            if metadata and 'admin_pass' in metadata:
                password = metadata['admin_pass']

    expire = True
    pw_auth = "no"
    change_pwauth = False
    plist = None

    if 'chpasswd' in cfg:
        chfg = cfg['chpasswd']
        plist = util.get_cfg_option_str(chfg, 'list', plist)
        expire = util.get_cfg_option_bool(chfg, 'expire', expire)

    if not plist and password:
        (users, _groups) = ds.normalize_users_groups(cfg, cloud.distro)
        (user, _user_config) = ds.extract_default(users)
        if user:
            plist = "%s:%s" % (user, password)
            # change root's password
            plist = plist + "\nroot:%s" % password
        else:
            log.warn("No default or defined user to change password for.")

    errors = []
    if plist:
        plist_in = []
        randlist = []
        users = []
        for line in plist.splitlines():
            u, p = line.split(':', 1)
            # 'R'/'RANDOM' requests a generated password; remember it so
            # it can be reported to stderr below
            if p == "R" or p == "RANDOM":
                p = rand_user_password()
                randlist.append("%s:%s" % (u, p))
            plist_in.append("%s:%s" % (u, p))
            users.append(u)

        ch_in = '\n'.join(plist_in) + '\n'
        try:
            log.debug("Changing password for %s:", users)
            util.subp(['chpasswd'], ch_in)
        except Exception as e:
            errors.append(e)
            util.logexc(log,
                        "Failed to set passwords with chpasswd for %s", users)

        if len(randlist):
            blurb = ("Set the following 'random' passwords\n",
                     '\n'.join(randlist))
            sys.stderr.write("%s\n%s\n" % blurb)

        if expire:
            expired_users = []
            for u in users:
                try:
                    util.subp(['passwd', '--expire', u])
                    expired_users.append(u)
                except Exception as e:
                    errors.append(e)
                    util.logexc(log, "Failed to set 'expire' for %s", u)
            if expired_users:
                log.debug("Expired passwords for: %s users", expired_users)

    change_pwauth = False
    pw_auth = None
    if 'ssh_pwauth' in cfg:
        change_pwauth = True
        if util.is_true(cfg['ssh_pwauth']):
            pw_auth = 'yes'
        if util.is_false(cfg['ssh_pwauth']):
            pw_auth = 'no'

    if change_pwauth:
        replaced_auth = False

        # See: man sshd_config
        old_lines = ssh_util.parse_ssh_config(ssh_util.DEF_SSHD_CFG)
        new_lines = []
        i = 0
        for (i, line) in enumerate(old_lines):
            # Keywords are case-insensitive and arguments are case-sensitive
            if line.key == 'passwordauthentication':
                log.debug("Replacing auth line %s with %s", i + 1, pw_auth)
                replaced_auth = True
                line.value = pw_auth
            new_lines.append(line)

        if not replaced_auth:
            log.debug("Adding new auth line %s", i + 1)
            replaced_auth = True
            new_lines.append(
                ssh_util.SshdConfigLine('', 'PasswordAuthentication',
                                        pw_auth))

        lines = [str(e) for e in new_lines]
        util.write_file(ssh_util.DEF_SSHD_CFG, "\n".join(lines))

        try:
            # BUG FIX: copy init_cmd -- appending to the distro's own list
            # mutated shared state on every call.
            cmd = list(cloud.distro.init_cmd)  # Default service
            cmd.append(cloud.distro.get_option('ssh_svcname', 'ssh'))
            cmd.append('restart')
            if 'systemctl' in cmd:  # Switch action ordering
                cmd[1], cmd[2] = cmd[2], cmd[1]
            # BUG FIX: on py3, filter() is a lazy iterator; materialize it
            # so util.subp always gets a list (identical result on py2).
            cmd = list(filter(None, cmd))  # Remove empty arguments
            util.subp(cmd)
            log.debug("Restarted the ssh daemon")
        except Exception:
            # BUG FIX: bare 'except:' also swallowed SystemExit and
            # KeyboardInterrupt; narrowed to Exception.
            util.logexc(log, "Restarting of the ssh daemon failed")

    if len(errors):
        # (typo fix in message: 'occured' -> 'occurred')
        log.debug("%s errors occurred, re-raising the last one", len(errors))
        raise errors[-1]
def down(name): util.subp(["ip", "link", "set", name, "down"], capture=True)
def modprobe_floppy(): out, _err = util.subp(CMD_PROBE_FLOPPY) LOG.debug('Command: %s\nOutput%s', ' '.join(CMD_PROBE_FLOPPY), out)
def expire_passwd(self, user): try: util.subp(['passwd', '--expire', user]) except Exception as e: util.logexc(LOG, "Failed to set 'expire' for %s", user) raise e
def up(name): util.subp(["ip", "link", "set", name, "up"], capture=True)