Example #1
    def _get_data(self):
        mcfg = self.ds_cfg

        try:
            self._set_data(self.seed_dir, read_maas_seed_dir(self.seed_dir))
            return True
        except MAASSeedDirNone:
            pass
        except MAASSeedDirMalformed as exc:
            LOG.warning("%s was malformed: %s", self.seed_dir, exc)
            raise

        # If there is no metadata_url, then we're not configured
        url = mcfg.get('metadata_url', None)
        if not url:
            return False

        try:
            # Doing this here has the side effect of getting the oauth
            # time-fix in place. Nowhere else would retry by default, so
            # even if we could fix the timestamp we would not.
            if not self.wait_for_metadata_service(url):
                return False

            self._set_data(
                url, read_maas_seed_url(
                    url, read_file_or_url=self.oauth_helper.readurl,
                    paths=self.paths, retries=1))
            return True
        except Exception:
            util.logexc(LOG, "Failed fetching metadata from url %s", url)
            return False
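Every snippet collected here follows the same shape: wrap a risky call in try/except and record the traceback with cloud-init's util.logexc helper before returning a failure value or re-raising. Below is a minimal, self-contained sketch of that pattern; the fetch_seed helper is hypothetical, and it assumes logexc(log, msg, *args) logs the message together with the active exception's traceback, which matches how the calls above use it.

import logging

from cloudinit import util

LOG = logging.getLogger(__name__)


def fetch_seed(path):
    """Hypothetical helper: read a seed file, logging instead of raising."""
    try:
        return util.load_file(path)
    except IOError:
        # logexc records the message and the current exception's traceback
        util.logexc(LOG, "Failed reading seed file %s", path)
        return None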
Example #2
def handle(name, cfg, cloud, log, _args):

    if "bootcmd" not in cfg:
        log.debug(("Skipping module named %s,"
                   " no 'bootcmd' key in configuration"), name)
        return

    validate_cloudconfig_schema(cfg, schema)
    with temp_utils.ExtendedTemporaryFile(suffix=".sh") as tmpf:
        try:
            content = util.shellify(cfg["bootcmd"])
            tmpf.write(util.encode_text(content))
            tmpf.flush()
        except Exception as e:
            util.logexc(log, "Failed to shellify bootcmd: %s", str(e))
            raise

        try:
            env = os.environ.copy()
            iid = cloud.get_instance_id()
            if iid:
                env['INSTANCE_ID'] = str(iid)
            cmd = ['/bin/sh', tmpf.name]
            util.subp(cmd, env=env, capture=False)
        except Exception:
            util.logexc(log, "Failed to run bootcmd module %s", name)
            raise
Example #3
    def create_group(self, name, members=None):
        group_add_cmd = ['groupadd', name]
        if not members:
            members = []

        # Check if the group exists, and create it if it doesn't
        if util.is_group(name):
            LOG.warn("Skipping creation of existing group '%s'" % name)
        else:
            try:
                util.subp(group_add_cmd)
                LOG.info("Created new group %s" % name)
            except Exception:
                util.logexc(LOG, "Failed to create group %s", name)

        # Add members to the group, if so defined
        if len(members) > 0:
            for member in members:
                if not util.is_user(member):
                    LOG.warn("Unable to add group member '%s' to group '%s'"
                             "; user does not exist.", member, name)
                    continue

                util.subp(['usermod', '-a', '-G', name, member])
                LOG.info("Added user '%s' to group '%s'" % (member, name))
Example #4
    def get_data_source(self):
        if self.datasource is not None:
            return True

        if self.restore_from_cache():
            log.debug("restored from cache type %s" % self.datasource)
            return True

        cfglist = self.cfg['datasource_list']
        dslist = list_sources(cfglist, self.ds_deps)
        dsnames = [f.__name__ for f in dslist]

        log.debug("searching for data source in %s" % dsnames)
        for cls in dslist:
            ds = cls.__name__
            try:
                s = cls(sys_cfg=self.cfg)
                if s.get_data():
                    self.datasource = s
                    self.datasource_name = ds
                    log.debug("found data source %s" % ds)
                    return True
            except Exception as e:
                log.warn("get_data of %s raised %s" % (ds, e))
                util.logexc(log)
        msg = "Did not find data source. searched classes: %s" % dsnames
        log.debug(msg)
        raise DataSourceNotFoundException(msg)
Example #5
def recycle_srcmstr_process(log):
    try:
        out = util.subp([PIDOF, SRCMSTR])[0]
    except util.ProcessExecutionError:
        util.logexc(log, 'Failed to get PID of srcmstr process.')
        raise

    srcmstr_pid_before = int(out)
    log.debug('Recycling srcmstr process with PID of %d.' %
              srcmstr_pid_before)
    try:
        os.kill(srcmstr_pid_before, 9)
    except:
        util.logexc(log, 'Failed to kill the srcmstr process.')
        raise

    # wait for srcmstr to come back up
    start_time = time.time()
    while True:
        time.sleep(0.5)
        if time.time() - start_time >= SRCMSTR_TIMEOUT_SECONDS:
            msg = ('srcmstr process failed to come back up within %d seconds.'
                   % SRCMSTR_TIMEOUT_SECONDS)
            log.error(msg)
            raise Exception(msg)
        try:
            new_srcmstr_pid = int(util.subp([PIDOF, SRCMSTR])[0])
            log.debug('srcmstr process came back up with PID of %d.' %
                      new_srcmstr_pid)
            break
        except util.ProcessExecutionError:
            log.debug('Still waiting for srcmstr process to come '
                      'back up...')
            continue
Example #6
    def wait_for_metadata_service(self, url):
        mcfg = self.ds_cfg
        max_wait = 120
        try:
            max_wait = int(mcfg.get("max_wait", max_wait))
        except Exception:
            util.logexc(LOG, "Failed to get max wait. using %s", max_wait)

        if max_wait == 0:
            return False

        timeout = 50
        try:
            if "timeout" in mcfg:
                timeout = int(mcfg.get("timeout", timeout))
        except Exception:
            LOG.warn("Failed to get timeout, using %s" % timeout)

        starttime = time.time()
        check_url = "%s/%s/meta-data/instance-id" % (url, MD_VERSION)
        urls = [check_url]
        url = self.oauth_helper.wait_for_url(
            urls=urls, max_wait=max_wait, timeout=timeout)

        if url:
            LOG.debug("Using metadata source: '%s'", url)
        else:
            LOG.critical("Giving up on md from %s after %i seconds",
                         urls, int(time.time() - starttime))

        return bool(url)
Example #7
def get_node_id(log):
    try:
        node_id = util.subp(['/usr/bin/head', '-n1', NODE_ID_FILE])[0].strip()
        return node_id
    except util.ProcessExecutionError:
        util.logexc(log, 'Failed to get node ID from file %s.' % NODE_ID_FILE)
        raise
def read_user_data_callback(mount_dir):
    '''
    Description:
        This callback will be applied by util.mount_cb() on the mounted
        file.

        The Deltacloud file name contains 'deltacloud'. Those not using
        Deltacloud, but instead instrumenting the injection themselves,
        could drop 'deltacloud' from the file name.

    Input:
        mount_dir - Mount directory

    Returns:
        User Data

    '''

    deltacloud_user_data_file = mount_dir + '/deltacloud-user-data.txt'
    user_data_file = mount_dir + '/user-data.txt'

    # First try deltacloud_user_data_file. On failure try user_data_file.
    try:
        user_data = util.load_file(deltacloud_user_data_file).strip()
    except IOError:
        try:
            user_data = util.load_file(user_data_file).strip()
        except IOError:
            util.logexc(LOG, 'Failed accessing user data file.')
            return None

    return user_data
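The callback above is meant to be handed to util.mount_cb, which mounts a device, calls the callback with the mount point, and unmounts afterwards. A short sketch follows, assuming the callback and a module-level LOG are in scope; '/dev/fd0' is only an illustrative device (the RHEV-M path later in this list uses the floppy).

# Sketch: mount the injection device and read user data via the callback above.
try:
    user_data = util.mount_cb('/dev/fd0', read_user_data_callback)
except util.MountFailedError:
    util.logexc(LOG, "Failed to mount /dev/fd0 while looking for user data")
    user_data = None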
Example #9
def is_powerkvm(log):
    try:
        out = util.subp(['cat', CPUINFO])[0]
        return QEMU_STRING in out.lower()
    except:
        util.logexc(log, 'Failed to determine if VM is running on PowerKVM.')
        raise
Example #10
def find_source(sys_cfg, distro, paths, ds_deps, cfg_list, pkg_list, reporter):
    ds_list = list_sources(cfg_list, ds_deps, pkg_list)
    ds_names = [type_utils.obj_name(f) for f in ds_list]
    mode = "network" if DEP_NETWORK in ds_deps else "local"
    LOG.debug("Searching for %s data source in: %s", mode, ds_names)

    for name, cls in zip(ds_names, ds_list):
        myrep = events.ReportEventStack(
            name="search-%s" % name.replace("DataSource", ""),
            description="searching for %s data from %s" % (mode, name),
            message="no %s data found from %s" % (mode, name),
            parent=reporter)
        try:
            with myrep:
                LOG.debug("Seeing if we can get any data from %s", cls)
                s = cls(sys_cfg, distro, paths)
                if s.get_data():
                    myrep.message = "found %s data from %s" % (mode, name)
                    return (s, type_utils.obj_name(cls))
        except Exception:
            util.logexc(LOG, "Getting data from %s failed", cls)

    msg = ("Did not find any data source,"
           " searched classes: (%s)") % (", ".join(ds_names))
    raise DataSourceNotFoundException(msg)
Example #11
def setup_user_keys(keys, user, key_prefix, log=None):
    import pwd
    saved_umask = os.umask(0o077)

    pwent = pwd.getpwnam(user)

    ssh_dir = '%s/.ssh' % pwent.pw_dir
    if not os.path.exists(ssh_dir):
        os.mkdir(ssh_dir)
        os.chown(ssh_dir, pwent.pw_uid, pwent.pw_gid)

    try:
        ssh_cfg = parse_ssh_config()
        akeys = ssh_cfg.get("AuthorizedKeysFile", "%h/.ssh/authorized_keys")
        akeys = akeys.replace("%h", pwent.pw_dir)
        akeys = akeys.replace("%u", user)
        authorized_keys = akeys
    except Exception:
        authorized_keys = '%s/.ssh/authorized_keys' % pwent.pw_dir
        if log:
            util.logexc(log)

    key_entries = []
    for k in keys:
        ke = AuthKeyEntry(k, def_opt=key_prefix)
        key_entries.append(ke)

    content = update_authorized_keys(authorized_keys, key_entries)
    util.write_file(authorized_keys, content, 0o600)

    os.chown(authorized_keys, pwent.pw_uid, pwent.pw_gid)
    util.restorecon_if_possible(ssh_dir, recursive=True)

    os.umask(saved_umask)
Example #12
    def get_data(self):
        """
        Metadata is the whole server context and /meta/cloud-config is used
        as userdata.
        """
        dsmode = None
        try:
            server_context = self.cepko.all().result
            server_meta = server_context['meta']
        except:
            util.logexc(LOG, "Failed reading from the serial port")
            return False

        dsmode = server_meta.get('cloudinit-dsmode', self.dsmode)
        if dsmode not in VALID_DSMODES:
            LOG.warn("Invalid dsmode %s, assuming default of 'net'", dsmode)
            dsmode = 'net'
        if dsmode == "disabled" or dsmode != self.dsmode:
            return False

        base64_fields = server_meta.get('base64_fields', '').split(',')
        self.userdata_raw = server_meta.get('cloudinit-user-data', "")
        if 'cloudinit-user-data' in base64_fields:
            self.userdata_raw = b64decode(self.userdata_raw)

        self.metadata = server_context
        self.ssh_public_key = server_meta['ssh_public_key']

        return True
def on_first_boot(data, distro=None):
    """Performs any first-boot actions using data read from a config-drive."""
    if not isinstance(data, dict):
        raise TypeError("Config-drive data expected to be a dict; not %s"
                        % (type(data)))
    networkapplied = False
    jsonnet_conf = data.get('vendordata', {}).get('network_info')
    if jsonnet_conf:
        try:
            LOG.debug("Updating network interfaces from JSON in config drive")
            distro_user_config = distro.apply_network_json(jsonnet_conf)
            networkapplied = True
        except NotImplementedError:
            LOG.debug(
                "Distro does not implement networking setup via Vendor JSON.")
            pass

    net_conf = data.get("network_config", '')
    if networkapplied is False and net_conf and distro:
        LOG.debug("Updating network interfaces from config drive")
        distro.apply_network(net_conf)
    files = data.get('files', {})
    if files:
        LOG.debug("Writing %s injected files", len(files))
        for (filename, content) in files.items():
            if not filename.startswith(os.sep):
                filename = os.sep + filename
            try:
                util.write_file(filename, content, mode=0o660)
            except IOError:
                util.logexc(LOG, "Failed writing file: %s", filename)
Example #14
def run_part(mod, data, filename, payload, frequency, headers):
    mod_freq = mod.frequency
    if not (mod_freq == PER_ALWAYS or
            (frequency == PER_INSTANCE and mod_freq == PER_INSTANCE)):
        return
    # Sanity checks on version (should be convertible to an int)
    try:
        mod_ver = mod.handler_version
        mod_ver = int(mod_ver)
    except (TypeError, ValueError, AttributeError):
        mod_ver = 1
    content_type = headers['Content-Type']
    try:
        LOG.debug("Calling handler %s (%s, %s, %s) with frequency %s",
                  mod, content_type, filename, mod_ver, frequency)
        if mod_ver == 3:
            # Treat as v. 3 which does get a frequency + headers
            mod.handle_part(data, content_type, filename,
                            payload, frequency, headers)
        elif mod_ver == 2:
            # Treat as v. 2 which does get a frequency
            mod.handle_part(data, content_type, filename,
                            payload, frequency)
        elif mod_ver == 1:
            # Treat as v. 1 which gets no frequency
            mod.handle_part(data, content_type, filename, payload)
        else:
            raise ValueError("Unknown module version %s" % (mod_ver))
    except:
        util.logexc(LOG, "Failed calling handler %s (%s, %s, %s) with "
                    "frequency %s", mod, content_type, filename, mod_ver,
                    frequency)
Example #15
    def add_snap_user(self, name, **kwargs):
        """
        Add a snappy user to the system using snappy tools
        """

        snapuser = kwargs.get('snapuser')
        known = kwargs.get('known', False)
        adduser_cmd = ["snap", "create-user", "--sudoer", "--json"]
        if known:
            adduser_cmd.append("--known")
        adduser_cmd.append(snapuser)

        # Run the command
        LOG.debug("Adding snap user %s", name)
        try:
            (out, err) = util.subp(adduser_cmd, logstring=adduser_cmd,
                                   capture=True)
            LOG.debug("snap create-user returned: %s:%s", out, err)
            jobj = util.load_json(out)
            username = jobj.get('username', None)
        except Exception as e:
            util.logexc(LOG, "Failed to create snap user %s", name)
            raise e

        return username
Example #16
    def write_sudo_rules(self, user, rules, sudo_file=None):
        if not sudo_file:
            sudo_file = self.ci_sudoers_fn

        lines = ["", "# User rules for %s" % user]
        if isinstance(rules, (list, tuple)):
            for rule in rules:
                lines.append("%s %s" % (user, rule))
        elif isinstance(rules, str):
            lines.append("%s %s" % (user, rules))
        else:
            msg = "Can not create sudoers rule addition with type %r"
            raise TypeError(msg % (type_utils.obj_name(rules)))
        content = "\n".join(lines)
        content += "\n"  # trailing newline

        self.ensure_sudo_dir(os.path.dirname(sudo_file))
        if not os.path.exists(sudo_file):
            contents = [util.make_header(), content]
            try:
                util.write_file(sudo_file, "\n".join(contents), 0o440)
            except IOError as e:
                util.logexc(LOG, "Failed to write sudoers file %s", sudo_file)
                raise e
        else:
            try:
                util.append_file(sudo_file, content)
            except IOError as e:
                util.logexc(LOG, "Failed to append sudoers file %s", sudo_file)
                raise e
Example #17
def get_instance_metadata(api_version='latest',
                          metadata_address='http://169.254.169.254',
                          ssl_details=None, timeout=5, retries=5,
                          leaf_decoder=None):
    md_url = url_helper.combine_url(metadata_address, api_version)
    # Note, 'meta-data' explicitly has trailing /.
    # this is required for CloudStack (LP: #1356855)
    md_url = url_helper.combine_url(md_url, 'meta-data/')
    caller = functools.partial(util.read_file_or_url,
                               ssl_details=ssl_details, timeout=timeout,
                               retries=retries)

    def mcaller(url):
        return caller(url).contents

    try:
        response = caller(md_url)
        materializer = MetadataMaterializer(response.contents,
                                            md_url, mcaller,
                                            leaf_decoder=leaf_decoder)
        md = materializer.materialize()
        if not isinstance(md, (dict)):
            md = {}
        return md
    except Exception:
        util.logexc(LOG, "Failed fetching metadata from url %s", md_url)
        return {}
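A usage sketch for the function above: because failures return an empty dict rather than raising, callers can probe the result directly. The import path assumes this is cloud-init's ec2_utils module; the timeout values are illustrative.

from cloudinit.ec2_utils import get_instance_metadata  # assumed location

# Crawl the EC2-style metadata tree with short timeouts; an empty dict comes
# back if the service is unreachable, so .get() below never raises.
md = get_instance_metadata(api_version='latest', timeout=2, retries=1)
instance_id = md.get('instance-id')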
Example #18
File: stages.py  Project: brint/cloud-init
    def _run_modules(self, mostly_mods):
        cc = self.init.cloudify()
        # Return which ones ran
        # and which ones failed + the exception of why it failed
        failures = []
        which_ran = []
        for (mod, name, freq, args) in mostly_mods:
            try:
                # Try the modules frequency, otherwise fallback to a known one
                if not freq:
                    freq = mod.frequency
                if freq not in FREQUENCIES:
                    freq = PER_INSTANCE
                LOG.debug("Running module %s (%s) with frequency %s",
                          name, mod, freq)

                # Use the configs logger and not our own
                # TODO(harlowja): possibly check the module
                # for having a LOG attr and just give it back
                # its own logger?
                func_args = [name, self.cfg,
                             cc, config.LOG, args]
                # Mark it as having started running
                which_ran.append(name)
                # This name will affect the semaphore name created
                run_name = "config-%s" % (name)
                cc.run(run_name, mod.handle, func_args, freq=freq)
            except Exception as e:
                util.logexc(LOG, "Running module %s (%s) failed", name, mod)
                failures.append((name, e))
        return (which_ran, failures)
Example #19
    def user_data_rhevm(self):
        '''
        RHEVM specific userdata read

         If on RHEV-M the user data will be contained on the
         floppy device in file <user_data_file>
         To access it:
           modprobe floppy

           Leverage util.mount_cb to:
               mkdir <tmp mount dir>
               mount /dev/fd0 <tmp mount dir>
               The call back passed to util.mount_cb will do:
                   read <tmp mount dir>/<user_data_file>
        '''

        return_str = None

        # modprobe floppy
        try:
            cmd = CMD_PROBE_FLOPPY
            (cmd_out, _err) = util.subp(cmd)
            LOG.debug('Command: %s\nOutput: %s' % (' '.join(cmd), cmd_out))
        except ProcessExecutionError as _err:
            util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd),
                        _err.message)
            return False
    def get_data(self):
        mcfg = self.ds_cfg

        try:
            (userdata, metadata) = read_maas_seed_dir(self.seeddir)
            self.userdata_raw = userdata
            self.metadata = metadata
            self.baseurl = self.seeddir
            return True
        except MAASSeedDirNone:
            pass
        except MAASSeedDirMalformed as exc:
            log.warn("%s was malformed: %s\n" % (self.seeddir, exc))
            raise

        try:
            # if there is no metadata_url, then we're not configured
            url = mcfg.get('metadata_url', None)
            if url is None:
                return False

            if not self.wait_for_metadata_service(url):
                return False

            self.baseurl = url

            (userdata, metadata) = read_maas_seed_url(self.baseurl,
                self.md_headers)
            self.userdata_raw = userdata
            self.metadata = metadata
            return True
        except Exception:
            util.logexc(log)
            return False
Example #21
    def crawl_metadata(self):
        """Crawl metadata service when available.

        @returns: Dictionary of crawled metadata content containing the keys:
          meta-data, user-data and dynamic.
        """
        if not self.wait_for_metadata_service():
            return {}
        api_version = self.get_metadata_api_version()
        crawled_metadata = {}
        try:
            crawled_metadata['user-data'] = ec2.get_instance_userdata(
                api_version, self.metadata_address)
            crawled_metadata['meta-data'] = ec2.get_instance_metadata(
                api_version, self.metadata_address)
            if self.cloud_name == CloudNames.AWS:
                identity = ec2.get_instance_identity(
                    api_version, self.metadata_address)
                crawled_metadata['dynamic'] = {'instance-identity': identity}
        except Exception:
            util.logexc(
                LOG, "Failed reading from metadata address %s",
                self.metadata_address)
            return {}
        crawled_metadata['_metadata_api_version'] = api_version
        return crawled_metadata
    def wait_for_metadata_service(self, url):
        mcfg = self.ds_cfg

        max_wait = 120
        try:
            max_wait = int(mcfg.get("max_wait", max_wait))
        except Exception:
            util.logexc(log)
            log.warn("Failed to get max wait. using %s" % max_wait)

        if max_wait == 0:
            return False

        timeout = 50
        try:
            timeout = int(mcfg.get("timeout", timeout))
        except Exception:
            util.logexc(log)
            log.warn("Failed to get timeout, using %s" % timeout)

        starttime = time.time()
        check_url = "%s/%s/meta-data/instance-id" % (url, MD_VERSION)
        url = util.wait_for_url(urls=[check_url], max_wait=max_wait,
            timeout=timeout, status_cb=log.warn,
            headers_cb=self.md_headers)

        if url:
            log.debug("Using metadata source: '%s'" % url)
        else:
            log.critical("giving up on md after %i seconds\n" %
                         int(time.time() - starttime))

        return (bool(url))
Example #23
    def apply_locale(self, locale, out_fn=None):
        # Adjust the locale value to the new value
        newconf = StringIO()
        for line in util.load_file(self.login_conf_fn).splitlines():
            newconf.write(re.sub(r'^default:',
                                 r'default:lang=%s:' % locale, line))
            newconf.write("\n")

        # Make a backup of login.conf.
        util.copy(self.login_conf_fn, self.login_conf_fn_bak)

        # And write the new login.conf.
        util.write_file(self.login_conf_fn, newconf.getvalue())

        try:
            LOG.debug("Running cap_mkdb for %s", locale)
            util.subp(['cap_mkdb', self.login_conf_fn])
        except util.ProcessExecutionError:
            # cap_mkdb failed, so restore the backup.
            util.logexc(LOG, "Failed to apply locale %s", locale)
            try:
                util.copy(self.login_conf_fn_bak, self.login_conf_fn)
            except IOError:
                util.logexc(LOG, "Failed to restore %s backup",
                            self.login_conf_fn)
Example #24
def extract_authorized_keys(username):
    (ssh_dir, pw_ent) = users_ssh_info(username)
    auth_key_fn = None
    with util.SeLinuxGuard(ssh_dir, recursive=True):
        try:
            # The 'AuthorizedKeysFile' may contain tokens
            # of the form %T which are substituted during connection set-up.
            # The following tokens are defined: %% is replaced by a literal
            # '%', %h is replaced by the home directory of the user being
            # authenticated and %u is replaced by the username of that user.
            ssh_cfg = parse_ssh_config_map(DEF_SSHD_CFG)
            auth_key_fn = ssh_cfg.get("authorizedkeysfile", '').strip()
            if not auth_key_fn:
                auth_key_fn = "%h/.ssh/authorized_keys"
            auth_key_fn = auth_key_fn.replace("%h", pw_ent.pw_dir)
            auth_key_fn = auth_key_fn.replace("%u", username)
            auth_key_fn = auth_key_fn.replace("%%", '%')
            if not auth_key_fn.startswith('/'):
                auth_key_fn = os.path.join(pw_ent.pw_dir, auth_key_fn)
        except (IOError, OSError):
            # Give up and use a default key filename
            auth_key_fn = os.path.join(ssh_dir, 'authorized_keys')
            util.logexc(LOG, "Failed extracting 'AuthorizedKeysFile' in ssh "
                        "config from %r, using 'AuthorizedKeysFile' file "
                        "%r instead", DEF_SSHD_CFG, auth_key_fn)
    return (auth_key_fn, parse_authorized_keys(auth_key_fn))
    def user_data_vsphere(self):
        '''
        vSphere specific userdata read

        If on vSphere the user data will be contained on the
        cdrom device in file <user_data_file>
        To access it:
           Leverage util.mount_cb to:
               mkdir <tmp mount dir>
               mount <cdrom device> <tmp mount dir>
               The call back passed to util.mount_cb will do:
                   read <tmp mount dir>/<user_data_file>
        '''

        return_str = None
        cdrom_list = util.find_devs_with('LABEL=CDROM')
        for cdrom_dev in cdrom_list:
            try:
                return_str = util.mount_cb(cdrom_dev, read_user_data_callback)
                if return_str:
                    break
            except OSError as err:
                if err.errno != errno.ENOENT:
                    raise
            except util.MountFailedError:
                util.logexc(LOG, "Failed to mount %s when looking for user "
                            "data", cdrom_dev)

        self.userdata_raw = return_str
        self.metadata = META_DATA_NOT_SUPPORTED

        if return_str:
            return True
        else:
            return False
Example #26
def handle(name, cfg, cloud, log, _args):
    if util.get_cfg_option_bool(cfg, "preserve_hostname", False):
        log.debug(("Configuration option 'preserve_hostname' is set,"
                   " not setting the hostname in module %s"), name)
        return
    (hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud)
    # Check for previous successful invocation of set-hostname

    # set-hostname artifact file accounts for both hostname and fqdn
    # deltas. As such, its format is different from cc_update_hostname's
    # previous-hostname file which only contains the base hostname.
    # TODO consolidate previous-hostname and set-hostname artifact files and
    # distro._read_hostname implementation so we only validate one artifact.
    prev_fn = os.path.join(cloud.get_cpath('data'), "set-hostname")
    prev_hostname = {}
    if os.path.exists(prev_fn):
        prev_hostname = util.load_json(util.load_file(prev_fn))
    hostname_changed = (hostname != prev_hostname.get('hostname') or
                        fqdn != prev_hostname.get('fqdn'))
    if not hostname_changed:
        log.debug('No hostname changes. Skipping set-hostname')
        return
    log.debug("Setting the hostname to %s (%s)", fqdn, hostname)
    try:
        cloud.distro.set_hostname(hostname, fqdn)
    except Exception as e:
        msg = "Failed to set the hostname to %s (%s)" % (fqdn, hostname)
        util.logexc(log, msg)
        raise SetHostnameError("%s: %s" % (msg, e))
    write_json(prev_fn, {'hostname': hostname, 'fqdn': fqdn})
Example #27
    def get_data(self):
        mcfg = self.ds_cfg

        try:
            (userdata, metadata) = read_maas_seed_dir(self.seed_dir)
            self.userdata_raw = userdata
            self.metadata = metadata
            self.base_url = self.seed_dir
            return True
        except MAASSeedDirNone:
            pass
        except MAASSeedDirMalformed as exc:
            LOG.warn("%s was malformed: %s" % (self.seed_dir, exc))
            raise

        # If there is no metadata_url, then we're not configured
        url = mcfg.get('metadata_url', None)
        if not url:
            return False

        try:
            if not self.wait_for_metadata_service(url):
                return False

            self.base_url = url

            (userdata, metadata) = read_maas_seed_url(self.base_url,
                                                      self._md_headers,
                                                      paths=self.paths)
            self.userdata_raw = userdata
            self.metadata = metadata
            return True
        except Exception:
            util.logexc(LOG, "Failed fetching metadata from url %s", url)
            return False
Example #28
def handle(name, _cfg, _cloud, log, _args):
    required_tools = [RMCCTRL, RECFGCT]
    for tool in required_tools:
        if not os.path.isfile(tool):
            log.debug('%s is not found but is required, therefore not '
                      'attempting to reset RMC.' % tool)
            return

    log.debug('Attempting to reset RMC.')
    system_info = util.system_info()

    node_id_before = get_node_id(log)
    log.debug('Node ID at beginning of module: %s' % node_id_before)

    # Stop the RMC subsystem and all resource managers so that we can make
    # some changes to it
    try:
        util.subp([RMCCTRL, '-z'])
    except:
        util.logexc(log, 'Failed to stop the RMC subsystem.')
        raise

    if 'linux' in system_info['platform'].lower():
        recycle_srcmstr_process(log)

    reconfigure_rsct_subsystems(log)

    node_id_after = get_node_id(log)
    log.debug('Node ID at end of module: %s' % node_id_after)

    if node_id_after == node_id_before:
        msg = 'New node ID did not get generated.'
        log.error(msg)
        raise Exception(msg)
Example #29
    def resize(self, diskdev, partnum, partdev):
        """
        GPT disks store metadata at the beginning (primary) and at the
        end (secondary) of the disk. When launching an image with a
        larger disk compared to the original image, the secondary copy
        is lost. Thus, the metadata will be marked CORRUPT, and need to
        be recovered.
        """
        try:
            util.subp(["gpart", "recover", diskdev])
        except util.ProcessExecutionError as e:
            if e.exit_code != 0:
                util.logexc(LOG, "Failed: gpart recover %s", diskdev)
                raise ResizeFailedException(e)

        before = get_size(partdev)
        try:
            util.subp(["gpart", "resize", "-i", partnum, diskdev])
        except util.ProcessExecutionError as e:
            util.logexc(LOG, "Failed: gpart resize -i %s %s", partnum, diskdev)
            raise ResizeFailedException(e)

        # Since growing the FS requires a reboot, make sure we reboot
        # first when this module has finished.
        open('/var/run/reboot-required', 'a').close()

        return (before, get_size(partdev))
Example #30
def run_commands(commands):
    """Run the provided commands provided in snap:commands configuration.

     Commands are run individually. Any errors are collected and reported
     after attempting all commands.

     @param commands: A list or dict containing commands to run. Keys of a
         dict will be used to order the commands provided as dict values.
     """
    if not commands:
        return
    LOG.debug('Running user-provided snap commands')
    if isinstance(commands, dict):
        # Sort commands based on dictionary key
        commands = [v for _, v in sorted(commands.items())]
    elif not isinstance(commands, list):
        raise TypeError(
            'commands parameter was not a list or dict: {commands}'.format(
                commands=commands))

    fixed_snap_commands = prepend_base_command('snap', commands)

    cmd_failures = []
    for command in fixed_snap_commands:
        shell = isinstance(command, str)
        try:
            util.subp(command, shell=shell, status_cb=sys.stderr.write)
        except util.ProcessExecutionError as e:
            cmd_failures.append(str(e))
    if cmd_failures:
        msg = 'Failures running snap commands:\n{cmd_failures}'.format(
            cmd_failures=cmd_failures)
        util.logexc(LOG, msg)
        raise RuntimeError(msg)
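A usage sketch for run_commands above; the import path is an assumption (the snippet resembles cloud-init's cc_snap module) and the commands are illustrative. As the docstring notes, dict keys only set ordering.

from cloudinit.config.cc_snap import run_commands  # assumed location

# Dict values are sorted by key before running.
run_commands({'00-refresh': ['snap', 'refresh'], '01-list': 'snap list'})
# Equivalent list form, run in the order given.
run_commands([['snap', 'refresh'], 'snap list'])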
Example #31
def update_resolve_conf_file(fn, dns_servers, search_servers):
    try:
        r_conf = ResolvConf(util.load_file(fn))
        r_conf.parse()
    except IOError:
        util.logexc(LOG, "Failed at parsing %s reverting to an empty "
                    "instance", fn)
        r_conf = ResolvConf('')
        r_conf.parse()
    if dns_servers:
        for s in dns_servers:
            try:
                r_conf.add_nameserver(s)
            except ValueError:
                util.logexc(LOG, "Failed at adding nameserver %s", s)
    if search_servers:
        for s in search_servers:
            try:
                r_conf.add_search_domain(s)
            except ValueError:
                util.logexc(LOG, "Failed at adding search domain %s", s)
    write_resolv_conf_file(fn, r_conf)
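A call sketch for update_resolve_conf_file above; the import path is an assumption (the helper resembles cloud-init's rhel_util module), and the addresses are illustrative.

from cloudinit.distros.rhel_util import update_resolve_conf_file  # assumed

# Ensure a nameserver and a search domain are present in /etc/resolv.conf.
update_resolve_conf_file('/etc/resolv.conf', ['8.8.8.8'], ['example.internal'])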
Example #32
    def apt_key_add():
        """apt-key add <file>

        returns filepath to new keyring, or '/dev/null' when an error occurs
        """
        file_name = "/dev/null"
        if not output_file:
            util.logexc(
                LOG, 'Unknown filename, failed to add key: "{}"'.format(data))
        else:
            try:
                key_dir = (CLOUD_INIT_GPG_DIR
                           if hardened else APT_TRUSTED_GPG_DIR)
                stdout = gpg.dearmor(data)
                file_name = "{}{}.gpg".format(key_dir, output_file)
                util.write_file(file_name, stdout)
            except subp.ProcessExecutionError:
                util.logexc(LOG,
                            "Gpg error, failed to add key: {}".format(data))
            except UnicodeDecodeError:
                util.logexc(LOG,
                            "Decode error, failed to add key: {}".format(data))
        return file_name
Example #33
File: bsd.py  Project: ssahani/cloud-init
    def _resolve_conf(self, settings, target=None):
        nameservers = settings.dns_nameservers
        searchdomains = settings.dns_searchdomains
        for interface in settings.iter_interfaces():
            for subnet in interface.get("subnets", []):
                if 'dns_nameservers' in subnet:
                    nameservers.extend(subnet['dns_nameservers'])
                if 'dns_search' in subnet:
                    searchdomains.extend(subnet['dns_search'])
        # Try to read the /etc/resolv.conf or just start from scratch if that
        # fails.
        try:
            resolvconf = ResolvConf(util.load_file(subp.target_path(
                target, self.resolv_conf_fn)))
            resolvconf.parse()
        except IOError:
            util.logexc(LOG, "Failed to parse %s, use new empty file",
                        subp.target_path(target, self.resolv_conf_fn))
            resolvconf = ResolvConf('')
            resolvconf.parse()

        # Add some nameservers
        for server in nameservers:
            try:
                resolvconf.add_nameserver(server)
            except ValueError:
                util.logexc(LOG, "Failed to add nameserver %s", server)

        # And add any searchdomains.
        for domain in searchdomains:
            try:
                resolvconf.add_search_domain(domain)
            except ValueError:
                util.logexc(LOG, "Failed to add search domain %s", domain)
        util.write_file(
            subp.target_path(target, self.resolv_conf_fn),
            str(resolvconf), 0o644)
Example #34
    def get_data(self):
        # azure removes/ejects the cdrom containing the ovf-env.xml
        # file on reboot.  So, in order to successfully reboot we
        # need to look in the datadir and consider that valid
        ddir = self.ds_cfg['data_dir']

        candidates = [self.seed_dir]
        candidates.extend(list_possible_azure_ds_devs())
        if ddir:
            candidates.append(ddir)

        found = None

        for cdev in candidates:
            try:
                if cdev.startswith("/dev/"):
                    ret = util.mount_cb(cdev, load_azure_ds_dir)
                else:
                    ret = load_azure_ds_dir(cdev)

            except NonAzureDataSource:
                continue
            except BrokenAzureDataSource as exc:
                raise exc
            except util.MountFailedError:
                LOG.warn("%s was not mountable", cdev)
                continue

            (md, self.userdata_raw, cfg, files) = ret
            self.seed = cdev
            self.metadata = util.mergemanydict([md, IB_DEFAULT_METADATA])
            self.cfg = util.mergemanydict([cfg, IB_BUILTIN_CLOUD_CONFIG])
            found = cdev

            LOG.debug("found datasource in %s", cdev)
            break

        if not found:
            return False

        if found == ddir:
            LOG.debug("using files cached in %s", ddir)

        # azure / hyper-v provides random data here
        seed = util.load_file("/sys/firmware/acpi/tables/OEM0", quiet=True)
        if seed:
            self.metadata['random_seed'] = seed

        # now update ds_cfg to reflect contents pass in config
        user_ds_cfg = util.get_cfg_by_path(self.cfg, IB_DS_CFG_PATH, {})
        self.ds_cfg = util.mergemanydict([user_ds_cfg, self.ds_cfg])
        mycfg = self.ds_cfg
        ddir = mycfg['data_dir']

        if found != ddir:
            cached_ovfenv = util.load_file(os.path.join(ddir, 'ovf-env.xml'),
                                           quiet=True)
            if cached_ovfenv != files['ovf-env.xml']:
                # source was not walinux-agent's datadir, so we have to clean
                # up so 'wait_for_files' doesn't return early due to stale data
                cleaned = []
                for f in [
                        os.path.join(ddir, f) for f in IB_DATA_DIR_CLEAN_LIST
                ]:
                    if os.path.exists(f):
                        util.del_file(f)
                        cleaned.append(f)
                if cleaned:
                    LOG.info("removed stale file(s) in '%s': %s", ddir,
                             str(cleaned))

        # walinux agent writes files world readable, but expects
        # the directory to be protected.
        write_files(ddir, files, dirmode=0o700)

        # handle the hostname 'publishing'
        try:
            handle_set_hostname(mycfg.get('set_hostname'),
                                self.metadata.get('local-hostname'),
                                mycfg['hostname_bounce'])
        except Exception as e:
            LOG.warn("Failed publishing hostname: %s", e)
            util.logexc(LOG, "handling set_hostname failed")

        try:
            invoke_agent(mycfg['agent_command'])
        except util.ProcessExecutionError:
            # claim the datasource even if the command failed
            util.logexc(LOG, "agent command '%s' failed.",
                        mycfg['agent_command'])

        # code is commented out because we have no WAAgent and
        #   we do not use SSH keys. Thus, no files to wait for.

        # shcfgxml = os.path.join(ddir, "SharedConfig.xml")
        # wait_for = [shcfgxml]

        # fp_files = []
        # for pk in self.cfg.get('_pubkeys', []):
        #     bname = str(pk['fingerprint'] + ".crt")
        #     fp_files += [os.path.join(ddir, bname)]

        # missing = util.log_time(logfunc=LOG.debug, msg="waiting for files",
        #                         func=wait_for_files,
        #                         args=(wait_for + fp_files,))
        # if len(missing):
        #     LOG.warn("Did not find files, but going on: %s", missing)

        # if shcfgxml in missing:
        #     LOG.warn("SharedConfig.xml missing, using static instance-id")
        # else:
        #     try:
        #         self.metadata['instance-id'] = \
        #             iid_from_shared_config(shcfgxml)
        #     except ValueError as e:
        #         LOG.warn("failed to get instance id in %s: %s", shcfgxml, e)

        # pubkeys = pubkeys_from_crt_files(fp_files)
        # self.metadata['public-keys'] = pubkeys

        found_ephemeral = find_ephemeral_disk()
        if found_ephemeral:
            self.ds_cfg['disk_aliases']['ephemeral0'] = found_ephemeral
            LOG.debug("using detected ephemeral0 of %s", found_ephemeral)

        cc_modules_override = support_new_ephemeral(self.sys_cfg)
        if cc_modules_override:
            self.cfg['cloud_config_modules'] = cc_modules_override

        return True
Example #35
def handle(name, cfg, cloud, log, args):
    # Get config
    lxd_cfg = cfg.get('lxd')
    if not lxd_cfg:
        log.debug("Skipping module named %s, not present or disabled by cfg")
        return
    if not isinstance(lxd_cfg, dict):
        log.warn("lxd config must be a dictionary. found a '%s'",
                 type(lxd_cfg))
        return

    # Grab the configuration
    init_cfg = lxd_cfg.get('init')
    if not isinstance(init_cfg, dict):
        log.warn("lxd/init config must be a dictionary. found a '%s'",
                 type(init_cfg))
        init_cfg = {}

    bridge_cfg = lxd_cfg.get('bridge')
    if not isinstance(bridge_cfg, dict):
        log.warn("lxd/bridge config must be a dictionary. found a '%s'",
                 type(bridge_cfg))
        bridge_cfg = {}

    # Install the needed packages
    packages = []
    if not util.which("lxd"):
        packages.append('lxd')

    if init_cfg.get("storage_backend") == "zfs" and not util.which('zfs'):
        packages.append('zfs')

    if len(packages):
        try:
            cloud.distro.install_packages(packages)
        except util.ProcessExecutionError as exc:
            log.warn("failed to install packages %s: %s", packages, exc)
            return

    # Set up lxd if init config is given
    if init_cfg:
        init_keys = ('network_address', 'network_port', 'storage_backend',
                     'storage_create_device', 'storage_create_loop',
                     'storage_pool', 'trust_password')
        cmd = ['lxd', 'init', '--auto']
        for k in init_keys:
            if init_cfg.get(k):
                cmd.extend(
                    ["--%s=%s" % (k.replace('_', '-'), str(init_cfg[k]))])
        util.subp(cmd)

    # Set up lxd-bridge if bridge config is given
    dconf_comm = "debconf-communicate"
    if bridge_cfg and util.which(dconf_comm):
        debconf = bridge_to_debconf(bridge_cfg)

        # Update debconf database
        try:
            log.debug("Setting lxd debconf via " + dconf_comm)
            data = "\n".join(
                ["set %s %s" % (k, v) for k, v in debconf.items()]) + "\n"
            util.subp(['debconf-communicate'], data)
        except Exception:
            util.logexc(log, "Failed to run '%s' for lxd with" % dconf_comm)

        # Remove the existing configuration file (forces re-generation)
        util.del_file("/etc/default/lxd-bridge")

        # Run reconfigure
        log.debug("Running dpkg-reconfigure for lxd")
        util.subp(['dpkg-reconfigure', 'lxd', '--frontend=noninteractive'])
    elif bridge_cfg:
        raise RuntimeError("Unable to configure lxd bridge without %s." +
                           dconf_comm)
Example #36
    def unlock_passwd(self, name):
        try:
            util.subp(['usermod', '-C', 'no', name])
        except Exception:
            util.logexc(LOG, "Failed to unlock user %s", name)
            raise
Example #37
    def clear_all(self):
        try:
            util.del_dir(self.sem_path)
        except (IOError, OSError):
            util.logexc(LOG, "Failed deleting semaphore directory %s",
                        self.sem_path)
Example #38
def main_single(name, args):
    # Cloud-init single stage is broken up into the following sub-stages
    # 1. Ensure that the init object fetches its config without errors
    # 2. Attempt to fetch the datasource (warn if it doesn't work)
    # 3. Construct the modules object
    # 4. Adjust any subsequent logging/output redirections using
    #    the modules objects configuration
    # 5. Run the single module
    # 6. Done!
    mod_name = args.name
    w_msg = welcome_format(name)
    init = stages.Init(ds_deps=[], reporter=args.reporter)
    # Stage 1
    init.read_cfg(extract_fns(args))
    # Stage 2
    try:
        init.fetch(existing="trust")
    except sources.DataSourceNotFoundException:
        # There was no datasource found,
        # that might be bad (or ok) depending on
        # the module being run (so continue on)
        util.logexc(LOG, ("Failed to fetch your datasource,"
                          " likely bad things to come!"))
        print_exc(("Failed to fetch your datasource,"
                   " likely bad things to come!"))
        if not args.force:
            return 1
    # Stage 3
    mods = stages.Modules(init, extract_fns(args), reporter=args.reporter)
    mod_args = args.module_args
    if mod_args:
        LOG.debug("Using passed in arguments %s", mod_args)
    mod_freq = args.frequency
    if mod_freq:
        LOG.debug("Using passed in frequency %s", mod_freq)
        mod_freq = FREQ_SHORT_NAMES.get(mod_freq)
    # Stage 4
    try:
        LOG.debug("Closing stdin")
        util.close_stdin()
        util.fixup_output(mods.cfg, None)
    except Exception:
        util.logexc(LOG, "Failed to setup output redirection!")
    if args.debug:
        # Reset so that all the debug handlers are closed out
        LOG.debug(("Logging being reset, this logger may no"
                   " longer be active shortly"))
        logging.resetLogging()
    logging.setupLogging(mods.cfg)
    apply_reporting_cfg(init.cfg)

    # now that logging is setup and stdout redirected, send welcome
    welcome(name, msg=w_msg)

    # Stage 5
    (which_ran, failures) = mods.run_single(mod_name, mod_args, mod_freq)
    if failures:
        LOG.warn("Ran %s but it failed!", mod_name)
        return 1
    elif not which_ran:
        LOG.warn("Did not run %s, does it exist?", mod_name)
        return 1
    else:
        # Guess it worked
        return 0
Example #39
def handle(_name, cfg, cloud, log, _args):

    # remove the static keys from the pristine image
    if cfg.get("ssh_deletekeys", True):
        key_pth = os.path.join("/etc/ssh/", "ssh_host_*key*")
        for f in glob.glob(key_pth):
            try:
                util.del_file(f)
            except:
                util.logexc(log, "Failed deleting key file %s", f)

    if "ssh_keys" in cfg:
        # if there are keys in cloud-config, use them
        for (key, val) in cfg["ssh_keys"].items():
            if key in KEY_2_FILE:
                tgt_fn = KEY_2_FILE[key][0]
                tgt_perms = KEY_2_FILE[key][1]
                util.write_file(tgt_fn, val, tgt_perms)

        for (priv, pub) in PRIV_2_PUB.items():
            if pub in cfg['ssh_keys'] or priv not in cfg['ssh_keys']:
                continue
            pair = (KEY_2_FILE[priv][0], KEY_2_FILE[pub][0])
            cmd = ['sh', '-xc', KEY_GEN_TPL % pair]
            try:
                # TODO(harlowja): Is this guard needed?
                with util.SeLinuxGuard("/etc/ssh", recursive=True):
                    util.subp(cmd, capture=False)
                log.debug("Generated a key for %s from %s", pair[0], pair[1])
            except:
                util.logexc(log, "Failed generated a key for %s from %s",
                            pair[0], pair[1])
    else:
        # if not, generate them
        genkeys = util.get_cfg_option_list(cfg,
                                           'ssh_genkeytypes',
                                           GENERATE_KEY_NAMES)
        for keytype in genkeys:
            keyfile = KEY_FILE_TPL % (keytype)
            util.ensure_dir(os.path.dirname(keyfile))
            if not os.path.exists(keyfile):
                cmd = ['ssh-keygen', '-t', keytype, '-N', '', '-f', keyfile]
                try:
                    # TODO(harlowja): Is this guard needed?
                    with util.SeLinuxGuard("/etc/ssh", recursive=True):
                        util.subp(cmd, capture=False)
                except:
                    util.logexc(log, "Failed generating key type %s to "
                                "file %s", keytype, keyfile)

    try:
        (users, _groups) = ds.normalize_users_groups(cfg, cloud.distro)
        (user, _user_config) = ds.extract_default(users)
        disable_root = util.get_cfg_option_bool(cfg, "disable_root", True)
        disable_root_opts = util.get_cfg_option_str(cfg, "disable_root_opts",
                                                    DISABLE_ROOT_OPTS)

        keys = cloud.get_public_ssh_keys() or []
        if "ssh_authorized_keys" in cfg:
            cfgkeys = cfg["ssh_authorized_keys"]
            keys.extend(cfgkeys)

        apply_credentials(keys, user, disable_root, disable_root_opts)
    except:
        util.logexc(log, "Applying ssh credentials failed!")
Example #40
    def lock_passwd(self, name):
        try:
            subp.subp(["pw", "usermod", name, "-h", "-"])
        except Exception:
            util.logexc(LOG, "Failed to lock user %s", name)
            raise
Example #41
    def force_passwd_change(self, user):
        try:
            util.subp(['usermod', '-F', user])
        except Exception:
            util.logexc(LOG, "Failed to set pw expiration for %s", user)
            raise
Example #42
    def _get_data(self):
        found = None
        md = {}
        results = {}
        for sdir in (self.seed_dir, "/config-drive"):
            if not os.path.isdir(sdir):
                continue
            try:
                results = read_config_drive(sdir)
                found = sdir
                break
            except openstack.NonReadable:
                util.logexc(LOG, "Failed reading config drive from %s", sdir)

        if not found:
            dslist = self.sys_cfg.get('datasource_list')
            for dev in find_candidate_devs(dslist=dslist):
                try:
                    # Set mtype if freebsd and turn off sync
                    if dev.startswith("/dev/cd"):
                        mtype = "cd9660"
                        sync = False
                    else:
                        mtype = None
                        sync = True
                    results = util.mount_cb(dev,
                                            read_config_drive,
                                            mtype=mtype,
                                            sync=sync)
                    found = dev
                except openstack.NonReadable:
                    pass
                except util.MountFailedError:
                    pass
                except openstack.BrokenMetadata:
                    util.logexc(LOG, "Broken config drive: %s", dev)
                if found:
                    break
        if not found:
            return False

        md = results.get('metadata', {})
        md = util.mergemanydict([md, DEFAULT_METADATA])

        self.dsmode = self._determine_dsmode([
            results.get('dsmode'),
            self.ds_cfg.get('dsmode'),
            sources.DSMODE_PASS if results['version'] == 1 else None
        ])

        if self.dsmode == sources.DSMODE_DISABLED:
            return False

        prev_iid = get_previous_iid(self.paths)
        cur_iid = md['instance-id']
        if prev_iid != cur_iid:
            # better would be to handle this centrally, allowing
            # the datasource to do something on new instance id
            # note, networking is only rendered here if dsmode is DSMODE_PASS
            # which means "DISABLED, but render files and networking"
            on_first_boot(results,
                          distro=self.distro,
                          network=self.dsmode == sources.DSMODE_PASS)

        # This is legacy and sneaky.  If dsmode is 'pass' then do not claim
        # the datasource was used, even though we did run on_first_boot above.
        if self.dsmode == sources.DSMODE_PASS:
            LOG.debug("%s: not claiming datasource, dsmode=%s", self,
                      self.dsmode)
            return False

        self.source = found
        self.metadata = md
        self.ec2_metadata = results.get('ec2-metadata')
        self.userdata_raw = results.get('userdata')
        self.version = results['version']
        self.files.update(results.get('files', {}))

        vd = results.get('vendordata')
        self.vendordata_pure = vd
        try:
            self.vendordata_raw = sources.convert_vendordata(vd)
        except ValueError as e:
            LOG.warning("Invalid content in vendor-data: %s", e)
            self.vendordata_raw = None

        # network_config is an /etc/network/interfaces formatted file and is
        # obsolete compared to networkdata (from network_data.json) but both
        # might be present.
        self.network_eni = results.get("network_config")
        self.network_json = results.get('networkdata')
        return True
Example #43
def handle(name, cfg, cloud, log, args):
    # Get config
    lxd_cfg = cfg.get("lxd")
    if not lxd_cfg:
        log.debug(
            "Skipping module named %s, not present or disabled by cfg", name
        )
        return
    if not isinstance(lxd_cfg, dict):
        log.warning(
            "lxd config must be a dictionary. found a '%s'", type(lxd_cfg)
        )
        return

    # Grab the configuration
    init_cfg = lxd_cfg.get("init")
    if not isinstance(init_cfg, dict):
        log.warning(
            "lxd/init config must be a dictionary. found a '%s'",
            type(init_cfg),
        )
        init_cfg = {}

    bridge_cfg = lxd_cfg.get("bridge", {})
    if not isinstance(bridge_cfg, dict):
        log.warning(
            "lxd/bridge config must be a dictionary. found a '%s'",
            type(bridge_cfg),
        )
        bridge_cfg = {}

    # Install the needed packages
    packages = []
    if not subp.which("lxd"):
        packages.append("lxd")

    if init_cfg.get("storage_backend") == "zfs" and not subp.which("zfs"):
        packages.append("zfsutils-linux")

    if len(packages):
        try:
            cloud.distro.install_packages(packages)
        except subp.ProcessExecutionError as exc:
            log.warning("failed to install packages %s: %s", packages, exc)
            return

    # Set up lxd if init config is given
    if init_cfg:
        init_keys = (
            "network_address",
            "network_port",
            "storage_backend",
            "storage_create_device",
            "storage_create_loop",
            "storage_pool",
            "trust_password",
        )
        subp.subp(["lxd", "waitready", "--timeout=300"])
        cmd = ["lxd", "init", "--auto"]
        for k in init_keys:
            if init_cfg.get(k):
                cmd.extend(
                    ["--%s=%s" % (k.replace("_", "-"), str(init_cfg[k]))]
                )
        subp.subp(cmd)

    # Set up lxd-bridge if bridge config is given
    dconf_comm = "debconf-communicate"
    if bridge_cfg:
        net_name = bridge_cfg.get("name", _DEFAULT_NETWORK_NAME)
        if os.path.exists("/etc/default/lxd-bridge") and subp.which(
            dconf_comm
        ):
            # Bridge configured through packaging

            debconf = bridge_to_debconf(bridge_cfg)

            # Update debconf database
            try:
                log.debug("Setting lxd debconf via " + dconf_comm)
                data = (
                    "\n".join(
                        ["set %s %s" % (k, v) for k, v in debconf.items()]
                    )
                    + "\n"
                )
                subp.subp(["debconf-communicate"], data)
            except Exception:
                util.logexc(
                    log, "Failed to run '%s' for lxd with" % dconf_comm
                )

            # Remove the existing configuration file (forces re-generation)
            util.del_file("/etc/default/lxd-bridge")

            # Run reconfigure
            log.debug("Running dpkg-reconfigure for lxd")
            subp.subp(["dpkg-reconfigure", "lxd", "--frontend=noninteractive"])
        else:
            # Built-in LXD bridge support
            cmd_create, cmd_attach = bridge_to_cmd(bridge_cfg)
            maybe_cleanup_default(
                net_name=net_name,
                did_init=bool(init_cfg),
                create=bool(cmd_create),
                attach=bool(cmd_attach),
            )
            if cmd_create:
                log.debug("Creating lxd bridge: %s" % " ".join(cmd_create))
                _lxc(cmd_create)

            if cmd_attach:
                log.debug(
                    "Setting up default lxd bridge: %s" % " ".join(cmd_attach)
                )
                _lxc(cmd_attach)

Example #44
    def get_data(self):
        found = None
        meta_data = {}
        results = {}

        # read from the seed directory
        if os.path.isdir(self.seed_dir):
            try:
                results = read_config_drive(self.seed_dir)
                found = self.seed_dir
            except openstack.NonReadable:
                found = None
                util.logexc(LOG, "Failed reading config drive from %s",
                            self.seed_dir)
        else:
            found = None
            util.logexc(LOG, "The seed directory %s not exists", self.seed_dir)

        if not found:
            # read from the opt device directory
            if os.path.isdir(self.opt_dir):
                try:
                    results = read_config_drive(self.opt_dir)
                    found = self.opt_dir
                except openstack.NonReadable:
                    found = None
                    util.logexc(LOG, "Failed reading config drive from %s",
                                self.opt_dir)
            else:
                util.logexc(LOG, "The opt device directory %s not exists",
                            self.opt_dir)

        if not found:
            # read from the opt device backup directory
            if os.path.isdir(self.backup_dir):
                try:
                    util.logexc(LOG, "reading config drive from %s",
                                self.backup_dir)
                    results = read_config_drive(self.backup_dir)
                    found = self.backup_dir
                except openstack.NonReadable:
                    found = None
                    util.logexc(LOG, "Failed reading config drive from %s",
                                self.backup_dir)
            else:
                util.logexc(LOG,
                            "The opt device backup directory %s not exists",
                            self.backup_dir)
                found = None

        if not found:
            return False

        meta_data = results.get('metadata', {})
        meta_data = util.mergemanydict([meta_data, DEFAULT_METADATA])
        user_dsmode = results.get('dsmode', None)
        if user_dsmode not in VALID_DSMODES + (None, ):
            LOG.warning("User specified invalid mode: %s", user_dsmode)
            user_dsmode = None

        dsmode = get_ds_mode(cfgdrv_ver=results['version'],
                             ds_cfg=self.ds_cfg.get('dsmode'),
                             user=user_dsmode)

        if dsmode == "disabled":
            # most likely user specified
            return False
        LOG.debug("DataSourceConfigDrive.py get_data results=%s", results)
        # we want to do some things (writing files and network config)
        # only on first boot, and even then, we want to do so in the
        # local datasource (so they happen earlier) even if the configured
        # dsmode is 'net' or 'pass'. To do this, we check the previous
        # instance-id
        prev_iid = get_previous_iid(self.paths)
        LOG.debug("DataSourceConfigDrive.py get_data prev_iid=%s", prev_iid)
        cur_iid = meta_data['instance-id']
        LOG.debug("DataSourceConfigDrive.py get_data cur_iid=%s", cur_iid)
        LOG.debug("DataSourceConfigDrive.py get_data dsmode=%s,self.dsmode=%s",
                  dsmode, self.dsmode)

        if prev_iid != cur_iid and self.dsmode == "local":
            on_first_boot(results, distro=self.distro)
        # dsmode != self.dsmode here if:
        #  * dsmode = "pass",  pass means it should only copy files and then
        #    pass to another datasource
        #  * dsmode = "net" and self.dsmode = "local"
        #    so that user boothooks would be applied with network, the
        #    local datasource just gets out of the way, and lets the net claim
        if dsmode != self.dsmode:
            LOG.debug("%s: not claiming datasource, dsmode=%s", self, dsmode)
            return False

        self.source = found
        self.metadata = meta_data
        self.ec2_metadata = results.get('ec2-metadata')
        self.userdata_raw = results.get('userdata')
        self.version = results['version']
        self.files.update(results.get('files', {}))
        self.vendordata_raw = results.get('vendordata')
        return True
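
The three lookups above are a "first readable location wins" search over the seed, opt-device and backup directories. A minimal standalone sketch of that pattern (the reader callable and candidate paths are placeholders, not cloud-init APIs):

import os

def first_readable(candidates, reader):
    # Try each candidate directory in turn; return the first one whose
    # contents can be read, together with what was read.
    for path in candidates:
        if not os.path.isdir(path):
            continue
        try:
            return path, reader(path)
        except Exception:
            continue
    return None, {}
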
Example #45
    def _get_data(self):
        defaults = {
            "instance-id": "nocloud",
            "dsmode": self.dsmode,
        }

        found = []
        mydata = {'meta-data': {}, 'user-data': "", 'vendor-data': "",
                  'network-config': None}

        try:
            # Parse the system serial label from dmi. If not empty, try parsing
            # like the commandline
            md = {}
            serial = util.read_dmi_data('system-serial-number')
            if serial and load_cmdline_data(md, serial):
                found.append("dmi")
                mydata = _merge_new_seed(mydata, {'meta-data': md})
        except Exception:
            util.logexc(LOG, "Unable to parse dmi data")
            return False

        try:
            # Parse the kernel command line, getting data passed in
            md = {}
            if load_cmdline_data(md):
                found.append("cmdline")
                mydata = _merge_new_seed(mydata, {'meta-data': md})
        except Exception:
            util.logexc(LOG, "Unable to parse command line data")
            return False

        # Check to see if the seed dir has data.
        pp2d_kwargs = {'required': ['user-data', 'meta-data'],
                       'optional': ['vendor-data', 'network-config']}

        for path in self.seed_dirs:
            try:
                seeded = util.pathprefix2dict(path, **pp2d_kwargs)
                found.append(path)
                LOG.debug("Using seeded data from %s", path)
                mydata = _merge_new_seed(mydata, seeded)
                break
            except ValueError:
                pass

        # If the datasource config had a 'seedfrom' entry, then that takes
        # precedence over a 'seedfrom' that was found in a filesystem
        # but not over external media
        if self.ds_cfg.get('seedfrom'):
            found.append("ds_config_seedfrom")
            mydata['meta-data']["seedfrom"] = self.ds_cfg['seedfrom']

        # fields appropriately named can also just come from the datasource
        # config (ie, 'user-data', 'meta-data', 'vendor-data' there)
        if 'user-data' in self.ds_cfg and 'meta-data' in self.ds_cfg:
            mydata = _merge_new_seed(mydata, self.ds_cfg)
            found.append("ds_config")

        def _pp2d_callback(mp, data):
            return util.pathprefix2dict(mp, **data)

        label = self.ds_cfg.get('fs_label', "cidata")
        if label is not None:
            for dev in self._get_devices(label):
                try:
                    LOG.debug("Attempting to use data from %s", dev)

                    try:
                        seeded = util.mount_cb(dev, _pp2d_callback,
                                               pp2d_kwargs)
                    except ValueError:
                        LOG.warning("device %s with label=%s not a"
                                    "valid seed.", dev, label)
                        continue

                    mydata = _merge_new_seed(mydata, seeded)

                    LOG.debug("Using data from %s", dev)
                    found.append(dev)
                    break
                except OSError as e:
                    if e.errno != errno.ENOENT:
                        raise
                except util.MountFailedError:
                    util.logexc(LOG, "Failed to mount %s when looking for "
                                "data", dev)

        # There was no indication on kernel cmdline or data
        # in the seeddir suggesting this handler should be used.
        if len(found) == 0:
            return False

        # The special argument "seedfrom" indicates we should
        # attempt to seed the userdata / metadata from its value.
        # Its primary value is in allowing the user to type less
        # on the command line, ie: ds=nocloud;s=http://bit.ly/abcdefg
        if "seedfrom" in mydata['meta-data']:
            seedfrom = mydata['meta-data']["seedfrom"]
            seedfound = False
            for proto in self.supported_seed_starts:
                if seedfrom.startswith(proto):
                    seedfound = proto
                    break
            if not seedfound:
                LOG.debug("Seed from %s not supported by %s", seedfrom, self)
                return False

            # This could throw errors, but the user told us to do it
            # so if errors are raised, let them raise
            (md_seed, ud) = util.read_seeded(seedfrom, timeout=None)
            LOG.debug("Using seeded cache data from %s", seedfrom)

            # Values in the command line override those from the seed
            mydata['meta-data'] = util.mergemanydict([mydata['meta-data'],
                                                      md_seed])
            mydata['user-data'] = ud
            found.append(seedfrom)

        # Now that we have exhausted any other places merge in the defaults
        mydata['meta-data'] = util.mergemanydict([mydata['meta-data'],
                                                  defaults])

        self.dsmode = self._determine_dsmode(
            [mydata['meta-data'].get('dsmode')])

        if self.dsmode == sources.DSMODE_DISABLED:
            LOG.debug("%s: not claiming datasource, dsmode=%s", self,
                      self.dsmode)
            return False

        self.seed = ",".join(found)
        self.metadata = mydata['meta-data']
        self.userdata_raw = mydata['user-data']
        self.vendordata_raw = mydata['vendor-data']
        self._network_config = mydata['network-config']
        self._network_eni = mydata['meta-data'].get('network-interfaces')
        return True
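
The 'seedfrom' handling above reduces to a prefix check against the protocols the datasource supports. A standalone sketch (the supported prefixes are assumed for illustration, not read from the class):

SUPPORTED_SEED_STARTS = ("http://", "https://")  # assumed for the sketch

def seed_proto(seedfrom):
    # Return the matching protocol prefix, or None if unsupported.
    for proto in SUPPORTED_SEED_STARTS:
        if seedfrom.startswith(proto):
            return proto
    return None

print(seed_proto("http://bit.ly/abcdefg"))  # 'http://'
print(seed_proto("file:///seed"))           # None
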
Example #46
    def expire_passwd(self, user):
        try:
            subp.subp(["pw", "usermod", user, "-p", "01-Jan-1970"])
        except Exception:
            util.logexc(LOG, "Failed to set pw expiration for %s", user)
            raise
Example #47
def handle(name, cfg, cloud, log, args):
    if len(args) != 0:
        ph_cfg = util.read_conf(args[0])
    else:
        if "phone_home" not in cfg:
            log.debug(
                "Skipping module named %s, "
                "no 'phone_home' configuration found",
                name,
            )
            return
        ph_cfg = cfg["phone_home"]

    if "url" not in ph_cfg:
        log.warning(
            "Skipping module named %s, "
            "no 'url' found in 'phone_home' configuration",
            name,
        )
        return

    url = ph_cfg["url"]
    post_list = ph_cfg.get("post", "all")
    tries = ph_cfg.get("tries")
    try:
        tries = int(tries)  # type: ignore
    except (TypeError, ValueError):  # 'tries' may be missing (None) or not numeric
        tries = 10
        util.logexc(
            log,
            "Configuration entry 'tries' is not an integer, using %s instead",
            tries,
        )

    if post_list == "all":
        post_list = POST_LIST_ALL

    all_keys = {}
    all_keys["instance_id"] = cloud.get_instance_id()
    all_keys["hostname"] = cloud.get_hostname()
    all_keys["fqdn"] = cloud.get_hostname(fqdn=True)

    pubkeys = {
        "pub_key_dsa": "/etc/ssh/ssh_host_dsa_key.pub",
        "pub_key_rsa": "/etc/ssh/ssh_host_rsa_key.pub",
        "pub_key_ecdsa": "/etc/ssh/ssh_host_ecdsa_key.pub",
        "pub_key_ed25519": "/etc/ssh/ssh_host_ed25519_key.pub",
    }

    for (n, path) in pubkeys.items():
        try:
            all_keys[n] = util.load_file(path)
        except Exception:
            util.logexc(log,
                        "%s: failed to open, can not phone home that data!",
                        path)

    submit_keys = {}
    for k in post_list:
        if k in all_keys:
            submit_keys[k] = all_keys[k]
        else:
            submit_keys[k] = None
            log.warning(
                "Requested key %s from 'post'"
                " configuration list not available",
                k,
            )

    # Get them ready to be posted
    real_submit_keys = {}
    for (k, v) in submit_keys.items():
        if v is None:
            real_submit_keys[k] = "N/A"
        else:
            real_submit_keys[k] = str(v)

    # In case the url is parameterized
    url_params = {
        "INSTANCE_ID": all_keys["instance_id"],
    }
    url = templater.render_string(url, url_params)
    try:
        url_helper.read_file_or_url(
            url,
            data=real_submit_keys,
            retries=tries,
            sec_between=3,
            ssl_details=util.fetch_ssl_details(cloud.paths),
        )
    except Exception:
        util.logexc(log, "Failed to post phone home data to %s in %s tries",
                    url, tries)
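
The url may carry an INSTANCE_ID placeholder that is filled in before posting. A standalone, stdlib-only sketch of that substitution (it does not use cloud-init's templater; the URL and instance id are made up):

from string import Template

url = "http://example.invalid/phone-home/$INSTANCE_ID"  # hypothetical URL
rendered = Template(url).safe_substitute(INSTANCE_ID="i-0123456789abcdef0")
print(rendered)
# http://example.invalid/phone-home/i-0123456789abcdef0
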
Example #48
def do_resize(resize_cmd, log):
    try:
        subp.subp(resize_cmd)
    except subp.ProcessExecutionError:
        util.logexc(log, "Failed to resize filesystem (cmd=%s)", resize_cmd)
        raise
Example #49
    def user_data_rhevm(self):
        '''
        RHEVM specific userdata read

         If on RHEV-M the user data will be contained on the
         floppy device in file <user_data_file>
         To access it:
           modprobe floppy

           Leverage util.mount_cb to:
               mkdir <tmp mount dir>
               mount /dev/fd0 <tmp mount dir>
               The call back passed to util.mount_cb will do:
                   read <tmp mount dir>/<user_data_file>
        '''

        return_str = None

        # modprobe floppy
        try:
            cmd = CMD_PROBE_FLOPPY
            (cmd_out, _err) = util.subp(cmd)
            LOG.debug('Command: %s\nOutput: %s', ' '.join(cmd), cmd_out)
        except ProcessExecutionError as _err:
            util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), _err)
            return False
        except OSError as _err:
            util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), _err)
            return False

        floppy_dev = '/dev/fd0'

        # udevadm settle for floppy device
        try:
            # copy the list so the module-level constant is not mutated
            cmd = CMD_UDEVADM_SETTLE + ['--exit-if-exists=' + floppy_dev]
            (cmd_out, _err) = util.subp(cmd)
            LOG.debug('Command: %s\nOutput: %s', ' '.join(cmd), cmd_out)
        except ProcessExecutionError as _err:
            util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), _err)
            return False
        except OSError as _err:
            util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), _err)
            return False

        try:
            return_str = util.mount_cb(floppy_dev, read_user_data_callback)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
        except util.MountFailedError:
            util.logexc(LOG, "Failed to mount %s when looking for user data",
                        floppy_dev)

        self.userdata_raw = return_str
        self.metadata = META_DATA_NOT_SUPPORTED

        if return_str:
            return True
        else:
            return False
Example #50
    def update_hostname(self, hostname, fqdn, prev_hostname_fn):
        applying_hostname = hostname

        # Determine what the actual written hostname should be
        hostname = self._select_hostname(hostname, fqdn)

        # If the previous hostname file exists lets see if we
        # can get a hostname from it
        if prev_hostname_fn and os.path.exists(prev_hostname_fn):
            prev_hostname = self._read_hostname(prev_hostname_fn)
        else:
            prev_hostname = None

        # Lets get where we should write the system hostname
        # and what the system hostname is
        (sys_fn, sys_hostname) = self._read_system_hostname()
        update_files = []

        # If there is no previous hostname or it differs
        # from what we want, lets update it or create the
        # file in the first place
        if not prev_hostname or prev_hostname != hostname:
            update_files.append(prev_hostname_fn)

        # If the system hostname is different than the previous
        # one or the desired one lets update it as well
        if (not sys_hostname) or (
            sys_hostname == prev_hostname and sys_hostname != hostname
        ):
            update_files.append(sys_fn)

        # If something else has changed the hostname after we set it
        # initially, we should not overwrite those changes (we should
        # only be setting the hostname once per instance)
        if sys_hostname and prev_hostname and sys_hostname != prev_hostname:
            LOG.info(
                "%s differs from %s, assuming user maintained hostname.",
                prev_hostname_fn,
                sys_fn,
            )
            return

        # Remove duplicates (in case the previous config filename
        # is the same as the system config filename); don't bother
        # doing it twice
        update_files = set([f for f in update_files if f])
        LOG.debug(
            "Attempting to update hostname to %s in %s files",
            hostname,
            len(update_files),
        )

        for fn in update_files:
            try:
                self._write_hostname(hostname, fn)
            except IOError:
                util.logexc(
                    LOG, "Failed to write hostname %s to %s", hostname, fn
                )

        # If the system hostname file name was provided set the
        # non-fqdn as the transient hostname.
        if sys_fn in update_files:
            self._apply_hostname(applying_hostname)
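
The file-selection logic above can be summarised as a pure function of the previous, current-system and desired hostnames. A standalone sketch of that decision (the function name and return convention are the sketch's own, not the distro API):

def hostname_files_to_update(prev, system, desired, prev_fn, sys_fn):
    # Mirror the decision above: update files that are missing or stale,
    # but back off entirely if something else changed the system hostname
    # after it was last recorded.
    if system and prev and system != prev:
        return []
    updates = []
    if not prev or prev != desired:
        updates.append(prev_fn)
    if (not system) or (system == prev and system != desired):
        updates.append(sys_fn)
    return sorted({f for f in updates if f})
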
Example #51
def main_init(name, args):
    deps = [sources.DEP_FILESYSTEM, sources.DEP_NETWORK]
    if args.local:
        deps = [sources.DEP_FILESYSTEM]

    if not args.local:
        # See doc/kernel-cmdline.txt
        #
        # This is used in maas datasource, in "ephemeral" (read-only root)
        # environment where the instance netboots to iscsi ro root.
        # and the entity that controls the pxe config has to configure
        # the maas datasource.
        #
        # Could be used elsewhere, only works on network based (not local).
        root_name = "%s.d" % (CLOUD_CONFIG)
        target_fn = os.path.join(root_name, "91_kernel_cmdline_url.cfg")
        util.read_write_cmdline_url(target_fn)

    # Cloud-init 'init' stage is broken up into the following sub-stages
    # 1. Ensure that the init object fetches its config without errors
    # 2. Setup logging/output redirections with resultant config (if any)
    # 3. Initialize the cloud-init filesystem
    # 4. Check if we can stop early by looking for various files
    # 5. Fetch the datasource
    # 6. Connect to the current instance location + update the cache
    # 7. Consume the userdata (handlers get activated here)
    # 8. Construct the modules object
    # 9. Adjust any subsequent logging/output redirections using the modules
    #    objects config as it may be different from init object
    # 10. Run the modules for the 'init' stage
    # 11. Done!
    if not args.local:
        w_msg = welcome_format(name)
    else:
        w_msg = welcome_format("%s-local" % (name))
    init = stages.Init(ds_deps=deps, reporter=args.reporter)
    # Stage 1
    init.read_cfg(extract_fns(args))
    # Stage 2
    outfmt = None
    errfmt = None
    try:
        LOG.debug("Closing stdin")
        util.close_stdin()
        (outfmt, errfmt) = util.fixup_output(init.cfg, name)
    except Exception:
        util.logexc(LOG, "Failed to setup output redirection!")
        print_exc("Failed to setup output redirection!")
    if args.debug:
        # Reset so that all the debug handlers are closed out
        LOG.debug(("Logging being reset, this logger may no"
                   " longer be active shortly"))
        logging.resetLogging()
    logging.setupLogging(init.cfg)
    apply_reporting_cfg(init.cfg)

    # Any log usage prior to setupLogging above did not have local user log
    # config applied.  We send the welcome message now, as stderr/out have
    # been redirected and log now configured.
    welcome(name, msg=w_msg)

    # Stage 3
    try:
        init.initialize()
    except Exception:
        util.logexc(LOG, "Failed to initialize, likely bad things to come!")
    # Stage 4
    path_helper = init.paths
    mode = sources.DSMODE_LOCAL if args.local else sources.DSMODE_NETWORK

    if mode == sources.DSMODE_NETWORK:
        existing = "trust"
        sys.stderr.write("%s\n" % (netinfo.debug_info()))
        LOG.debug(("Checking to see if files that we need already"
                   " exist from a previous run that would allow us"
                   " to stop early."))
        # no-net is written by upstart cloud-init-nonet when network failed
        # to come up
        stop_files = [
            os.path.join(path_helper.get_cpath("data"), "no-net"),
        ]
        existing_files = []
        for fn in stop_files:
            if os.path.isfile(fn):
                existing_files.append(fn)

        if existing_files:
            LOG.debug("[%s] Exiting. stop file %s existed", mode,
                      existing_files)
            return (None, [])
        else:
            LOG.debug("Execution continuing, no previous run detected that"
                      " would allow us to stop early.")
    else:
        existing = "check"
        if util.get_cfg_option_bool(init.cfg, 'manual_cache_clean', False):
            existing = "trust"

        init.purge_cache()
        # Delete the non-net file as well
        util.del_file(os.path.join(path_helper.get_cpath("data"), "no-net"))

    # Stage 5
    try:
        init.fetch(existing=existing)
        # if in network mode, and the datasource is local
        # then work was done at that stage.
        if mode == sources.DSMODE_NETWORK and init.datasource.dsmode != mode:
            LOG.debug("[%s] Exiting. datasource %s in local mode", mode,
                      init.datasource)
            return (None, [])
    except sources.DataSourceNotFoundException:
        # In the case of 'cloud-init init' without '--local' it is a bit
        # more likely that the user would consider it failure if nothing was
        # found. When using upstart it will also mention job failure
        # in the console log if the exit code is != 0.
        if mode == sources.DSMODE_LOCAL:
            LOG.debug("No local datasource found")
        else:
            util.logexc(LOG, ("No instance datasource found!"
                              " Likely bad things to come!"))
        if not args.force:
            init.apply_network_config(bring_up=not args.local)
            LOG.debug("[%s] Exiting without datasource in local mode", mode)
            if mode == sources.DSMODE_LOCAL:
                return (None, [])
            else:
                return (None, ["No instance datasource found."])
        else:
            LOG.debug("[%s] barreling on in force mode without datasource",
                      mode)

    # Stage 6
    iid = init.instancify()
    LOG.debug("[%s] %s will now be targeting instance id: %s. new=%s", mode,
              name, iid, init.is_new_instance())

    init.apply_network_config(bring_up=bool(mode != sources.DSMODE_LOCAL))

    if mode == sources.DSMODE_LOCAL:
        if init.datasource.dsmode != mode:
            LOG.debug("[%s] Exiting. datasource %s not in local mode.", mode,
                      init.datasource)
            return (init.datasource, [])
        else:
            LOG.debug("[%s] %s is in local mode, will apply init modules now.",
                      mode, init.datasource)

    # update fully realizes user-data (pulling in #include if necessary)
    init.update()
    # Stage 7
    try:
        # Attempt to consume the data per instance.
        # This may run user-data handlers and/or perform
        # url downloads and such as needed.
        (ran, _results) = init.cloudify().run('consume_data',
                                              init.consume_data,
                                              args=[PER_INSTANCE],
                                              freq=PER_INSTANCE)
        if not ran:
            # Just consume anything that is set to run per-always
            # if nothing ran in the per-instance code
            #
            # See: https://bugs.launchpad.net/bugs/819507 for a little
            # reason behind this...
            init.consume_data(PER_ALWAYS)
    except Exception:
        util.logexc(LOG, "Consuming user data failed!")
        return (init.datasource, ["Consuming user data failed!"])

    apply_reporting_cfg(init.cfg)

    # Stage 8 - re-read and apply relevant cloud-config to include user-data
    mods = stages.Modules(init, extract_fns(args), reporter=args.reporter)
    # Stage 9
    try:
        outfmt_orig = outfmt
        errfmt_orig = errfmt
        (outfmt, errfmt) = util.get_output_cfg(mods.cfg, name)
        if outfmt_orig != outfmt or errfmt_orig != errfmt:
            LOG.warn("Stdout, stderr changing to (%s, %s)", outfmt, errfmt)
            (outfmt, errfmt) = util.fixup_output(mods.cfg, name)
    except Exception:
        util.logexc(LOG, "Failed to re-adjust output redirection!")
    logging.setupLogging(mods.cfg)

    # Stage 10
    return (init.datasource, run_module_section(mods, name, name))
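
Stage 4's early exit is simply a check for marker files left behind by a previous run. A standalone sketch (the path is illustrative):

import os

stop_files = ["/var/lib/cloud/data/no-net"]  # illustrative path
existing_files = [f for f in stop_files if os.path.isfile(f)]
if existing_files:
    print("Exiting early; stop file(s) %s exist" % existing_files)
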
Example #52
    def add_user(self, name, **kwargs):
        """
        Add a user to the system using standard GNU tools
        """
        # XXX need to make add_user idempotent somehow as we
        # still want to add groups or modify SSH keys on pre-existing
        # users in the image.
        if util.is_user(name):
            LOG.info("User %s already exists, skipping.", name)
            return

        if 'create_groups' in kwargs:
            create_groups = kwargs.pop('create_groups')
        else:
            create_groups = True

        useradd_cmd = ['useradd', name]
        log_useradd_cmd = ['useradd', name]
        if util.system_is_snappy():
            useradd_cmd.append('--extrausers')
            log_useradd_cmd.append('--extrausers')

        # Since we are creating users, we want to carefully validate the
        # inputs. If something goes wrong, we can end up with a system
        # that nobody can login to.
        useradd_opts = {
            "gecos": '--comment',
            "homedir": '--home',
            "primary_group": '--gid',
            "uid": '--uid',
            "groups": '--groups',
            "passwd": '--password',
            "shell": '--shell',
            "expiredate": '--expiredate',
            "inactive": '--inactive',
            "selinux_user": '******',
        }

        useradd_flags = {
            "no_user_group": '--no-user-group',
            "system": '--system',
            "no_log_init": '--no-log-init',
        }

        redact_opts = ['passwd']

        # support kwargs having groups=[list] or groups="g1,g2"
        groups = kwargs.get('groups')
        if groups:
            if isinstance(groups, str):
                groups = groups.split(",")

            # remove any white spaces in group names, most likely
            # that came in as a string like: groups: group1, group2
            groups = [g.strip() for g in groups]

            # kwargs.items loop below wants a comma-delimited string
            # that can go right through to the command.
            kwargs['groups'] = ",".join(groups)

            primary_group = kwargs.get('primary_group')
            if primary_group:
                groups.append(primary_group)

        if create_groups and groups:
            for group in groups:
                if not util.is_group(group):
                    self.create_group(group)
                    LOG.debug("created group '%s' for user '%s'", group, name)

        # Check the values and create the command
        for key, val in sorted(kwargs.items()):

            if key in useradd_opts and val and isinstance(val, str):
                useradd_cmd.extend([useradd_opts[key], val])

                # Redact certain fields from the logs
                if key in redact_opts:
                    log_useradd_cmd.extend([useradd_opts[key], 'REDACTED'])
                else:
                    log_useradd_cmd.extend([useradd_opts[key], val])

            elif key in useradd_flags and val:
                useradd_cmd.append(useradd_flags[key])
                log_useradd_cmd.append(useradd_flags[key])

        # Don't create the home directory if directed so or if the user is a
        # system user
        if kwargs.get('no_create_home') or kwargs.get('system'):
            useradd_cmd.append('-M')
            log_useradd_cmd.append('-M')
        else:
            useradd_cmd.append('-m')
            log_useradd_cmd.append('-m')

        # Run the command
        LOG.debug("Adding user %s", name)
        try:
            subp.subp(useradd_cmd, logstring=log_useradd_cmd)
        except Exception as e:
            util.logexc(LOG, "Failed to create user %s", name)
            raise e
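
The option-handling loop above translates a dictionary of user settings into useradd arguments. A standalone sketch of that translation with a reduced option table (the sample user and options are hypothetical):

useradd_opts = {"gecos": "--comment", "shell": "--shell"}
useradd_flags = {"system": "--system"}
opts = {"gecos": "Example User", "shell": "/bin/bash", "system": False}

cmd = ["useradd", "example"]
for key, val in sorted(opts.items()):
    if key in useradd_opts and val and isinstance(val, str):
        cmd.extend([useradd_opts[key], val])
    elif key in useradd_flags and val:
        cmd.append(useradd_flags[key])

print(cmd)
# ['useradd', 'example', '--comment', 'Example User', '--shell', '/bin/bash']
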
Example #53
def status_wrapper(name, args, data_d=None, link_d=None):
    if data_d is None:
        data_d = os.path.normpath("/var/lib/cloud/data")
    if link_d is None:
        link_d = os.path.normpath("/run/cloud-init")

    status_path = os.path.join(data_d, "status.json")
    status_link = os.path.join(link_d, "status.json")
    result_path = os.path.join(data_d, "result.json")
    result_link = os.path.join(link_d, "result.json")

    util.ensure_dirs((
        data_d,
        link_d,
    ))

    (_name, functor) = args.action

    if name == "init":
        if args.local:
            mode = "init-local"
        else:
            mode = "init"
    elif name == "modules":
        mode = "modules-%s" % args.mode
    else:
        raise ValueError("unknown name: %s" % name)

    modes = ('init', 'init-local', 'modules-config', 'modules-final')

    status = None
    if mode == 'init-local':
        for f in (status_link, result_link, status_path, result_path):
            util.del_file(f)
    else:
        try:
            status = json.loads(util.load_file(status_path))
        except Exception:
            pass

    if status is None:
        nullstatus = {
            'errors': [],
            'start': None,
            'finished': None,
        }
        status = {'v1': {}}
        for m in modes:
            status['v1'][m] = nullstatus.copy()
        status['v1']['datasource'] = None

    v1 = status['v1']
    v1['stage'] = mode
    v1[mode]['start'] = time.time()

    atomic_helper.write_json(status_path, status)
    util.sym_link(os.path.relpath(status_path, link_d),
                  status_link,
                  force=True)

    try:
        ret = functor(name, args)
        if mode in ('init', 'init-local'):
            (datasource, errors) = ret
            if datasource is not None:
                v1['datasource'] = str(datasource)
        else:
            errors = ret

        v1[mode]['errors'] = [str(e) for e in errors]

    except Exception as e:
        util.logexc(LOG, "failed stage %s", mode)
        print_exc("failed run of stage %s" % mode)
        v1[mode]['errors'] = [str(e)]

    v1[mode]['finished'] = time.time()
    v1['stage'] = None

    atomic_helper.write_json(status_path, status)

    if mode == "modules-final":
        # write the 'finished' file
        errors = []
        for m in modes:
            if v1[m]['errors']:
                errors.extend(v1[m].get('errors', []))

        atomic_helper.write_json(
            result_path,
            {'v1': {
                'datasource': v1['datasource'],
                'errors': errors
            }})
        util.sym_link(os.path.relpath(result_path, link_d),
                      result_link,
                      force=True)

    return len(v1[mode]['errors'])
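
For reference, this is roughly the shape of the status.json document the wrapper maintains; the values shown are placeholders (timestamps are epoch floats in practice):

status = {
    "v1": {
        "stage": None,          # set to the running mode, cleared at the end
        "datasource": None,     # str(datasource) once one is found
        "init-local": {"errors": [], "start": None, "finished": None},
        "init": {"errors": [], "start": None, "finished": None},
        "modules-config": {"errors": [], "start": None, "finished": None},
        "modules-final": {"errors": [], "start": None, "finished": None},
    }
}
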
Example #54
    def expire_passwd(self, user):
        try:
            subp.subp(['passwd', '--expire', user])
        except Exception as e:
            util.logexc(LOG, "Failed to set 'expire' for %s", user)
            raise e
Example #55
    def lock_passwd(self, name):
        try:
            util.subp(['pw', 'usermod', name, '-h', '-'])
        except Exception as e:
            util.logexc(LOG, "Failed to lock user %s", name)
            raise e
Example #56
def handle(_name, cfg, cloud, log, _args):

    # remove the static keys from the pristine image
    if cfg.get("ssh_deletekeys", True):
        key_pth = os.path.join("/etc/ssh/", "ssh_host_*key*")
        for f in glob.glob(key_pth):
            try:
                util.del_file(f)
            except Exception:
                util.logexc(log, "Failed deleting key file %s", f)

    if "ssh_keys" in cfg:
        # if there are keys in cloud-config, use them
        for (key, val) in cfg["ssh_keys"].items():
            if key in CONFIG_KEY_TO_FILE:
                tgt_fn = CONFIG_KEY_TO_FILE[key][0]
                tgt_perms = CONFIG_KEY_TO_FILE[key][1]
                util.write_file(tgt_fn, val, tgt_perms)

        for (priv, pub) in PRIV_TO_PUB.items():
            if pub in cfg['ssh_keys'] or priv not in cfg['ssh_keys']:
                continue
            pair = (CONFIG_KEY_TO_FILE[priv][0], CONFIG_KEY_TO_FILE[pub][0])
            cmd = ['sh', '-xc', KEY_GEN_TPL % pair]
            try:
                # TODO(harlowja): Is this guard needed?
                with util.SeLinuxGuard("/etc/ssh", recursive=True):
                    subp.subp(cmd, capture=False)
                log.debug("Generated a key for %s from %s", pair[0], pair[1])
            except Exception:
                util.logexc(log, "Failed generated a key for %s from %s",
                            pair[0], pair[1])
    else:
        # if not, generate them
        genkeys = util.get_cfg_option_list(cfg, 'ssh_genkeytypes',
                                           GENERATE_KEY_NAMES)
        lang_c = os.environ.copy()
        lang_c['LANG'] = 'C'
        for keytype in genkeys:
            keyfile = KEY_FILE_TPL % (keytype)
            if os.path.exists(keyfile):
                continue
            util.ensure_dir(os.path.dirname(keyfile))
            cmd = ['ssh-keygen', '-t', keytype, '-N', '', '-f', keyfile]

            # TODO(harlowja): Is this guard needed?
            with util.SeLinuxGuard("/etc/ssh", recursive=True):
                try:
                    out, err = subp.subp(cmd, capture=True, env=lang_c)
                    sys.stdout.write(util.decode_binary(out))
                except subp.ProcessExecutionError as e:
                    err = util.decode_binary(e.stderr).lower()
                    if (e.exit_code == 1
                            and err.lower().startswith("unknown key")):
                        log.debug("ssh-keygen: unknown key type '%s'", keytype)
                    else:
                        util.logexc(
                            log, "Failed generating key type %s to "
                            "file %s", keytype, keyfile)

    if "ssh_publish_hostkeys" in cfg:
        host_key_blacklist = util.get_cfg_option_list(
            cfg["ssh_publish_hostkeys"], "blacklist",
            HOST_KEY_PUBLISH_BLACKLIST)
        publish_hostkeys = util.get_cfg_option_bool(
            cfg["ssh_publish_hostkeys"], "enabled", PUBLISH_HOST_KEYS)
    else:
        host_key_blacklist = HOST_KEY_PUBLISH_BLACKLIST
        publish_hostkeys = PUBLISH_HOST_KEYS

    if publish_hostkeys:
        hostkeys = get_public_host_keys(blacklist=host_key_blacklist)
        try:
            cloud.datasource.publish_host_keys(hostkeys)
        except Exception:
            util.logexc(log, "Publishing host keys failed!")

    try:
        (users, _groups) = ug_util.normalize_users_groups(cfg, cloud.distro)
        (user, _user_config) = ug_util.extract_default(users)
        disable_root = util.get_cfg_option_bool(cfg, "disable_root", True)
        disable_root_opts = util.get_cfg_option_str(cfg, "disable_root_opts",
                                                    ssh_util.DISABLE_USER_OPTS)

        keys = []
        if util.get_cfg_option_bool(cfg, 'allow_public_ssh_keys', True):
            keys = cloud.get_public_ssh_keys() or []
        else:
            log.debug('Skipping import of public SSH keys per '
                      'config setting: allow_public_ssh_keys=False')

        if "ssh_authorized_keys" in cfg:
            cfgkeys = cfg["ssh_authorized_keys"]
            keys.extend(cfgkeys)

        apply_credentials(keys, user, disable_root, disable_root_opts)
    except Exception:
        util.logexc(log, "Applying SSH credentials failed!")
Example #57
    def get_data(self):
        found = None
        md = {}
        results = {}
        if os.path.isdir(self.seed_dir):
            try:
                results = read_config_drive(self.seed_dir)
                found = self.seed_dir
            except openstack.NonReadable:
                util.logexc(LOG, "Failed reading config drive from %s",
                            self.seed_dir)
        if not found:
            for dev in find_candidate_devs():
                try:
                    results = util.mount_cb(dev, read_config_drive)
                    found = dev
                except openstack.NonReadable:
                    pass
                except util.MountFailedError:
                    pass
                except openstack.BrokenMetadata:
                    util.logexc(LOG, "Broken config drive: %s", dev)
                if found:
                    break
        if not found:
            return False

        md = results.get('metadata', {})
        md = util.mergemanydict([md, DEFAULT_METADATA])
        user_dsmode = results.get('dsmode', None)
        if user_dsmode not in VALID_DSMODES + (None,):
            LOG.warn("User specified invalid mode: %s", user_dsmode)
            user_dsmode = None

        dsmode = get_ds_mode(cfgdrv_ver=results['version'],
                             ds_cfg=self.ds_cfg.get('dsmode'),
                             user=user_dsmode)

        if dsmode == "disabled":
            # most likely user specified
            return False

        # TODO(smoser): fix this, its dirty.
        # we want to do some things (writing files and network config)
        # only on first boot, and even then, we want to do so in the
        # local datasource (so they happen earlier) even if the configured
        # dsmode is 'net' or 'pass'. To do this, we check the previous
        # instance-id
        prev_iid = get_previous_iid(self.paths)
        cur_iid = md['instance-id']
        if prev_iid != cur_iid and self.dsmode == "local":
            on_first_boot(results, distro=self.distro)

        # dsmode != self.dsmode here if:
        #  * dsmode = "pass",  pass means it should only copy files and then
        #    pass to another datasource
        #  * dsmode = "net" and self.dsmode = "local"
        #    so that user boothooks would be applied with network, the
        #    local datasource just gets out of the way, and lets the net claim
        if dsmode != self.dsmode:
            LOG.debug("%s: not claiming datasource, dsmode=%s", self, dsmode)
            return False

        self.source = found
        self.metadata = md
        self.ec2_metadata = results.get('ec2-metadata')
        self.userdata_raw = results.get('userdata')
        self.version = results['version']
        self.files.update(results.get('files', {}))

        vd = results.get('vendordata')
        self.vendordata_pure = vd
        try:
            self.vendordata_raw = openstack.convert_vendordata_json(vd)
        except ValueError as e:
            LOG.warn("Invalid content in vendor-data: %s", e)
            self.vendordata_raw = None

        return True
Example #58
File: main.py  Project: delphix/cloud-init
def main_init(name, args):
    deps = [sources.DEP_FILESYSTEM, sources.DEP_NETWORK]
    if args.local:
        deps = [sources.DEP_FILESYSTEM]

    early_logs = [
        attempt_cmdline_url(
            path=os.path.join(
                "%s.d" % CLOUD_CONFIG, "91_kernel_cmdline_url.cfg"
            ),
            network=not args.local,
        )
    ]

    # Cloud-init 'init' stage is broken up into the following sub-stages
    # 1. Ensure that the init object fetches its config without errors
    # 2. Setup logging/output redirections with resultant config (if any)
    # 3. Initialize the cloud-init filesystem
    # 4. Check if we can stop early by looking for various files
    # 5. Fetch the datasource
    # 6. Connect to the current instance location + update the cache
    # 7. Consume the userdata (handlers get activated here)
    # 8. Construct the modules object
    # 9. Adjust any subsequent logging/output redirections using the modules
    #    objects config as it may be different from init object
    # 10. Run the modules for the 'init' stage
    # 11. Done!
    if not args.local:
        w_msg = welcome_format(name)
    else:
        w_msg = welcome_format("%s-local" % (name))
    init = stages.Init(ds_deps=deps, reporter=args.reporter)
    # Stage 1
    init.read_cfg(extract_fns(args))
    # Stage 2
    outfmt = None
    errfmt = None
    try:
        early_logs.append((logging.DEBUG, "Closing stdin."))
        util.close_stdin()
        (outfmt, errfmt) = util.fixup_output(init.cfg, name)
    except Exception:
        msg = "Failed to setup output redirection!"
        util.logexc(LOG, msg)
        print_exc(msg)
        early_logs.append((logging.WARN, msg))
    if args.debug:
        # Reset so that all the debug handlers are closed out
        LOG.debug(
            "Logging being reset, this logger may no longer be active shortly"
        )
        logging.resetLogging()
    logging.setupLogging(init.cfg)
    apply_reporting_cfg(init.cfg)

    # Any log usage prior to setupLogging above did not have local user log
    # config applied.  We send the welcome message now, as stderr/out have
    # been redirected and log now configured.
    welcome(name, msg=w_msg)

    # re-play early log messages before logging was setup
    for lvl, msg in early_logs:
        LOG.log(lvl, msg)

    # Stage 3
    try:
        init.initialize()
    except Exception:
        util.logexc(LOG, "Failed to initialize, likely bad things to come!")
    # Stage 4
    path_helper = init.paths
    purge_cache_on_python_version_change(init)
    mode = sources.DSMODE_LOCAL if args.local else sources.DSMODE_NETWORK

    if mode == sources.DSMODE_NETWORK:
        existing = "trust"
        sys.stderr.write("%s\n" % (netinfo.debug_info()))
    else:
        existing = "check"
        mcfg = util.get_cfg_option_bool(init.cfg, "manual_cache_clean", False)
        if mcfg:
            LOG.debug("manual cache clean set from config")
            existing = "trust"
        else:
            mfile = path_helper.get_ipath_cur("manual_clean_marker")
            if os.path.exists(mfile):
                LOG.debug("manual cache clean found from marker: %s", mfile)
                existing = "trust"

        init.purge_cache()

    # Stage 5
    bring_up_interfaces = _should_bring_up_interfaces(init, args)
    try:
        init.fetch(existing=existing)
        # if in network mode, and the datasource is local
        # then work was done at that stage.
        if mode == sources.DSMODE_NETWORK and init.datasource.dsmode != mode:
            LOG.debug(
                "[%s] Exiting. datasource %s in local mode",
                mode,
                init.datasource,
            )
            return (None, [])
    except sources.DataSourceNotFoundException:
        # In the case of 'cloud-init init' without '--local' it is a bit
        # more likely that the user would consider it failure if nothing was
        # found.
        if mode == sources.DSMODE_LOCAL:
            LOG.debug("No local datasource found")
        else:
            util.logexc(
                LOG, "No instance datasource found! Likely bad things to come!"
            )
        if not args.force:
            init.apply_network_config(bring_up=bring_up_interfaces)
            LOG.debug("[%s] Exiting without datasource", mode)
            if mode == sources.DSMODE_LOCAL:
                return (None, [])
            else:
                return (None, ["No instance datasource found."])
        else:
            LOG.debug(
                "[%s] barreling on in force mode without datasource", mode
            )

    _maybe_persist_instance_data(init)
    # Stage 6
    iid = init.instancify()
    LOG.debug(
        "[%s] %s will now be targeting instance id: %s. new=%s",
        mode,
        name,
        iid,
        init.is_new_instance(),
    )

    if mode == sources.DSMODE_LOCAL:
        # Before network comes up, set any configured hostname to allow
        # dhcp clients to advertize this hostname to any DDNS services
        # LP: #1746455.
        _maybe_set_hostname(init, stage="local", retry_stage="network")
    init.apply_network_config(bring_up=bring_up_interfaces)

    if mode == sources.DSMODE_LOCAL:
        if init.datasource.dsmode != mode:
            LOG.debug(
                "[%s] Exiting. datasource %s not in local mode.",
                mode,
                init.datasource,
            )
            return (init.datasource, [])
        else:
            LOG.debug(
                "[%s] %s is in local mode, will apply init modules now.",
                mode,
                init.datasource,
            )

    # Give the datasource a chance to use network resources.
    # This is used on Azure to communicate with the fabric over network.
    init.setup_datasource()
    # update fully realizes user-data (pulling in #include if necessary)
    init.update()
    _maybe_set_hostname(init, stage="init-net", retry_stage="modules:config")
    # Stage 7
    try:
        # Attempt to consume the data per instance.
        # This may run user-data handlers and/or perform
        # url downloads and such as needed.
        (ran, _results) = init.cloudify().run(
            "consume_data",
            init.consume_data,
            args=[PER_INSTANCE],
            freq=PER_INSTANCE,
        )
        if not ran:
            # Just consume anything that is set to run per-always
            # if nothing ran in the per-instance code
            #
            # See: https://bugs.launchpad.net/bugs/819507 for a little
            # reason behind this...
            init.consume_data(PER_ALWAYS)
    except Exception:
        util.logexc(LOG, "Consuming user data failed!")
        return (init.datasource, ["Consuming user data failed!"])

    # Validate user-data adheres to schema definition
    if os.path.exists(init.paths.get_ipath_cur("userdata_raw")):
        validate_cloudconfig_schema(
            config=init.cfg, strict=False, log_details=False
        )
    else:
        LOG.debug("Skipping user-data validation. No user-data found.")

    apply_reporting_cfg(init.cfg)

    # Stage 8 - re-read and apply relevant cloud-config to include user-data
    mods = Modules(init, extract_fns(args), reporter=args.reporter)
    # Stage 9
    try:
        outfmt_orig = outfmt
        errfmt_orig = errfmt
        (outfmt, errfmt) = util.get_output_cfg(mods.cfg, name)
        if outfmt_orig != outfmt or errfmt_orig != errfmt:
            LOG.warning("Stdout, stderr changing to (%s, %s)", outfmt, errfmt)
            (outfmt, errfmt) = util.fixup_output(mods.cfg, name)
    except Exception:
        util.logexc(LOG, "Failed to re-adjust output redirection!")
    logging.setupLogging(mods.cfg)

    # give the activated datasource a chance to adjust
    init.activate_datasource()

    di_report_warn(datasource=init.datasource, cfg=init.cfg)

    # Stage 10
    return (init.datasource, run_module_section(mods, name, name))
Example #59
def handle(_name, cfg, cloud, log, _args):
    # fs_spec, fs_file, fs_vfstype, fs_mntops, fs_freq, fs_passno
    def_mnt_opts = "defaults,nobootwait"
    uses_systemd = cloud.distro.uses_systemd()
    if uses_systemd:
        def_mnt_opts = "defaults,nofail,x-systemd.requires=cloud-init.service"

    defvals = [None, None, "auto", def_mnt_opts, "0", "2"]
    defvals = cfg.get("mount_default_fields", defvals)

    # these are our default set of mounts
    defmnts = [["ephemeral0", "/mnt", "auto", defvals[3], "0", "2"],
               ["swap", "none", "swap", "sw", "0", "0"]]

    cfgmnt = []
    if "mounts" in cfg:
        cfgmnt = cfg["mounts"]

    LOG.debug("mounts configuration is %s", cfgmnt)

    fstab_lines = []
    fstab_devs = {}
    fstab_removed = []

    for line in util.load_file(FSTAB_PATH).splitlines():
        if MNT_COMMENT in line:
            fstab_removed.append(line)
            continue

        try:
            toks = WS.split(line)
        except Exception:
            # skip lines that cannot be tokenized rather than reusing a
            # stale 'toks' value from a previous iteration
            continue
        fstab_devs[toks[0]] = line
        fstab_lines.append(line)

    for i in range(len(cfgmnt)):
        # skip something that wasn't a list
        if not isinstance(cfgmnt[i], list):
            log.warning("Mount option %s not a list, got a %s instead",
                        (i + 1), type_utils.obj_name(cfgmnt[i]))
            continue

        start = str(cfgmnt[i][0])
        sanitized = sanitize_devname(start, cloud.device_name_to_device, log)
        if sanitized != start:
            log.debug("changed %s => %s" % (start, sanitized))

        if sanitized is None:
            log.debug("Ignoring nonexistent named mount %s", start)
            continue
        elif sanitized in fstab_devs:
            log.info("Device %s already defined in fstab: %s", sanitized,
                     fstab_devs[sanitized])
            continue

        cfgmnt[i][0] = sanitized

        # in case the user did not quote a field (likely fs-freq, fs_passno)
        # but do not convert None to 'None' (LP: #898365)
        for j in range(len(cfgmnt[i])):
            if cfgmnt[i][j] is None:
                continue
            else:
                cfgmnt[i][j] = str(cfgmnt[i][j])

    for i in range(len(cfgmnt)):
        # fill in values with defaults from defvals above
        for j in range(len(defvals)):
            if len(cfgmnt[i]) <= j:
                cfgmnt[i].append(defvals[j])
            elif cfgmnt[i][j] is None:
                cfgmnt[i][j] = defvals[j]

        # if the second entry in the list is 'None' this
        # clears all previous entries of that same 'fs_spec'
        # (fs_spec is the first field in /etc/fstab, ie, that device)
        if cfgmnt[i][1] is None:
            for j in range(i):
                if cfgmnt[j][0] == cfgmnt[i][0]:
                    cfgmnt[j][1] = None

    # for each of the "default" mounts, add them only if no other
    # entry has the same device name
    for defmnt in defmnts:
        start = defmnt[0]
        sanitized = sanitize_devname(start, cloud.device_name_to_device, log)
        if sanitized != start:
            log.debug("changed default device %s => %s" % (start, sanitized))

        if sanitized is None:
            log.debug("Ignoring nonexistent default named mount %s", start)
            continue
        elif sanitized in fstab_devs:
            log.debug("Device %s already defined in fstab: %s", sanitized,
                      fstab_devs[sanitized])
            continue

        defmnt[0] = sanitized

        cfgmnt_has = False
        for cfgm in cfgmnt:
            if cfgm[0] == defmnt[0]:
                cfgmnt_has = True
                break

        if cfgmnt_has:
            log.debug(("Not including %s, already"
                       " previously included"), start)
            continue
        cfgmnt.append(defmnt)

    # now, each entry in the cfgmnt list has all fstab values
    # if the second field is None (not the string, the value) we skip it
    actlist = []
    for x in cfgmnt:
        if x[1] is None:
            log.debug("Skipping nonexistent device named %s", x[0])
        else:
            actlist.append(x)

    swapret = handle_swapcfg(cfg.get('swap', {}))
    if swapret:
        actlist.append([swapret, "none", "swap", "sw", "0", "0"])

    if len(actlist) == 0:
        log.debug("No modifications to fstab needed")
        return

    cc_lines = []
    needswap = False
    need_mount_all = False
    dirs = []
    for line in actlist:
        # write 'comment' into the fs_mntops field, claiming this entry
        line[3] = "%s,%s" % (line[3], MNT_COMMENT)
        if line[2] == "swap":
            needswap = True
        if line[1].startswith("/"):
            dirs.append(line[1])
        cc_lines.append('\t'.join(line))

    mount_points = [
        v['mountpoint'] for k, v in util.mounts().items() if 'mountpoint' in v
    ]
    for d in dirs:
        try:
            util.ensure_dir(d)
        except Exception:
            util.logexc(log, "Failed to make '%s' config-mount", d)
        # dirs is list of directories on which a volume should be mounted.
        # If any of them does not already show up in the list of current
        # mount points, we will definitely need to do mount -a.
        if not need_mount_all and d not in mount_points:
            need_mount_all = True

    sadds = [WS.sub(" ", n) for n in cc_lines]
    sdrops = [WS.sub(" ", n) for n in fstab_removed]

    sops = (["- " + drop for drop in sdrops if drop not in sadds] +
            ["+ " + add for add in sadds if add not in sdrops])

    fstab_lines.extend(cc_lines)
    contents = "%s\n" % ('\n'.join(fstab_lines))
    util.write_file(FSTAB_PATH, contents)

    activate_cmds = []
    if needswap:
        activate_cmds.append(["swapon", "-a"])

    if len(sops) == 0:
        log.debug("No changes to /etc/fstab made.")
    else:
        log.debug("Changes to fstab: %s", sops)
        need_mount_all = True

    if need_mount_all:
        activate_cmds.append(["mount", "-a"])
        if uses_systemd:
            activate_cmds.append(["systemctl", "daemon-reload"])

    fmt = "Activating swap and mounts with: %s"
    for cmd in activate_cmds:
        fmt = "Activate mounts: %s:" + ' '.join(cmd)
        try:
            util.subp(cmd)
            log.debug(fmt, "PASS")
        except util.ProcessExecutionError:
            log.warning(fmt, "FAIL")
            util.logexc(log, fmt, "FAIL")
Example #60
File: main.py  Project: delphix/cloud-init
def status_wrapper(name, args, data_d=None, link_d=None):
    if data_d is None:
        data_d = os.path.normpath("/var/lib/cloud/data")
    if link_d is None:
        link_d = os.path.normpath("/run/cloud-init")

    status_path = os.path.join(data_d, "status.json")
    status_link = os.path.join(link_d, "status.json")
    result_path = os.path.join(data_d, "result.json")
    result_link = os.path.join(link_d, "result.json")

    util.ensure_dirs(
        (
            data_d,
            link_d,
        )
    )

    (_name, functor) = args.action

    if name == "init":
        if args.local:
            mode = "init-local"
        else:
            mode = "init"
    elif name == "modules":
        mode = "modules-%s" % args.mode
    else:
        raise ValueError("unknown name: %s" % name)

    modes = (
        "init",
        "init-local",
        "modules-init",
        "modules-config",
        "modules-final",
    )
    if mode not in modes:
        raise ValueError(
            "Invalid cloud init mode specified '{0}'".format(mode)
        )

    status = None
    if mode == "init-local":
        for f in (status_link, result_link, status_path, result_path):
            util.del_file(f)
    else:
        try:
            status = json.loads(util.load_file(status_path))
        except Exception:
            pass

    nullstatus = {
        "errors": [],
        "start": None,
        "finished": None,
    }

    if status is None:
        status = {"v1": {}}
        status["v1"]["datasource"] = None

    for m in modes:
        if m not in status["v1"]:
            status["v1"][m] = nullstatus.copy()

    v1 = status["v1"]
    v1["stage"] = mode
    v1[mode]["start"] = time.time()

    atomic_helper.write_json(status_path, status)
    util.sym_link(
        os.path.relpath(status_path, link_d), status_link, force=True
    )

    try:
        ret = functor(name, args)
        if mode in ("init", "init-local"):
            (datasource, errors) = ret
            if datasource is not None:
                v1["datasource"] = str(datasource)
        else:
            errors = ret

        v1[mode]["errors"] = [str(e) for e in errors]

    except Exception as e:
        util.logexc(LOG, "failed stage %s", mode)
        print_exc("failed run of stage %s" % mode)
        v1[mode]["errors"] = [str(e)]

    v1[mode]["finished"] = time.time()
    v1["stage"] = None

    atomic_helper.write_json(status_path, status)

    if mode == "modules-final":
        # write the 'finished' file
        errors = []
        for m in modes:
            if v1[m]["errors"]:
                errors.extend(v1[m].get("errors", []))

        atomic_helper.write_json(
            result_path,
            {"v1": {"datasource": v1["datasource"], "errors": errors}},
        )
        util.sym_link(
            os.path.relpath(result_path, link_d), result_link, force=True
        )

    return len(v1[mode]["errors"])