Example #1
def remove_artifacts(remove_logs, remove_seed=False):
    """Helper which removes artifacts dir and optionally log files.

    @param: remove_logs: Boolean. Set True to delete the cloud-init log files;
        False preserves them.
    @param: remove_seed: Boolean. Set True to also delete seed subdir in
        paths.cloud_dir.
    @returns: 0 on success, 1 otherwise.
    """
    init = Init(ds_deps=[])
    init.read_cfg()
    if remove_logs:
        for log_file in get_config_logfiles(init.cfg):
            del_file(log_file)

    if not os.path.isdir(init.paths.cloud_dir):
        return 0  # Artifacts dir already cleaned
    seed_path = os.path.join(init.paths.cloud_dir, 'seed')
    for path in glob.glob('%s/*' % init.paths.cloud_dir):
        if path == seed_path and not remove_seed:
            continue
        try:
            if os.path.isdir(path) and not is_link(path):
                del_dir(path)
            else:
                del_file(path)
        except OSError as e:
            error('Could not remove {0}: {1}'.format(path, str(e)))
            return 1
    return 0
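Every example on this page goes through cloudinit.util.del_file. The callers above treat it as "remove the path, but tolerate it already being gone"; a minimal sketch of that assumed behaviour (not the library's verbatim source) is:

import errno
import os


def del_file(path):
    # Remove path; a missing file is not an error, anything else propagates.
    try:
        os.unlink(path)
    except OSError as e:
        if e.errno != errno.ENOENT:
            raise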
Example #2
def _migrate_legacy_sems(cloud, log):
    legacy_adjust = {
        'apt-update-upgrade': [
            'apt-configure',
            'package-update-upgrade-install',
        ],
    }
    paths = (cloud.paths.get_ipath('sem'), cloud.paths.get_cpath('sem'))
    for sem_path in paths:
        if not sem_path or not os.path.exists(sem_path):
            continue
        sem_helper = helpers.FileSemaphores(sem_path)
        for (mod_name, migrate_to) in legacy_adjust.items():
            possibles = [mod_name, helpers.canon_sem_name(mod_name)]
            old_exists = []
            for p in os.listdir(sem_path):
                (name, _ext) = os.path.splitext(p)
                full_p = os.path.join(sem_path, p)
                if name in possibles and os.path.isfile(full_p):
                    old_exists.append(p)
            for p in old_exists:
                util.del_file(os.path.join(sem_path, p))
                (_name, freq) = os.path.splitext(p)
                for m in migrate_to:
                    log.debug("Migrating %s => %s with the same frequency",
                              p, m)
                    with sem_helper.lock(m, freq):
                        pass
Example #3
    def _get_data_source(self, existing):
        if self.datasource is not NULL_DATA_SOURCE:
            return self.datasource

        with events.ReportEventStack(
                name="check-cache",
                description="attempting to read from cache [%s]" % existing,
                parent=self.reporter) as myrep:

            ds, desc = self._restore_from_checked_cache(existing)
            myrep.description = desc
            self.ds_restored = bool(ds)
            LOG.debug(myrep.description)

        if not ds:
            util.del_file(self.paths.instance_link)
            (cfg_list, pkg_list) = self._get_datasources()
            # Deep copy so that user-data handlers can not modify
            # (which will affect user-data handlers down the line...)
            (ds, dsname) = sources.find_source(self.cfg,
                                               self.distro,
                                               self.paths,
                                               copy.deepcopy(self.ds_deps),
                                               cfg_list,
                                               pkg_list, self.reporter)
            LOG.info("Loaded datasource %s - %s", dsname, ds)
        self.datasource = ds
        # Ensure we adjust our path members datasource
        # now that we have one (thus allowing ipath to be used)
        self._reset()
        return ds
Example #4
def _maybe_remove_legacy_eth0(path="/etc/network/interfaces.d/eth0.cfg"):
    """Ubuntu cloud images previously included a 'eth0.cfg' that had
       hard coded content.  That file would interfere with the rendered
       configuration if it was present.

       If the file does not exist, do nothing.
       If the file exists:
         - with known content, remove it and warn
         - with unknown content, leave it and warn
    """

    if not os.path.exists(path):
        return

    bmsg = "Dynamic networking config may not apply."
    try:
        contents = util.load_file(path)
        known_contents = ["auto eth0", "iface eth0 inet dhcp"]
        lines = [f.strip() for f in contents.splitlines()
                 if not f.startswith("#")]
        if lines == known_contents:
            util.del_file(path)
            msg = "removed %s with known contents" % path
        else:
            msg = (bmsg + " '%s' exists with user configured content." % path)
    except Exception:
        msg = bmsg + " %s exists, but could not be read." % path

    LOG.warning(msg)
Example #5
 def purge_cache(self, rm_instance_lnk=False):
     rm_list = [self.paths.boot_finished]
     if rm_instance_lnk:
         rm_list.append(self.paths.instance_link)
     for f in rm_list:
         util.del_file(f)
     return len(rm_list)
Example #6
 def _write_network_config(self, netconfig):
     ns = net.parse_net_config_data(netconfig)
     net.render_network_state(target="/", network_state=ns,
                              eni=self.network_conf_fn,
                              links_prefix=self.links_prefix,
                              netrules=None)
     util.del_file("/etc/network/interfaces.d/eth0.cfg")
     return []
Example #7
 def clear(self, name, freq):
     name = canon_sem_name(name)
     sem_file = self._get_path(name, freq)
     try:
         util.del_file(sem_file)
     except (IOError, OSError):
         util.logexc(LOG, "Failed deleting semaphore %s", sem_file)
         return False
     return True
Example #8
 def clear(self, name, freq):
     name = canon_sem_name(name)
     sem_file = self._get_path(name, freq)
     try:
         util.del_file(sem_file)
     except (IOError, OSError):
         util.logexc(LOG, "Failed deleting semaphore %s", sem_file)
         return False
     return True
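Examples 7 and 8 are the clear() method of the file-backed semaphore helper (helpers.FileSemaphores, also used in Example 2). A hypothetical usage sketch follows; the path, the module name, the frequency string and the has_run() call are all assumptions for illustration, not taken from the snippets above:

# Hypothetical usage sketch.
sem_helper = helpers.FileSemaphores('/var/lib/cloud/instance/sem')
with sem_helper.lock('my-module', 'once-per-instance'):
    pass  # work guarded by the semaphore marker

if sem_helper.has_run('my-module', 'once-per-instance'):
    # clear() removes the marker via util.del_file so the module can run again
    sem_helper.clear('my-module', 'once-per-instance')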
Example #9
def write_ntp_config_template(distro_name,
                              service_name=None,
                              servers=None,
                              pools=None,
                              path=None,
                              template_fn=None,
                              template=None):
    """Render a ntp client configuration for the specified client.

    @param distro_name: string.  The distro class name.
    @param service_name: string. The name of the NTP client service.
    @param servers: A list of strings specifying ntp servers. Defaults to empty
    list.
    @param pools: A list of strings specifying ntp pools. Defaults to empty
    list.
    @param path: A string to specify where to write the rendered template.
    @param template_fn: A string to specify the template source file.
    @param template: A string specifying the contents of the template. This
    content will be written to a temporary file before being used to render
    the configuration file.

    @raises: ValueError when path is None.
    @raises: ValueError when template_fn is None and template is None.
    """
    if not servers:
        servers = []
    if not pools:
        pools = []

    if (len(servers) == 0 and distro_name == 'alpine'
            and service_name == 'ntpd'):
        # Alpine's Busybox ntpd only understands "servers" configuration
        # and not "pool" configuration.
        servers = generate_server_names(distro_name)
        LOG.debug('Adding distro default ntp servers: %s', ','.join(servers))
    elif len(servers) == 0 and len(pools) == 0:
        pools = generate_server_names(distro_name)
        LOG.debug('Adding distro default ntp pool servers: %s',
                  ','.join(pools))

    if not path:
        raise ValueError('Invalid value for path parameter')

    if not template_fn and not template:
        raise ValueError('Not template_fn or template provided')

    params = {'servers': servers, 'pools': pools}
    if template:
        tfile = temp_utils.mkstemp(prefix='template_name-', suffix=".tmpl")
        template_fn = tfile[1]  # filepath is second item in tuple
        util.write_file(template_fn, content=template)

    templater.render_to_file(template_fn, path, params)
    # clean up temporary template
    if template:
        util.del_file(template_fn)
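A hedged usage sketch for write_ntp_config_template: the template text, pool names and output path below are illustrative, and the '## template:jinja' header is an assumption about how cloud-init's templater picks its renderer.

ntp_template = (
    '## template:jinja\n'
    '{% for pool in pools %}pool {{ pool }} iburst\n{% endfor %}'
    '{% for server in servers %}server {{ server }} iburst\n{% endfor %}'
)
# Renders the template to path via a temporary file, then deletes the
# temporary file with util.del_file.
write_ntp_config_template(
    'ubuntu',
    service_name='chrony',
    pools=['0.pool.example.org', '1.pool.example.org'],
    path='/tmp/chrony.conf',
    template=ntp_template,
)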
Example #10
def persist_do_declaration(declaration, additional_declarations):
    """Write the f5-declarative-onboarding declaration to file"""
    do_declaration_dir_exists()
    if os.path.isfile(DO_DECLARATION_FILE):
        util.del_file(DO_DECLARATION_FILE)
    if additional_declarations:
        # top level merge
        for key, value in additional_declarations['Common'].iteritems():
            declaration['Common'][key] = value
    util.write_file(DO_DECLARATION_FILE, json.dumps(declaration))
Example #11
    def _generate_ssh_keys(self, data_dir):
        """Generate SSH keys to be used with image."""
        filename = os.path.join(data_dir, self.config['private_key'])

        if os.path.exists(filename):
            c_util.del_file(filename)

        c_util.subp(['ssh-keygen', '-t', 'rsa', '-b', '4096',
                     '-f', filename, '-P', '',
                     '-C', 'ubuntu@cloud_test'],
                    capture=True)
Example #12
    def _generate_ssh_keys(self, data_dir):
        """Generate SSH keys to be used with image."""
        filename = os.path.join(data_dir, self.config['private_key'])

        if os.path.exists(filename):
            c_util.del_file(filename)

        subp.subp(['ssh-keygen', '-m', 'PEM', '-t', 'rsa', '-b', '4096',
                   '-f', filename, '-P', '',
                   '-C', 'ubuntu@cloud_test'],
                  capture=True)
Example #13
def setup_marker_files(markerid):
    """
    Create a new marker file.
    Marker files are unique to a full customization workflow in VMware
    environment.
    """
    if not markerid:
        return
    markerfile = "/.markerfile-" + markerid
    util.del_file("/.markerfile-*.txt")
    open(markerfile, 'w').close()
Example #14
def create_swapfile(fname: str, size: str) -> None:
    """Size is in MiB."""

    errmsg = "Failed to create swapfile '%s' of size %sMB via %s: %s"

    def create_swap(fname, size, method):
        LOG.debug(
            "Creating swapfile in '%s' on fstype '%s' using '%s'",
            fname,
            fstype,
            method,
        )

        if method == "fallocate":
            cmd = ["fallocate", "-l", "%sM" % size, fname]
        elif method == "dd":
            cmd = [
                "dd",
                "if=/dev/zero",
                "of=%s" % fname,
                "bs=1M",
                "count=%s" % size,
            ]

        try:
            subp.subp(cmd, capture=True)
        except subp.ProcessExecutionError as e:
            LOG.info(errmsg, fname, size, method, e)
            util.del_file(fname)
            raise

    swap_dir = os.path.dirname(fname)
    util.ensure_dir(swap_dir)

    fstype = util.get_mount_info(swap_dir)[1]

    if (fstype == "xfs" and util.kernel_version() <
        (4, 18)) or fstype == "btrfs":
        create_swap(fname, size, "dd")
    else:
        try:
            create_swap(fname, size, "fallocate")
        except subp.ProcessExecutionError:
            LOG.info("fallocate swap creation failed, will attempt with dd")
            create_swap(fname, size, "dd")

    if os.path.exists(fname):
        util.chmod(fname, 0o600)
    try:
        subp.subp(["mkswap", fname])
    except subp.ProcessExecutionError:
        util.del_file(fname)
        raise
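An illustrative call to the helper above; the path is made up and the size string is in MiB, as the docstring states:

# Creates (or recreates) a 2 GiB swap file, preferring fallocate and
# falling back to dd; if mkswap fails the file is deleted and the error
# is re-raised.
create_swapfile('/var/swap.img', '2048')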
Example #15
def disable_enable_ssh(enabled):
    LOG.debug("setting enablement of ssh to: %s", enabled)
    # do something here that would enable or disable
    not_to_be_run = "/etc/ssh/sshd_not_to_be_run"
    if enabled:
        util.del_file(not_to_be_run)
        # this is an idempotent operation
        util.subp(["systemctl", "start", "ssh"])
    else:
        # this is an idempotent operation
        util.subp(["systemctl", "stop", "ssh"])
        util.write_file(not_to_be_run, "cloud-init\n")
Example #16
def disable_enable_ssh(enabled):
    LOG.debug("setting enablement of ssh to: %s", enabled)
    # do something here that would enable or disable
    not_to_be_run = "/etc/ssh/sshd_not_to_be_run"
    if enabled:
        util.del_file(not_to_be_run)
        # this is an idempotent operation
        util.subp(["systemctl", "start", "ssh"])
    else:
        # this is an idempotent operation
        util.subp(["systemctl", "stop", "ssh"])
        util.write_file(not_to_be_run, "cloud-init\n")
Example #17
def generate_ssh_keys(data_dir):
    """Generate SSH keys to be used with image."""
    LOG.info('generating SSH keys')
    filename = os.path.join(data_dir, 'id_rsa')

    if os.path.exists(filename):
        c_util.del_file(filename)

    c_util.subp([
        'ssh-keygen', '-t', 'rsa', '-b', '4096', '-f', filename, '-P', '',
        '-C', 'ubuntu@cloud_test'
    ],
                capture=True)
Example #18
def set_etc_timezone(tz, tz_file=None, tz_conf="/etc/timezone",
                     tz_local="/etc/localtime"):
    util.write_file(tz_conf, str(tz).rstrip() + "\n")
    # This ensures that the correct tz will be used for the system
    if tz_local and tz_file:
        # use a symlink if there exists a symlink or tz_local is not present
        islink = os.path.islink(tz_local)
        if islink or not os.path.exists(tz_local):
            if islink:
                util.del_file(tz_local)
            os.symlink(tz_file, tz_local)
        else:
            util.copy(tz_file, tz_local)
    return
Example #19
def set_etc_timezone(tz, tz_file=None, tz_conf="/etc/timezone",
                     tz_local="/etc/localtime"):
    util.write_file(tz_conf, str(tz).rstrip() + "\n")
    # This ensures that the correct tz will be used for the system
    if tz_local and tz_file:
        # use a symlink if there exists a symlink or tz_local is not present
        islink = os.path.islink(tz_local)
        if islink or not os.path.exists(tz_local):
            if islink:
                util.del_file(tz_local)
            os.symlink(tz_file, tz_local)
        else:
            util.copy(tz_file, tz_local)
    return
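An illustrative call to set_etc_timezone; the zoneinfo path is an assumption about the host's layout, not something the function derives itself:

set_etc_timezone('Europe/Berlin',
                 tz_file='/usr/share/zoneinfo/Europe/Berlin')
# /etc/timezone gets "Europe/Berlin\n"; /etc/localtime becomes a symlink to
# tz_file if it is absent or already a symlink, otherwise tz_file is copied.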
Example #20
    def _reflect_cur_instance(self):
        # Remove the old symlink and attach a new one so
        # that further reads/writes connect into the right location
        idir = self._get_ipath()
        util.del_file(self.paths.instance_link)
        util.sym_link(idir, self.paths.instance_link)

        # Ensures these dirs exist
        dir_list = []
        for d in self._get_instance_subdirs():
            dir_list.append(os.path.join(idir, d))
        util.ensure_dirs(dir_list)

        # Write out information on what is being used for the current instance
        # and what may have been used for a previous instance...
        dp = self.paths.get_cpath("data")

        # Write what the datasource was and is..
        ds = "%s: %s" % (type_utils.obj_name(self.datasource), self.datasource)
        previous_ds = None
        ds_fn = os.path.join(idir, "datasource")
        try:
            previous_ds = util.load_file(ds_fn).strip()
        except Exception:
            pass
        if not previous_ds:
            previous_ds = ds
        util.write_file(ds_fn, "%s\n" % ds)
        util.write_file(
            os.path.join(dp, "previous-datasource"), "%s\n" % (previous_ds)
        )

        # What the instance id was and is...
        iid = self.datasource.get_instance_id()
        iid_fn = os.path.join(dp, "instance-id")

        previous_iid = self.previous_iid()
        util.write_file(iid_fn, "%s\n" % iid)
        util.write_file(self.paths.get_runpath("instance_id"), "%s\n" % iid)
        util.write_file(
            os.path.join(dp, "previous-instance-id"), "%s\n" % (previous_iid)
        )

        self._write_to_cache()
        # Ensure needed components are regenerated
        # after change of instance which may cause
        # change of configuration
        self._reset()
        return iid
Example #21
    def _reflect_cur_instance(self):
        # Remove the old symlink and attach a new one so
        # that further reads/writes connect into the right location
        idir = self._get_ipath()
        util.del_file(self.paths.instance_link)
        util.sym_link(idir, self.paths.instance_link)

        # Ensures these dirs exist
        dir_list = []
        for d in self._get_instance_subdirs():
            dir_list.append(os.path.join(idir, d))
        util.ensure_dirs(dir_list)

        # Write out information on what is being used for the current instance
        # and what may have been used for a previous instance...
        dp = self.paths.get_cpath('data')

        # Write what the datasource was and is..
        ds = "%s: %s" % (type_utils.obj_name(self.datasource), self.datasource)
        previous_ds = None
        ds_fn = os.path.join(idir, 'datasource')
        try:
            previous_ds = util.load_file(ds_fn).strip()
        except Exception:
            pass
        if not previous_ds:
            previous_ds = ds
        util.write_file(ds_fn, "%s\n" % ds)
        util.write_file(os.path.join(dp, 'previous-datasource'),
                        "%s\n" % (previous_ds))

        # What the instance id was and is...
        iid = self.datasource.get_instance_id()
        previous_iid = None
        iid_fn = os.path.join(dp, 'instance-id')
        try:
            previous_iid = util.load_file(iid_fn).strip()
        except Exception:
            pass
        if not previous_iid:
            previous_iid = iid
        util.write_file(iid_fn, "%s\n" % iid)
        util.write_file(os.path.join(dp, 'previous-instance-id'),
                        "%s\n" % (previous_iid))
        # Ensure needed components are regenerated
        # after change of instance which may cause
        # change of configuration
        self._reset()
        return iid
Example #22
    def create_swap(fname, size, method):
        LOG.debug("Creating swapfile in '%s' on fstype '%s' using '%s'",
                  fname, fstype, method)

        if method == "fallocate":
            cmd = ['fallocate', '-l', '%dM' % size, fname]
        elif method == "dd":
            cmd = ['dd', 'if=/dev/zero', 'of=%s' % fname, 'bs=1M',
                   'count=%d' % size]

        try:
            util.subp(cmd, capture=True)
        except util.ProcessExecutionError as e:
            LOG.warning(errmsg, fname, size, method, e)
            util.del_file(fname)
Example #23
 def set_timezone(self, tz):
     tz_file = self._find_tz_file(tz)
     if self._dist_uses_systemd():
         # Currently, timedatectl complains if invoked during startup
         # so for compatibility, create the link manually.
         util.del_file(self.tz_local_fn)
         util.sym_link(tz_file, self.tz_local_fn)
     else:
         # Adjust the sysconfig clock zone setting
         clock_cfg = {
             'ZONE': str(tz),
         }
         rhel_util.update_sysconfig_file(self.clock_conf_fn, clock_cfg)
         # This ensures that the correct tz will be used for the system
         util.copy(tz_file, self.tz_local_fn)
Example #24
 def set_timezone(self, tz):
     tz_file = self._find_tz_file(tz)
     if self.uses_systemd():
         # Currently, timedatectl complains if invoked during startup
         # so for compatibility, create the link manually.
         util.del_file(self.tz_local_fn)
         util.sym_link(tz_file, self.tz_local_fn)
     else:
         # Adjust the sysconfig clock zone setting
         clock_cfg = {
             'ZONE': str(tz),
         }
         rhel_util.update_sysconfig_file(self.clock_conf_fn, clock_cfg)
         # This ensures that the correct tz will be used for the system
         util.copy(tz_file, self.tz_local_fn)
Example #25
def setup_marker_files(markerid, marker_dir):
    """
    Create a new marker file.
    Marker files are unique to a full customization workflow in VMware
    environment.
    @param markerid: a unique string representing a particular product
                     marker.
    @param: marker_dir: The directory in which markers exist.

    """
    LOG.debug("Handle marker creation")
    markerfile = os.path.join(marker_dir, ".markerfile-" + markerid + ".txt")
    for fname in os.listdir(marker_dir):
        if fname.startswith(".markerfile"):
            util.del_file(os.path.join(marker_dir, fname))
    open(markerfile, 'w').close()
Example #26
def setup_marker_files(markerid, marker_dir):
    """
    Create a new marker file.
    Marker files are unique to a full customization workflow in VMware
    environment.
    @param markerid: a unique string representing a particular product
                     marker.
    @param: marker_dir: The directory in which markers exist.

    """
    LOG.debug("Handle marker creation")
    markerfile = os.path.join(marker_dir, ".markerfile-" + markerid + ".txt")
    for fname in os.listdir(marker_dir):
        if fname.startswith(".markerfile"):
            util.del_file(os.path.join(marker_dir, fname))
    open(markerfile, 'w').close()
Example #27
def create_post_onboard():
    """Creates the initialization script for all post onboarding scripts"""
    LOG.debug('writing out post onboard initialization script')
    onboard_script = POST_ONBOARD_CMD_FILE_DIR + '/onboard.sh'
    if os.path.isfile(onboard_script):
        util.del_file(onboard_script)
    script_files = os.listdir(POST_ONBOARD_CMD_FILE_DIR)
    script_files.sort()
    with open(onboard_script, 'w') as obs:
        obs.write("#!/bin/bash\n\n")
        obs.write("function check_mcpd_up() {\n")
        obs.write("    checks=0\n")
        obs.write("    while [ $checks -lt 120 ]; do\n")
        obs.write(
            "        if tmsh -a show sys mcp-state field-fmt 2> /dev/null | grep -q running; then\n"
        )
        obs.write("            break\n")
        obs.write("        fi\n")
        obs.write("        echo 'waiting for mcpd to reach running state'\n")
        obs.write("        let checks=checks+1\n")
        obs.write("        sleep 10\n")
        obs.write("    done\n")
        obs.write("}\n\n")
        obs.write("function check_icontrol_up() {\n")
        obs.write("    checks=0\n")
        obs.write("    while [ $checks -lt 120 ]; do\n")
        obs.write(
            "        started=$(curl -su 'admin:' http://localhost:8100/shared/echo | grep '\"stage\":\"STARTED\"' | wc -l 2>/dev/null)\n"
        )
        obs.write("        if [ $started == 1 ]; then\n")
        obs.write("            break\n")
        obs.write("        fi\n")
        obs.write("        echo 'waiting for iControl to start'\n")
        obs.write("        let checks=checks+1\n")
        obs.write("        sleep 10\n")
        obs.write("    done\n")
        obs.write("}\n\n")
        obs.write("function exec_phases() {\n")
        for script_file in script_files:
            obs.write("    /bin/bash %s/%s\n" %
                      (POST_ONBOARD_CMD_FILE_DIR, script_file))
        obs.write("    echo 1 > " + POST_ONBOARD_FLAG_FILE + '\n')
        obs.write("}\n\n")
        obs.write("check_mcpd_up\n")
        obs.write("check_icontrol_up\n")
        obs.write("exec_phases\n")
    os.chmod(onboard_script, 0o775)
Example #28
def apply_apt_config(cfg, proxy_fname, config_fname):
    # Set up any apt proxy
    cfgs = (('apt_proxy', 'Acquire::HTTP::Proxy "%s";'),
            ('apt_http_proxy', 'Acquire::HTTP::Proxy "%s";'),
            ('apt_ftp_proxy', 'Acquire::FTP::Proxy "%s";'),
            ('apt_https_proxy', 'Acquire::HTTPS::Proxy "%s";'))

    proxies = [fmt % cfg.get(name) for (name, fmt) in cfgs if cfg.get(name)]
    if len(proxies):
        util.write_file(proxy_fname, '\n'.join(proxies) + '\n')
    elif os.path.isfile(proxy_fname):
        util.del_file(proxy_fname)

    if cfg.get('apt_config', None):
        util.write_file(config_fname, cfg.get('apt_config'))
    elif os.path.isfile(config_fname):
        util.del_file(config_fname)
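An illustrative call to the apply_apt_config variant above; the cfg keys match the cfgs tuple in the code, while the target filenames are assumptions rather than cloud-init's actual defaults:

cfg = {'apt_proxy': 'http://proxy.example.com:3128'}
apply_apt_config(cfg,
                 proxy_fname='/etc/apt/apt.conf.d/95cloud-init-proxy',
                 config_fname='/etc/apt/apt.conf.d/94cloud-init-config')
# Writes: Acquire::HTTP::Proxy "http://proxy.example.com:3128";
# With no proxy keys and no 'apt_config', existing files are deleted instead.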
Example #29
def apply_apt_config(cfg, proxy_fname, config_fname):
    # Set up any apt proxy
    cfgs = (('apt_proxy', 'Acquire::HTTP::Proxy "%s";'),
            ('apt_http_proxy', 'Acquire::HTTP::Proxy "%s";'),
            ('apt_ftp_proxy', 'Acquire::FTP::Proxy "%s";'),
            ('apt_https_proxy', 'Acquire::HTTPS::Proxy "%s";'))

    proxies = [fmt % cfg.get(name) for (name, fmt) in cfgs if cfg.get(name)]
    if len(proxies):
        util.write_file(proxy_fname, '\n'.join(proxies) + '\n')
    elif os.path.isfile(proxy_fname):
        util.del_file(proxy_fname)

    if cfg.get('apt_config', None):
        util.write_file(config_fname, cfg.get('apt_config'))
    elif os.path.isfile(config_fname):
        util.del_file(config_fname)
Example #30
def handle(_name, cfg, cloud, log, _args):
    validate_cloudconfig_schema(cfg, schema)
    network_hotplug_enabled = (
        "updates" in cfg
        and "network" in cfg["updates"]
        and "when" in cfg["updates"]["network"]
        and "hotplug" in cfg["updates"]["network"]["when"]
    )
    hotplug_supported = EventType.HOTPLUG in (
        cloud.datasource.get_supported_events([EventType.HOTPLUG]).get(
            EventScope.NETWORK, set()
        )
    )
    hotplug_enabled = stages.update_event_enabled(
        datasource=cloud.datasource,
        cfg=cfg,
        event_source_type=EventType.HOTPLUG,
        scope=EventScope.NETWORK,
    )
    if not (hotplug_supported and hotplug_enabled):
        if os.path.exists(HOTPLUG_UDEV_PATH):
            log.debug("Uninstalling hotplug, not enabled")
            util.del_file(HOTPLUG_UDEV_PATH)
            subp.subp(["udevadm", "control", "--reload-rules"])
        elif network_hotplug_enabled:
            log.warning(
                "Hotplug is unsupported by current datasource. "
                "Udev rules will NOT be installed."
            )
        else:
            log.debug("Skipping hotplug install, not enabled")
        return
    if not subp.which("udevadm"):
        log.debug("Skipping hotplug install, udevadm not found")
        return

    # This may need to turn into a distro property at some point
    libexecdir = "/usr/libexec/cloud-init"
    if not os.path.exists(libexecdir):
        libexecdir = "/usr/lib/cloud-init"
    util.write_file(
        filename=HOTPLUG_UDEV_PATH,
        content=HOTPLUG_UDEV_RULES_TEMPLATE.format(libexecdir=libexecdir),
    )
    subp.subp(["udevadm", "control", "--reload-rules"])
Example #31
def create_swapfile(fname, size):
    """Size is in MiB."""

    errmsg = "Failed to create swapfile '%s' of size %dMB via %s: %s"

    def create_swap(fname, size, method):
        LOG.debug("Creating swapfile in '%s' on fstype '%s' using '%s'", fname,
                  fstype, method)

        if method == "fallocate":
            cmd = ['fallocate', '-l', '%dM' % size, fname]
        elif method == "dd":
            cmd = [
                'dd', 'if=/dev/zero',
                'of=%s' % fname, 'bs=1M',
                'count=%d' % size
            ]

        try:
            util.subp(cmd, capture=True)
        except util.ProcessExecutionError as e:
            LOG.warning(errmsg, fname, size, method, e)
            util.del_file(fname)

    swap_dir = os.path.dirname(fname)
    util.ensure_dir(swap_dir)

    fstype = util.get_mount_info(swap_dir)[1]

    if fstype in ("xfs", "btrfs"):
        create_swap(fname, size, "dd")
    else:
        try:
            create_swap(fname, size, "fallocate")
        except util.ProcessExecutionError as e:
            LOG.warning(errmsg, fname, size, "dd", e)
            LOG.warning("Will attempt with dd.")
            create_swap(fname, size, "dd")

    util.chmod(fname, 0o600)
    try:
        util.subp(['mkswap', fname])
    except util.ProcessExecutionError:
        util.del_file(fname)
        raise
Example #32
def write_ntp_config_template(distro_name, servers=None, pools=None,
                              path=None, template_fn=None, template=None):
    """Render a ntp client configuration for the specified client.

    @param distro_name: string.  The distro class name.
    @param servers: A list of strings specifying ntp servers. Defaults to empty
    list.
    @param pools: A list of strings specifying ntp pools. Defaults to empty
    list.
    @param path: A string to specify where to write the rendered template.
    @param template_fn: A string to specify the template source file.
    @param template: A string specifying the contents of the template. This
    content will be written to a temporary file before being used to render
    the configuration file.

    @raises: ValueError when path is None.
    @raises: ValueError when template_fn is None and template is None.
    """
    if not servers:
        servers = []
    if not pools:
        pools = []

    if len(servers) == 0 and len(pools) == 0:
        pools = generate_server_names(distro_name)
        LOG.debug(
            'Adding distro default ntp pool servers: %s', ','.join(pools))

    if not path:
        raise ValueError('Invalid value for path parameter')

    if not template_fn and not template:
        raise ValueError('Not template_fn or template provided')

    params = {'servers': servers, 'pools': pools}
    if template:
        tfile = temp_utils.mkstemp(prefix='template_name-', suffix=".tmpl")
        template_fn = tfile[1]  # filepath is second item in tuple
        util.write_file(template_fn, content=template)

    templater.render_to_file(template_fn, path, params)
    # clean up temporary template
    if template:
        util.del_file(template_fn)
Example #33
def make_dhcp4_request(interface, timeout=120):
    """Makes DHCPv4 queries out a linux link device"""
    dhcp_lease_dir_exists()
    tmp_conf_file = DHCP_LEASE_DIR + '/dhclient.conf'
    lease_file = DHCP_LEASE_DIR + '/' + interface + '.lease'
    tmp_lease_file = '/tmp/' + interface + '.lease'
    fnull = open(os.devnull, 'w')
    dhclient_cf = open(tmp_conf_file, 'w')
    dhclient_cf.write(
        "\nrequest subnet-mask, broadcast-address, time-offset, routers,\n")
    dhclient_cf.write(
        "        domain-name, domain-name-servers, domain-search, host-name,\n")
    dhclient_cf.write(
        "        root-path, interface-mtu, classless-static-routes;\n")
    dhclient_cf.close()
    if os.path.isfile(lease_file):
        util.del_file(lease_file)
    subprocess.call(['/bin/pkill', 'dhclient'], stdout=fnull)
    subprocess.call(['/sbin/ip', 'link', 'set', interface, 'up'], stdout=fnull)
    subprocess.call(['/sbin/dhclient',
                     '-lf',
                     tmp_lease_file,
                     '-cf',
                     tmp_conf_file,
                     '-1',
                     '-timeout',
                     str(timeout),
                     '-pf',
                     '/tmp/dhclient.' + interface + '.pid',
                     '-sf',
                     '/bin/echo',
                     interface], stdout=fnull)
    if os.path.getsize(tmp_lease_file) > 0:
        util.copy(tmp_lease_file, lease_file)
        subprocess.call(['/bin/pkill', 'dhclient'], stdout=fnull)
        util.del_file('/tmp/dhclient.' + interface + '.pid')
        util.del_file(tmp_lease_file)
        return True
    else:
        subprocess.call(['/bin/pkill', 'dhclient'], stdout=fnull)
        util.del_file('/tmp/dhclient.' + interface + '.pid')
        util.del_file(tmp_lease_file)
    return False
Example #34
    def _get_data_source(self, existing):
        if self.datasource is not NULL_DATA_SOURCE:
            return self.datasource

        with events.ReportEventStack(
                name="check-cache",
                description="attempting to read from cache [%s]" % existing,
                parent=self.reporter) as myrep:
            ds = self._restore_from_cache()
            if ds and existing == "trust":
                myrep.description = "restored from cache: %s" % ds
            elif ds and existing == "check":
                if (hasattr(ds, 'check_instance_id') and
                        ds.check_instance_id(self.cfg)):
                    myrep.description = "restored from checked cache: %s" % ds
                else:
                    myrep.description = "cache invalid in datasource: %s" % ds
                    ds = None
            else:
                myrep.description = "no cache found"

            self.ds_restored = bool(ds)
            LOG.debug(myrep.description)

        if not ds:
            util.del_file(self.paths.instance_link)
            (cfg_list, pkg_list) = self._get_datasources()
            # Deep copy so that user-data handlers can not modify
            # (which will affect user-data handlers down the line...)
            (ds, dsname) = sources.find_source(self.cfg,
                                               self.distro,
                                               self.paths,
                                               copy.deepcopy(self.ds_deps),
                                               cfg_list,
                                               pkg_list, self.reporter)
            LOG.info("Loaded datasource %s - %s", dsname, ds)
        self.datasource = ds
        # Ensure we adjust our path members datasource
        # now that we have one (thus allowing ipath to be used)
        self._reset()
        return ds
Example #35
    def _get_data_source(self, existing):
        if self.datasource is not NULL_DATA_SOURCE:
            return self.datasource

        with events.ReportEventStack(
                name="check-cache",
                description="attempting to read from cache [%s]" % existing,
                parent=self.reporter) as myrep:
            ds = self._restore_from_cache()
            if ds and existing == "trust":
                myrep.description = "restored from cache: %s" % ds
            elif ds and existing == "check":
                if (hasattr(ds, 'check_instance_id') and
                        ds.check_instance_id(self.cfg)):
                    myrep.description = "restored from checked cache: %s" % ds
                else:
                    myrep.description = "cache invalid in datasource: %s" % ds
                    ds = None
            else:
                myrep.description = "no cache found"
            LOG.debug(myrep.description)

        if not ds:
            util.del_file(self.paths.instance_link)
            (cfg_list, pkg_list) = self._get_datasources()
            # Deep copy so that user-data handlers can not modify
            # (which will affect user-data handlers down the line...)
            (ds, dsname) = sources.find_source(self.cfg,
                                               self.distro,
                                               self.paths,
                                               copy.deepcopy(self.ds_deps),
                                               cfg_list,
                                               pkg_list, self.reporter)
            LOG.info("Loaded datasource %s - %s", dsname, ds)
        self.datasource = ds
        # Ensure we adjust our path members datasource
        # now that we have one (thus allowing ipath to be used)
        self._reset()
        return ds
Example #36
def apply_apt_config(cfg, proxy_fname, config_fname):
    """apply_apt_config
       Applies any apt*proxy config if specified
    """
    # Set up any apt proxy
    cfgs = (('proxy', 'Acquire::http::Proxy "%s";'),
            ('http_proxy', 'Acquire::http::Proxy "%s";'),
            ('ftp_proxy', 'Acquire::ftp::Proxy "%s";'),
            ('https_proxy', 'Acquire::https::Proxy "%s";'))

    proxies = [fmt % cfg.get(name) for (name, fmt) in cfgs if cfg.get(name)]
    if len(proxies):
        LOG.debug("write apt proxy info to %s", proxy_fname)
        util.write_file(proxy_fname, '\n'.join(proxies) + '\n')
    elif os.path.isfile(proxy_fname):
        util.del_file(proxy_fname)
        LOG.debug("no apt proxy configured, removed %s", proxy_fname)

    if cfg.get('conf', None):
        LOG.debug("write apt config info to %s", config_fname)
        util.write_file(config_fname, cfg.get('conf'))
    elif os.path.isfile(config_fname):
        util.del_file(config_fname)
        LOG.debug("no apt config configured, removed %s", config_fname)
Example #37
def apply_apt_config(cfg, proxy_fname, config_fname):
    """apply_apt_config
       Applies any apt*proxy config if specified
    """
    # Set up any apt proxy
    cfgs = (('proxy', 'Acquire::http::Proxy "%s";'),
            ('http_proxy', 'Acquire::http::Proxy "%s";'),
            ('ftp_proxy', 'Acquire::ftp::Proxy "%s";'),
            ('https_proxy', 'Acquire::https::Proxy "%s";'))

    proxies = [fmt % cfg.get(name) for (name, fmt) in cfgs if cfg.get(name)]
    if len(proxies):
        LOG.debug("write apt proxy info to %s", proxy_fname)
        util.write_file(proxy_fname, '\n'.join(proxies) + '\n')
    elif os.path.isfile(proxy_fname):
        util.del_file(proxy_fname)
        LOG.debug("no apt proxy configured, removed %s", proxy_fname)

    if cfg.get('conf', None):
        LOG.debug("write apt config info to %s", config_fname)
        util.write_file(config_fname, cfg.get('conf'))
    elif os.path.isfile(config_fname):
        util.del_file(config_fname)
        LOG.debug("no apt config configured, removed %s", config_fname)
Example #38
    def _write_network(self, settings):
        # Convert debian settings to ifcfg format
        entries = net_util.translate_network(settings)
        LOG.debug("Translated ubuntu style network settings %s into %s",
                  settings, entries)

        # Match Debian/Ubuntu distro functionality of clean slating
        # the network interface configuration.
        # Remove all existing ifcfg-eth* files.  This cleans up files that
        # are left around if you capture an image from a VM with 5 NICs
        # and deploy it with 1 NIC.
        rhel_util.remove_ifcfg_files(self.network_script_dir)
        rhel_util.remove_resolve_conf_file(self.resolve_conf_fn)
        util.del_file(self.routes_fn)

        # Make the intermediate format as the suse format...
        nameservers = []
        searchservers = []
        dev_names = entries.keys()
        mac_addrs = []
        for (dev, info) in entries.iteritems():
            mac_addrs.append(info.get('hwaddress'))
            net_fn = self.network_script_tpl % (dev)
            mode = info.get('auto')
            if mode:
                mode = 'auto'
            else:
                mode = 'manual'
            net_cfg = {}
            net_cfg['BOOTPROTO'] = info.get('bootproto')
            net_cfg['BROADCAST'] = info.get('broadcast')
            net_cfg['LLADDR'] = info.get('hwaddress')
            net_cfg['STARTMODE'] = mode
            if info['ipv6']:
                prefix = info.get('netmask')
                ipv6addr = info.get('address')
                net_cfg['IPADDR_0'] = ipv6addr
                net_cfg['PREFIXLEN_0'] = prefix
                net_cfg['LABEL_0'] = '0'
            if info['ipv4']:
                net_cfg['NETMASK'] = info.get('netmask')
                net_cfg['IPADDR'] = info.get('address')
            if dev != 'lo':
                # net_cfg['ETHERDEVICE'] = dev
                net_cfg['ETHTOOL_OPTIONS'] = ''
                net_cfg['USERCONTROL'] = 'no'
                net_cfg['NM_CONTROLLED'] = 'no'
                net_cfg['BRIDGE'] = 'yes'
            else:
                net_cfg['FIREWALL'] = 'no'
            if dev == 'eth0' and info.get('gateway'):
                self._write_default_route(self.routes_fn, info.get('gateway'))
            if 'mtu' in info:
                net_cfg['MTU'] = info.get('mtu')

            # Remove the existing cfg file so the network configuration
            # is a replacement versus an update to match debian distro
            # functionality.
            if dev != 'lo':
                util.del_file(net_fn)
            rhel_util.update_sysconfig_file(net_fn, net_cfg, True)
            if 'dns-nameservers' in info:
                nameservers.extend(info['dns-nameservers'])
            if 'dns-search' in info:
                searchservers.extend(info['dns-search'])
        if nameservers or searchservers:
            rhel_util.update_resolve_conf_file(self.resolve_conf_fn,
                                               nameservers, searchservers)
        return dev_names, mac_addrs
Example #39
def status_wrapper(name, args, data_d=None, link_d=None):
    if data_d is None:
        data_d = os.path.normpath("/var/lib/cloud/data")
    if link_d is None:
        link_d = os.path.normpath("/run/cloud-init")

    status_path = os.path.join(data_d, "status.json")
    status_link = os.path.join(link_d, "status.json")
    result_path = os.path.join(data_d, "result.json")
    result_link = os.path.join(link_d, "result.json")

    util.ensure_dirs((
        data_d,
        link_d,
    ))

    (_name, functor) = args.action

    if name == "init":
        if args.local:
            mode = "init-local"
        else:
            mode = "init"
    elif name == "modules":
        mode = "modules-%s" % args.mode
    else:
        raise ValueError("unknown name: %s" % name)

    modes = ('init', 'init-local', 'modules-config', 'modules-final')

    status = None
    if mode == 'init-local':
        for f in (status_link, result_link, status_path, result_path):
            util.del_file(f)
    else:
        try:
            status = json.loads(util.load_file(status_path))
        except Exception:
            pass

    if status is None:
        nullstatus = {
            'errors': [],
            'start': None,
            'finished': None,
        }
        status = {'v1': {}}
        for m in modes:
            status['v1'][m] = nullstatus.copy()
        status['v1']['datasource'] = None

    v1 = status['v1']
    v1['stage'] = mode
    v1[mode]['start'] = time.time()

    atomic_helper.write_json(status_path, status)
    util.sym_link(os.path.relpath(status_path, link_d),
                  status_link,
                  force=True)

    try:
        ret = functor(name, args)
        if mode in ('init', 'init-local'):
            (datasource, errors) = ret
            if datasource is not None:
                v1['datasource'] = str(datasource)
        else:
            errors = ret

        v1[mode]['errors'] = [str(e) for e in errors]

    except Exception as e:
        util.logexc(LOG, "failed stage %s", mode)
        print_exc("failed run of stage %s" % mode)
        v1[mode]['errors'] = [str(e)]

    v1[mode]['finished'] = time.time()
    v1['stage'] = None

    atomic_helper.write_json(status_path, status)

    if mode == "modules-final":
        # write the 'finished' file
        errors = []
        for m in modes:
            if v1[m]['errors']:
                errors.extend(v1[m].get('errors', []))

        atomic_helper.write_json(
            result_path,
            {'v1': {
                'datasource': v1['datasource'],
                'errors': errors
            }})
        util.sym_link(os.path.relpath(result_path, link_d),
                      result_link,
                      force=True)

    return len(v1[mode]['errors'])
Example #40
def main_init(name, args):
    deps = [sources.DEP_FILESYSTEM, sources.DEP_NETWORK]
    if args.local:
        deps = [sources.DEP_FILESYSTEM]

    if not args.local:
        # See doc/kernel-cmdline.txt
        #
        # This is used in maas datasource, in "ephemeral" (read-only root)
        # environment where the instance netboots to iscsi ro root.
        # and the entity that controls the pxe config has to configure
        # the maas datasource.
        #
        # Could be used elsewhere, only works on network based (not local).
        root_name = "%s.d" % (CLOUD_CONFIG)
        target_fn = os.path.join(root_name, "91_kernel_cmdline_url.cfg")
        util.read_write_cmdline_url(target_fn)

    # Cloud-init 'init' stage is broken up into the following sub-stages
    # 1. Ensure that the init object fetches its config without errors
    # 2. Setup logging/output redirections with resultant config (if any)
    # 3. Initialize the cloud-init filesystem
    # 4. Check if we can stop early by looking for various files
    # 5. Fetch the datasource
    # 6. Connect to the current instance location + update the cache
    # 7. Consume the userdata (handlers get activated here)
    # 8. Construct the modules object
    # 9. Adjust any subsequent logging/output redirections using the modules
    #    objects config as it may be different from init object
    # 10. Run the modules for the 'init' stage
    # 11. Done!
    if not args.local:
        w_msg = welcome_format(name)
    else:
        w_msg = welcome_format("%s-local" % (name))
    init = stages.Init(ds_deps=deps, reporter=args.reporter)
    # Stage 1
    init.read_cfg(extract_fns(args))
    # Stage 2
    outfmt = None
    errfmt = None
    try:
        LOG.debug("Closing stdin")
        util.close_stdin()
        (outfmt, errfmt) = util.fixup_output(init.cfg, name)
    except Exception:
        util.logexc(LOG, "Failed to setup output redirection!")
        print_exc("Failed to setup output redirection!")
    if args.debug:
        # Reset so that all the debug handlers are closed out
        LOG.debug(("Logging being reset, this logger may no"
                   " longer be active shortly"))
        logging.resetLogging()
    logging.setupLogging(init.cfg)
    apply_reporting_cfg(init.cfg)

    # Any log usage prior to setupLogging above did not have local user log
    # config applied.  We send the welcome message now, as stderr/out have
    # been redirected and log now configured.
    welcome(name, msg=w_msg)

    # Stage 3
    try:
        init.initialize()
    except Exception:
        util.logexc(LOG, "Failed to initialize, likely bad things to come!")
    # Stage 4
    path_helper = init.paths
    mode = sources.DSMODE_LOCAL if args.local else sources.DSMODE_NETWORK

    if mode == sources.DSMODE_NETWORK:
        existing = "trust"
        sys.stderr.write("%s\n" % (netinfo.debug_info()))
        LOG.debug(("Checking to see if files that we need already"
                   " exist from a previous run that would allow us"
                   " to stop early."))
        # no-net is written by upstart cloud-init-nonet when network failed
        # to come up
        stop_files = [
            os.path.join(path_helper.get_cpath("data"), "no-net"),
        ]
        existing_files = []
        for fn in stop_files:
            if os.path.isfile(fn):
                existing_files.append(fn)

        if existing_files:
            LOG.debug("[%s] Exiting. stop file %s existed", mode,
                      existing_files)
            return (None, [])
        else:
            LOG.debug("Execution continuing, no previous run detected that"
                      " would allow us to stop early.")
    else:
        existing = "check"
        if util.get_cfg_option_bool(init.cfg, 'manual_cache_clean', False):
            existing = "trust"

        init.purge_cache()
        # Delete the non-net file as well
        util.del_file(os.path.join(path_helper.get_cpath("data"), "no-net"))

    # Stage 5
    try:
        init.fetch(existing=existing)
        # if in network mode, and the datasource is local
        # then work was done at that stage.
        if mode == sources.DSMODE_NETWORK and init.datasource.dsmode != mode:
            LOG.debug("[%s] Exiting. datasource %s in local mode", mode,
                      init.datasource)
            return (None, [])
    except sources.DataSourceNotFoundException:
        # In the case of 'cloud-init init' without '--local' it is a bit
        # more likely that the user would consider it failure if nothing was
        # found. When using upstart it will also mention job failure
        # in console log if exit code is != 0.
        if mode == sources.DSMODE_LOCAL:
            LOG.debug("No local datasource found")
        else:
            util.logexc(LOG, ("No instance datasource found!"
                              " Likely bad things to come!"))
        if not args.force:
            init.apply_network_config(bring_up=not args.local)
            LOG.debug("[%s] Exiting without datasource in local mode", mode)
            if mode == sources.DSMODE_LOCAL:
                return (None, [])
            else:
                return (None, ["No instance datasource found."])
        else:
            LOG.debug("[%s] barreling on in force mode without datasource",
                      mode)

    # Stage 6
    iid = init.instancify()
    LOG.debug("[%s] %s will now be targeting instance id: %s. new=%s", mode,
              name, iid, init.is_new_instance())

    init.apply_network_config(bring_up=bool(mode != sources.DSMODE_LOCAL))

    if mode == sources.DSMODE_LOCAL:
        if init.datasource.dsmode != mode:
            LOG.debug("[%s] Exiting. datasource %s not in local mode.", mode,
                      init.datasource)
            return (init.datasource, [])
        else:
            LOG.debug("[%s] %s is in local mode, will apply init modules now.",
                      mode, init.datasource)

    # update fully realizes user-data (pulling in #include if necessary)
    init.update()
    # Stage 7
    try:
        # Attempt to consume the data per instance.
        # This may run user-data handlers and/or perform
        # url downloads and such as needed.
        (ran, _results) = init.cloudify().run('consume_data',
                                              init.consume_data,
                                              args=[PER_INSTANCE],
                                              freq=PER_INSTANCE)
        if not ran:
            # Just consume anything that is set to run per-always
            # if nothing ran in the per-instance code
            #
            # See: https://bugs.launchpad.net/bugs/819507 for a little
            # reason behind this...
            init.consume_data(PER_ALWAYS)
    except Exception:
        util.logexc(LOG, "Consuming user data failed!")
        return (init.datasource, ["Consuming user data failed!"])

    apply_reporting_cfg(init.cfg)

    # Stage 8 - re-read and apply relevant cloud-config to include user-data
    mods = stages.Modules(init, extract_fns(args), reporter=args.reporter)
    # Stage 9
    try:
        outfmt_orig = outfmt
        errfmt_orig = errfmt
        (outfmt, errfmt) = util.get_output_cfg(mods.cfg, name)
        if outfmt_orig != outfmt or errfmt_orig != errfmt:
            LOG.warn("Stdout, stderr changing to (%s, %s)", outfmt, errfmt)
            (outfmt, errfmt) = util.fixup_output(mods.cfg, name)
    except Exception:
        util.logexc(LOG, "Failed to re-adjust output redirection!")
    logging.setupLogging(mods.cfg)

    # Stage 10
    return (init.datasource, run_module_section(mods, name, name))
Example #41
def handle(_name, cfg, cloud, log, _args):

    # remove the static keys from the pristine image
    if cfg.get("ssh_deletekeys", True):
        key_pth = os.path.join("/etc/ssh/", "ssh_host_*key*")
        for f in glob.glob(key_pth):
            try:
                util.del_file(f)
            except:
                util.logexc(log, "Failed deleting key file %s", f)

    if "ssh_keys" in cfg:
        # if there are keys in cloud-config, use them
        for (key, val) in cfg["ssh_keys"].items():
            if key in KEY_2_FILE:
                tgt_fn = KEY_2_FILE[key][0]
                tgt_perms = KEY_2_FILE[key][1]
                util.write_file(tgt_fn, val, tgt_perms)

        for (priv, pub) in PRIV_2_PUB.items():
            if pub in cfg['ssh_keys'] or priv not in cfg['ssh_keys']:
                continue
            pair = (KEY_2_FILE[priv][0], KEY_2_FILE[pub][0])
            cmd = ['sh', '-xc', KEY_GEN_TPL % pair]
            try:
                # TODO(harlowja): Is this guard needed?
                with util.SeLinuxGuard("/etc/ssh", recursive=True):
                    util.subp(cmd, capture=False)
                log.debug("Generated a key for %s from %s", pair[0], pair[1])
            except:
                util.logexc(log, "Failed generated a key for %s from %s",
                            pair[0], pair[1])
    else:
        # if not, generate them
        genkeys = util.get_cfg_option_list(cfg,
                                           'ssh_genkeytypes',
                                           GENERATE_KEY_NAMES)
        for keytype in genkeys:
            keyfile = KEY_FILE_TPL % (keytype)
            util.ensure_dir(os.path.dirname(keyfile))
            if not os.path.exists(keyfile):
                cmd = ['ssh-keygen', '-t', keytype, '-N', '', '-f', keyfile]
                try:
                    # TODO(harlowja): Is this guard needed?
                    with util.SeLinuxGuard("/etc/ssh", recursive=True):
                        util.subp(cmd, capture=False)
                except:
                    util.logexc(log, "Failed generating key type %s to "
                                "file %s", keytype, keyfile)

    try:
        (users, _groups) = ds.normalize_users_groups(cfg, cloud.distro)
        (user, _user_config) = ds.extract_default(users)
        disable_root = util.get_cfg_option_bool(cfg, "disable_root", True)
        disable_root_opts = util.get_cfg_option_str(cfg, "disable_root_opts",
                                                    DISABLE_ROOT_OPTS)

        keys = cloud.get_public_ssh_keys() or []
        if "ssh_authorized_keys" in cfg:
            cfgkeys = cfg["ssh_authorized_keys"]
            keys.extend(cfgkeys)

        apply_credentials(keys, user, disable_root, disable_root_opts)
    except:
        util.logexc(log, "Applying ssh credentials failed!")
Example #42
def handle(name, cfg, cloud, log, args):
    # Get config
    lxd_cfg = cfg.get('lxd')
    if not lxd_cfg:
        log.debug("Skipping module named %s, not present or disabled by cfg")
        return
    if not isinstance(lxd_cfg, dict):
        log.warn("lxd config must be a dictionary. found a '%s'",
                 type(lxd_cfg))
        return

    # Grab the configuration
    init_cfg = lxd_cfg.get('init')
    if not isinstance(init_cfg, dict):
        log.warn("lxd/init config must be a dictionary. found a '%s'",
                 type(init_cfg))
        init_cfg = {}

    bridge_cfg = lxd_cfg.get('bridge')
    if not isinstance(bridge_cfg, dict):
        log.warn("lxd/bridge config must be a dictionary. found a '%s'",
                 type(bridge_cfg))
        bridge_cfg = {}

    # Install the needed packages
    packages = []
    if not util.which("lxd"):
        packages.append('lxd')

    if init_cfg.get("storage_backend") == "zfs" and not util.which('zfs'):
        packages.append('zfs')

    if len(packages):
        try:
            cloud.distro.install_packages(packages)
        except util.ProcessExecutionError as exc:
            log.warn("failed to install packages %s: %s", packages, exc)
            return

    # Set up lxd if init config is given
    if init_cfg:
        init_keys = (
            'network_address', 'network_port', 'storage_backend',
            'storage_create_device', 'storage_create_loop',
            'storage_pool', 'trust_password')
        cmd = ['lxd', 'init', '--auto']
        for k in init_keys:
            if init_cfg.get(k):
                cmd.extend(["--%s=%s" %
                            (k.replace('_', '-'), str(init_cfg[k]))])
        util.subp(cmd)

    # Set up lxd-bridge if bridge config is given
    dconf_comm = "debconf-communicate"
    if bridge_cfg and util.which(dconf_comm):
        debconf = bridge_to_debconf(bridge_cfg)

        # Update debconf database
        try:
            log.debug("Setting lxd debconf via " + dconf_comm)
            data = "\n".join(["set %s %s" % (k, v)
                              for k, v in debconf.items()]) + "\n"
            util.subp(['debconf-communicate'], data)
        except Exception:
            util.logexc(log, "Failed to run '%s' for lxd with" % dconf_comm)

        # Remove the existing configuration file (forces re-generation)
        util.del_file("/etc/default/lxd-bridge")

        # Run reconfigure
        log.debug("Running dpkg-reconfigure for lxd")
        util.subp(['dpkg-reconfigure', 'lxd',
                   '--frontend=noninteractive'])
    elif bridge_cfg:
        raise RuntimeError(
            "Unable to configure lxd bridge without %s." % dconf_comm)
Exemplo n.º 43
0
def persist_as3_declaration(declaration):
    """Write the f5-appsvcs-extension declaration to file"""
    as3_declaration_dir_exists()
    if os.path.isfile(AS3_DECLARATION_FILE):
        util.del_file(AS3_DECLARATION_FILE)
    util.write_file(AS3_DECLARATION_FILE, json.dumps(declaration))
Exemplo n.º 44
0
def handle(name, cfg, cloud, log, args):
    # Get config
    lxd_cfg = cfg.get('lxd')
    if not lxd_cfg:
        log.debug("Skipping module named %s, not present or disabled by cfg")
        return
    if not isinstance(lxd_cfg, dict):
        log.warn("lxd config must be a dictionary. found a '%s'",
                 type(lxd_cfg))
        return

    # Grab the configuration
    init_cfg = lxd_cfg.get('init')
    if not isinstance(init_cfg, dict):
        log.warn("lxd/init config must be a dictionary. found a '%s'",
                 type(init_cfg))
        init_cfg = {}

    bridge_cfg = lxd_cfg.get('bridge')
    if not isinstance(bridge_cfg, dict):
        log.warn("lxd/bridge config must be a dictionary. found a '%s'",
                 type(bridge_cfg))
        bridge_cfg = {}

    # Install the needed packages
    packages = []
    if not util.which("lxd"):
        packages.append('lxd')

    if init_cfg.get("storage_backend") == "zfs" and not util.which('zfs'):
        packages.append('zfs')

    if len(packages):
        try:
            cloud.distro.install_packages(packages)
        except util.ProcessExecutionError as exc:
            log.warn("failed to install packages %s: %s", packages, exc)
            return

    # Set up lxd if init config is given
    if init_cfg:
        init_keys = ('network_address', 'network_port', 'storage_backend',
                     'storage_create_device', 'storage_create_loop',
                     'storage_pool', 'trust_password')
        cmd = ['lxd', 'init', '--auto']
        for k in init_keys:
            if init_cfg.get(k):
                cmd.extend(
                    ["--%s=%s" % (k.replace('_', '-'), str(init_cfg[k]))])
        util.subp(cmd)

    # Set up lxd-bridge if bridge config is given
    dconf_comm = "debconf-communicate"
    if bridge_cfg and util.which(dconf_comm):
        debconf = bridge_to_debconf(bridge_cfg)

        # Update debconf database
        try:
            log.debug("Setting lxd debconf via " + dconf_comm)
            data = "\n".join(
                ["set %s %s" % (k, v) for k, v in debconf.items()]) + "\n"
            util.subp(['debconf-communicate'], data)
        except Exception:
            util.logexc(log, "Failed to run '%s' for lxd with" % dconf_comm)

        # Remove the existing configuration file (forces re-generation)
        util.del_file("/etc/default/lxd-bridge")

        # Run reconfigure
        log.debug("Running dpkg-reconfigure for lxd")
        util.subp(['dpkg-reconfigure', 'lxd', '--frontend=noninteractive'])
    elif bridge_cfg:
        raise RuntimeError("Unable to configure lxd bridge without %s." +
                           dconf_comm)
Exemplo n.º 45
0
    def __exit__(self, exc_type, exc_value, traceback):
        self.handle.close()
        util.del_file(self.handle.name)
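The __exit__ above only makes sense alongside its __enter__. A plausible pairing, assuming self.handle is a NamedTemporaryFile opened with delete=False (the class name and details here are hypothetical, not the original class):

import tempfile

from cloudinit import util  # assumed location of del_file


class TempFileCleaner(object):
    """Hypothetical context manager matching the __exit__ shown above."""

    def __enter__(self):
        # delete=False: the file survives close(); __exit__ removes it.
        self.handle = tempfile.NamedTemporaryFile(delete=False)
        return self.handle

    def __exit__(self, exc_type, exc_value, traceback):
        self.handle.close()
        util.del_file(self.handle.name)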
Exemplo n.º 46
0
def main_init(name, args):
    deps = [sources.DEP_FILESYSTEM, sources.DEP_NETWORK]
    if args.local:
        deps = [sources.DEP_FILESYSTEM]

    if not args.local:
        # See doc/kernel-cmdline.txt
        #
        # This is used in maas datasource, in "ephemeral" (read-only root)
        # environment where the instance netboots to iscsi ro root.
        # and the entity that controls the pxe config has to configure
        # the maas datasource.
        #
        # Could be used elsewhere, only works on network based (not local).
        root_name = "%s.d" % (CLOUD_CONFIG)
        target_fn = os.path.join(root_name, "91_kernel_cmdline_url.cfg")
        util.read_write_cmdline_url(target_fn)

    # Cloud-init 'init' stage is broken up into the following sub-stages
    # 1. Ensure that the init object fetches its config without errors
    # 2. Setup logging/output redirections with resultant config (if any)
    # 3. Initialize the cloud-init filesystem
    # 4. Check if we can stop early by looking for various files
    # 5. Fetch the datasource
    # 6. Connect to the current instance location + update the cache
    # 7. Consume the userdata (handlers get activated here)
    # 8. Construct the modules object
    # 9. Adjust any subsequent logging/output redirections using the modules
    #    objects config as it may be different from init object
    # 10. Run the modules for the 'init' stage
    # 11. Done!
    if not args.local:
        w_msg = welcome_format(name)
    else:
        w_msg = welcome_format("%s-local" % (name))
    init = stages.Init(ds_deps=deps, reporter=args.reporter)
    # Stage 1
    init.read_cfg(extract_fns(args))
    # Stage 2
    outfmt = None
    errfmt = None
    try:
        LOG.debug("Closing stdin")
        util.close_stdin()
        (outfmt, errfmt) = util.fixup_output(init.cfg, name)
    except Exception:
        util.logexc(LOG, "Failed to setup output redirection!")
        print_exc("Failed to setup output redirection!")
    if args.debug:
        # Reset so that all the debug handlers are closed out
        LOG.debug(("Logging being reset, this logger may no"
                   " longer be active shortly"))
        logging.resetLogging()
    logging.setupLogging(init.cfg)
    apply_reporting_cfg(init.cfg)

    # Any log usage prior to setupLogging above did not have local user log
    # config applied.  We send the welcome message now, as stderr/out have
    # been redirected and log now configured.
    welcome(name, msg=w_msg)

    # Stage 3
    try:
        init.initialize()
    except Exception:
        util.logexc(LOG, "Failed to initialize, likely bad things to come!")
    # Stage 4
    path_helper = init.paths
    mode = sources.DSMODE_LOCAL if args.local else sources.DSMODE_NETWORK

    if mode == sources.DSMODE_NETWORK:
        existing = "trust"
        sys.stderr.write("%s\n" % (netinfo.debug_info()))
        LOG.debug(("Checking to see if files that we need already"
                   " exist from a previous run that would allow us"
                   " to stop early."))
        # no-net is written by upstart cloud-init-nonet when network failed
        # to come up
        stop_files = [
            os.path.join(path_helper.get_cpath("data"), "no-net"),
        ]
        existing_files = []
        for fn in stop_files:
            if os.path.isfile(fn):
                existing_files.append(fn)

        if existing_files:
            LOG.debug("[%s] Exiting. stop file %s existed",
                      mode, existing_files)
            return (None, [])
        else:
            LOG.debug("Execution continuing, no previous run detected that"
                      " would allow us to stop early.")
    else:
        existing = "check"
        if util.get_cfg_option_bool(init.cfg, 'manual_cache_clean', False):
            existing = "trust"

        init.purge_cache()
        # Delete the non-net file as well
        util.del_file(os.path.join(path_helper.get_cpath("data"), "no-net"))

    # Stage 5
    try:
        init.fetch(existing=existing)
        # if in network mode, and the datasource is local
        # then work was done at that stage.
        if mode == sources.DSMODE_NETWORK and init.datasource.dsmode != mode:
            LOG.debug("[%s] Exiting. datasource %s in local mode",
                      mode, init.datasource)
            return (None, [])
    except sources.DataSourceNotFoundException:
        # In the case of 'cloud-init init' without '--local' it is a bit
        # more likely that the user would consider it failure if nothing was
        # found. When using upstart it will also mention job failure
        # in the console log if the exit code is != 0.
        if mode == sources.DSMODE_LOCAL:
            LOG.debug("No local datasource found")
        else:
            util.logexc(LOG, ("No instance datasource found!"
                              " Likely bad things to come!"))
        if not args.force:
            init.apply_network_config(bring_up=not args.local)
            LOG.debug("[%s] Exiting without datasource in local mode", mode)
            if mode == sources.DSMODE_LOCAL:
                return (None, [])
            else:
                return (None, ["No instance datasource found."])
        else:
            LOG.debug("[%s] barreling on in force mode without datasource",
                      mode)

    # Stage 6
    iid = init.instancify()
    LOG.debug("[%s] %s will now be targeting instance id: %s. new=%s",
              mode, name, iid, init.is_new_instance())

    init.apply_network_config(bring_up=bool(mode != sources.DSMODE_LOCAL))

    if mode == sources.DSMODE_LOCAL:
        if init.datasource.dsmode != mode:
            LOG.debug("[%s] Exiting. datasource %s not in local mode.",
                      mode, init.datasource)
            return (init.datasource, [])
        else:
            LOG.debug("[%s] %s is in local mode, will apply init modules now.",
                      mode, init.datasource)

    # update fully realizes user-data (pulling in #include if necessary)
    init.update()
    # Stage 7
    try:
        # Attempt to consume the data per instance.
        # This may run user-data handlers and/or perform
        # url downloads and such as needed.
        (ran, _results) = init.cloudify().run('consume_data',
                                              init.consume_data,
                                              args=[PER_INSTANCE],
                                              freq=PER_INSTANCE)
        if not ran:
            # Just consume anything that is set to run per-always
            # if nothing ran in the per-instance code
            #
            # See: https://bugs.launchpad.net/bugs/819507 for a little
            # reason behind this...
            init.consume_data(PER_ALWAYS)
    except Exception:
        util.logexc(LOG, "Consuming user data failed!")
        return (init.datasource, ["Consuming user data failed!"])

    apply_reporting_cfg(init.cfg)

    # Stage 8 - re-read and apply relevant cloud-config to include user-data
    mods = stages.Modules(init, extract_fns(args), reporter=args.reporter)
    # Stage 9
    try:
        outfmt_orig = outfmt
        errfmt_orig = errfmt
        (outfmt, errfmt) = util.get_output_cfg(mods.cfg, name)
        if outfmt_orig != outfmt or errfmt_orig != errfmt:
            LOG.warn("Stdout, stderr changing to (%s, %s)", outfmt, errfmt)
            (outfmt, errfmt) = util.fixup_output(mods.cfg, name)
    except Exception:
        util.logexc(LOG, "Failed to re-adjust output redirection!")
    logging.setupLogging(mods.cfg)

    # Stage 10
    return (init.datasource, run_module_section(mods, name, name))
Exemplo n.º 47
0
def clean():
    """Remove any onboarding artifacts"""
    if REMOVE_DHCP_LEASE_FILES:
        lease_files = os.listdir(DHCP_LEASE_DIR)
        for lease_file in lease_files:
            util.del_file("%s/%s" % (DHCP_LEASE_DIR, lease_file))
Exemplo n.º 48
0
    def tearDown(self):
        apply_patches([i for i in reversed(self.unapply)])
        util.del_file(self._seed_file)
Exemplo n.º 49
0
    def tearDown(self):
        util.del_file(self._seed_file)
Exemplo n.º 50
0
def status_wrapper(name, args, data_d=None, link_d=None):
    if data_d is None:
        data_d = os.path.normpath("/var/lib/cloud/data")
    if link_d is None:
        link_d = os.path.normpath("/run/cloud-init")

    status_path = os.path.join(data_d, "status.json")
    status_link = os.path.join(link_d, "status.json")
    result_path = os.path.join(data_d, "result.json")
    result_link = os.path.join(link_d, "result.json")

    util.ensure_dirs((data_d, link_d,))

    (_name, functor) = args.action

    if name == "init":
        if args.local:
            mode = "init-local"
        else:
            mode = "init"
    elif name == "modules":
        mode = "modules-%s" % args.mode
    else:
        raise ValueError("unknown name: %s" % name)

    modes = ('init', 'init-local', 'modules-config', 'modules-final')

    status = None
    if mode == 'init-local':
        for f in (status_link, result_link, status_path, result_path):
            util.del_file(f)
    else:
        try:
            status = json.loads(util.load_file(status_path))
        except Exception:
            pass

    if status is None:
        nullstatus = {
            'errors': [],
            'start': None,
            'finished': None,
        }
        status = {'v1': {}}
        for m in modes:
            status['v1'][m] = nullstatus.copy()
        status['v1']['datasource'] = None

    v1 = status['v1']
    v1['stage'] = mode
    v1[mode]['start'] = time.time()

    atomic_write_json(status_path, status)
    util.sym_link(os.path.relpath(status_path, link_d), status_link,
                  force=True)

    try:
        ret = functor(name, args)
        if mode in ('init', 'init-local'):
            (datasource, errors) = ret
            if datasource is not None:
                v1['datasource'] = str(datasource)
        else:
            errors = ret

        v1[mode]['errors'] = [str(e) for e in errors]

    except Exception as e:
        util.logexc(LOG, "failed stage %s", mode)
        print_exc("failed run of stage %s" % mode)
        v1[mode]['errors'] = [str(e)]

    v1[mode]['finished'] = time.time()
    v1['stage'] = None

    atomic_write_json(status_path, status)

    if mode == "modules-final":
        # write the 'finished' file
        errors = []
        for m in modes:
            if v1[m]['errors']:
                errors.extend(v1[m].get('errors', []))

        atomic_write_json(result_path,
                          {'v1': {'datasource': v1['datasource'],
                                  'errors': errors}})
        util.sym_link(os.path.relpath(result_path, link_d), result_link,
                      force=True)

    return len(v1[mode]['errors'])
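status_wrapper leans on atomic_write_json so that status.json is never seen half-written. Presumably it follows the usual write-to-temp-then-rename pattern; the following is a sketch under that assumption, not the helper's actual source:

import json
import os
import tempfile


def atomic_write_json(path, data):
    # Sketch: write to a temp file in the same directory, then rename.
    # rename() within one filesystem is atomic, so readers always see
    # either the old or the new file, never a partial one.
    tf = None
    try:
        tf = tempfile.NamedTemporaryFile(dir=os.path.dirname(path),
                                         delete=False, mode='w')
        tf.write(json.dumps(data, indent=1) + "\n")
        tf.close()
        os.rename(tf.name, path)
    except Exception:
        if tf is not None:
            os.unlink(tf.name)
        raise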
Exemplo n.º 51
0
    def __exit__(self, exc_type, exc_value, traceback):
        self.handle.close()
        util.del_file(self.handle.name)
Exemplo n.º 52
0
    def get_data(self):
        # azure removes/ejects the cdrom containing the ovf-env.xml
        # file on reboot.  So, in order to successfully reboot we
        # need to look in the datadir and consider that valid
        ddir = self.ds_cfg['data_dir']

        candidates = [self.seed_dir]
        candidates.extend(list_possible_azure_ds_devs())
        if ddir:
            candidates.append(ddir)

        found = None

        for cdev in candidates:
            try:
                if cdev.startswith("/dev/"):
                    ret = util.mount_cb(cdev, load_azure_ds_dir)
                else:
                    ret = load_azure_ds_dir(cdev)

            except NonAzureDataSource:
                continue
            except BrokenAzureDataSource as exc:
                raise exc
            except util.MountFailedError:
                LOG.warn("%s was not mountable", cdev)
                continue

            (md, self.userdata_raw, cfg, files) = ret
            self.seed = cdev
            self.metadata = util.mergemanydict([md, IB_DEFAULT_METADATA])
            self.cfg = util.mergemanydict([cfg, IB_BUILTIN_CLOUD_CONFIG])
            found = cdev

            LOG.debug("found datasource in %s", cdev)
            break

        if not found:
            return False

        if found == ddir:
            LOG.debug("using files cached in %s", ddir)

        # azure / hyper-v provides random data here
        seed = util.load_file("/sys/firmware/acpi/tables/OEM0", quiet=True)
        if seed:
            self.metadata['random_seed'] = seed

        # now update ds_cfg to reflect contents passed in config
        user_ds_cfg = util.get_cfg_by_path(self.cfg, IB_DS_CFG_PATH, {})
        self.ds_cfg = util.mergemanydict([user_ds_cfg, self.ds_cfg])
        mycfg = self.ds_cfg
        ddir = mycfg['data_dir']

        if found != ddir:
            cached_ovfenv = util.load_file(os.path.join(ddir, 'ovf-env.xml'),
                                           quiet=True)
            if cached_ovfenv != files['ovf-env.xml']:
                # source was not walinux-agent's datadir, so we have to clean
                # up so 'wait_for_files' doesn't return early due to stale data
                cleaned = []
                for f in [
                        os.path.join(ddir, f) for f in IB_DATA_DIR_CLEAN_LIST
                ]:
                    if os.path.exists(f):
                        util.del_file(f)
                        cleaned.append(f)
                if cleaned:
                    LOG.info("removed stale file(s) in '%s': %s", ddir,
                             str(cleaned))

        # walinux agent writes files world readable, but expects
        # the directory to be protected.
        write_files(ddir, files, dirmode=0o700)

        # handle the hostname 'publishing'
        try:
            handle_set_hostname(mycfg.get('set_hostname'),
                                self.metadata.get('local-hostname'),
                                mycfg['hostname_bounce'])
        except Exception as e:
            LOG.warn("Failed publishing hostname: %s", e)
            util.logexc(LOG, "handling set_hostname failed")

        try:
            invoke_agent(mycfg['agent_command'])
        except util.ProcessExecutionError:
            # claim the datasource even if the command failed
            util.logexc(LOG, "agent command '%s' failed.",
                        mycfg['agent_command'])

        # code is commented out because we have no WAAgent and
        #   we do not use SSH keys. Thus, no files to wait for.

        # shcfgxml = os.path.join(ddir, "SharedConfig.xml")
        # wait_for = [shcfgxml]

        # fp_files = []
        # for pk in self.cfg.get('_pubkeys', []):
        #     bname = str(pk['fingerprint'] + ".crt")
        #     fp_files += [os.path.join(ddir, bname)]

        # missing = util.log_time(logfunc=LOG.debug, msg="waiting for files",
        #                         func=wait_for_files,
        #                         args=(wait_for + fp_files,))
        # if len(missing):
        #     LOG.warn("Did not find files, but going on: %s", missing)

        # if shcfgxml in missing:
        #     LOG.warn("SharedConfig.xml missing, using static instance-id")
        # else:
        #     try:
        #         self.metadata['instance-id'] = \
        #             iid_from_shared_config(shcfgxml)
        #     except ValueError as e:
        #         LOG.warn("failed to get instance id in %s: %s", shcfgxml, e)

        # pubkeys = pubkeys_from_crt_files(fp_files)
        # self.metadata['public-keys'] = pubkeys

        found_ephemeral = find_ephemeral_disk()
        if found_ephemeral:
            self.ds_cfg['disk_aliases']['ephemeral0'] = found_ephemeral
            LOG.debug("using detected ephemeral0 of %s", found_ephemeral)

        cc_modules_override = support_new_ephemeral(self.sys_cfg)
        if cc_modules_override:
            self.cfg['cloud_config_modules'] = cc_modules_override

        return True
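write_files(ddir, files, dirmode=...) is not shown in this example; per the comment above the call, its job is to create the data directory with a protective mode and write each payload file into it. A minimal sketch consistent with that, assuming cloud-init's util helpers behave as they do elsewhere on this page:

import os

from cloudinit import util  # assumed import


def write_files(datadir, files, dirmode=None):
    # Sketch: create the data dir with the restrictive mode (e.g. 0o700)
    # and drop each named payload into it.
    if not datadir or not files:
        return
    util.ensure_dir(datadir, dirmode)
    for name, content in files.items():
        util.write_file(os.path.join(datadir, name), content, mode=0o600)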
Exemplo n.º 53
0
def persist_ts_declaration(declaration):
    """Write the f5-telemetry-streaming declaration to file"""
    ts_declaration_dir_exists()
    if os.path.isfile(TS_DECLARATION_FILE):
        util.del_file(TS_DECLARATION_FILE)
    util.write_file(TS_DECLARATION_FILE, json.dumps(declaration))
Exemplo n.º 54
0
    def get_data(self):
        # azure removes/ejects the cdrom containing the ovf-env.xml
        # file on reboot.  So, in order to successfully reboot we
        # need to look in the datadir and consider that valid
        ddir = self.ds_cfg['data_dir']

        candidates = [self.seed_dir]
        candidates.extend(list_possible_azure_ds_devs())
        if ddir:
            candidates.append(ddir)

        found = None

        for cdev in candidates:
            try:
                if cdev.startswith("/dev/"):
                    ret = util.mount_cb(cdev, load_azure_ds_dir)
                else:
                    ret = load_azure_ds_dir(cdev)

            except NonAzureDataSource:
                continue
            except BrokenAzureDataSource as exc:
                raise exc
            except util.MountFailedError:
                LOG.warn("%s was not mountable", cdev)
                continue

            (md, self.userdata_raw, cfg, files) = ret
            self.seed = cdev
            self.metadata = util.mergemanydict([md, DEFAULT_METADATA])
            self.cfg = util.mergemanydict([cfg, BUILTIN_CLOUD_CONFIG])
            found = cdev

            LOG.debug("found datasource in %s", cdev)
            break

        if not found:
            return False

        if found == ddir:
            LOG.debug("using files cached in %s", ddir)

        # azure / hyper-v provides random data here
        seed = util.load_file("/sys/firmware/acpi/tables/OEM0",
                              quiet=True, decode=False)
        if seed:
            self.metadata['random_seed'] = seed

        # now update ds_cfg to reflect contents passed in config
        user_ds_cfg = util.get_cfg_by_path(self.cfg, DS_CFG_PATH, {})
        self.ds_cfg = util.mergemanydict([user_ds_cfg, self.ds_cfg])
        mycfg = self.ds_cfg
        ddir = mycfg['data_dir']

        if found != ddir:
            cached_ovfenv = util.load_file(
                os.path.join(ddir, 'ovf-env.xml'), quiet=True, decode=False)
            if cached_ovfenv != files['ovf-env.xml']:
                # source was not walinux-agent's datadir, so we have to clean
                # up so 'wait_for_files' doesn't return early due to stale data
                cleaned = []
                for f in [os.path.join(ddir, f) for f in DATA_DIR_CLEAN_LIST]:
                    if os.path.exists(f):
                        util.del_file(f)
                        cleaned.append(f)
                if cleaned:
                    LOG.info("removed stale file(s) in '%s': %s",
                             ddir, str(cleaned))

        # walinux agent writes files world readable, but expects
        # the directory to be protected.
        write_files(ddir, files, dirmode=0o700)

        # handle the hostname 'publishing'
        try:
            handle_set_hostname(mycfg.get('set_hostname'),
                                self.metadata.get('local-hostname'),
                                mycfg['hostname_bounce'])
        except Exception as e:
            LOG.warn("Failed publishing hostname: %s", e)
            util.logexc(LOG, "handling set_hostname failed")

        try:
            invoke_agent(mycfg['agent_command'])
        except util.ProcessExecutionError:
            # claim the datasource even if the command failed
            util.logexc(LOG, "agent command '%s' failed.",
                        mycfg['agent_command'])

        shcfgxml = os.path.join(ddir, "SharedConfig.xml")
        wait_for = [shcfgxml]

        fp_files = []
        for pk in self.cfg.get('_pubkeys', []):
            bname = str(pk['fingerprint'] + ".crt")
            fp_files += [os.path.join(ddir, bname)]

        missing = util.log_time(logfunc=LOG.debug, msg="waiting for files",
                                func=wait_for_files,
                                args=(wait_for + fp_files,))
        if len(missing):
            LOG.warn("Did not find files, but going on: %s", missing)

        if shcfgxml in missing:
            LOG.warn("SharedConfig.xml missing, using static instance-id")
        else:
            try:
                self.metadata['instance-id'] = iid_from_shared_config(shcfgxml)
            except ValueError as e:
                LOG.warn("failed to get instance id in %s: %s", shcfgxml, e)

        pubkeys = pubkeys_from_crt_files(fp_files)
        self.metadata['public-keys'] = pubkeys

        found_ephemeral = find_ephemeral_disk()
        if found_ephemeral:
            self.ds_cfg['disk_aliases']['ephemeral0'] = found_ephemeral
            LOG.debug("using detected ephemeral0 of %s", found_ephemeral)

        cc_modules_override = support_new_ephemeral(self.sys_cfg)
        if cc_modules_override:
            self.cfg['cloud_config_modules'] = cc_modules_override

        return True
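pubkeys_from_crt_files(fp_files) converts the .crt fingerprint files that the fabric drops into ddir into OpenSSH public keys. Historically this has been done by shelling out to openssl and ssh-keygen; treat the exact pipeline below as an assumption rather than the module's definitive code:

import logging

from cloudinit import util  # assumed import, matching the examples above

LOG = logging.getLogger(__name__)


def crtfile_to_pubkey(fname):
    # openssl extracts the PEM public key from the certificate; ssh-keygen
    # then converts it into the one-line OpenSSH format.
    pipeline = ('openssl x509 -noout -pubkey < "$0" | '
                'ssh-keygen -i -m PKCS8 -f /dev/stdin')
    (out, _err) = util.subp(['sh', '-c', pipeline, fname], capture=True)
    return out.rstrip()


def pubkeys_from_crt_files(flist):
    pubkeys = []
    errors = []
    for fname in flist:
        try:
            pubkeys.append(crtfile_to_pubkey(fname))
        except util.ProcessExecutionError:
            errors.append(fname)
    if errors:
        LOG.warning("failed to convert the crt files %s to pubkey", errors)
    return pubkeys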
Exemplo n.º 55
0
def handle(_name, cfg, cloud, log, _args):

    # remove the static keys from the pristine image
    if cfg.get("ssh_deletekeys", True):
        key_pth = os.path.join("/etc/ssh/", "ssh_host_*key*")
        for f in glob.glob(key_pth):
            try:
                util.del_file(f)
            except Exception:
                util.logexc(log, "Failed deleting key file %s", f)

    if "ssh_keys" in cfg:
        # if there are keys in cloud-config, use them
        for (key, val) in cfg["ssh_keys"].items():
            if key in CONFIG_KEY_TO_FILE:
                tgt_fn = CONFIG_KEY_TO_FILE[key][0]
                tgt_perms = CONFIG_KEY_TO_FILE[key][1]
                util.write_file(tgt_fn, val, tgt_perms)

        for (priv, pub) in PRIV_TO_PUB.items():
            if pub in cfg['ssh_keys'] or priv not in cfg['ssh_keys']:
                continue
            pair = (CONFIG_KEY_TO_FILE[priv][0], CONFIG_KEY_TO_FILE[pub][0])
            cmd = ['sh', '-xc', KEY_GEN_TPL % pair]
            try:
                # TODO(harlowja): Is this guard needed?
                with util.SeLinuxGuard("/etc/ssh", recursive=True):
                    util.subp(cmd, capture=False)
                log.debug("Generated a key for %s from %s", pair[0], pair[1])
            except Exception:
                util.logexc(log, "Failed generating a key for %s from %s",
                            pair[0], pair[1])
    else:
        # if not, generate them
        genkeys = util.get_cfg_option_list(cfg,
                                           'ssh_genkeytypes',
                                           GENERATE_KEY_NAMES)
        lang_c = os.environ.copy()
        lang_c['LANG'] = 'C'
        for keytype in genkeys:
            keyfile = KEY_FILE_TPL % (keytype)
            if os.path.exists(keyfile):
                continue
            util.ensure_dir(os.path.dirname(keyfile))
            cmd = ['ssh-keygen', '-t', keytype, '-N', '', '-f', keyfile]

            # TODO(harlowja): Is this guard needed?
            with util.SeLinuxGuard("/etc/ssh", recursive=True):
                try:
                    out, err = util.subp(cmd, capture=True, env=lang_c)
                    sys.stdout.write(util.decode_binary(out))
                except util.ProcessExecutionError as e:
                    err = util.decode_binary(e.stderr).lower()
                    if (e.exit_code == 1 and
                            err.lower().startswith("unknown key")):
                        log.debug("ssh-keygen: unknown key type '%s'", keytype)
                    else:
                        util.logexc(log, "Failed generating key type %s to "
                                    "file %s", keytype, keyfile)

    try:
        (users, _groups) = ds.normalize_users_groups(cfg, cloud.distro)
        (user, _user_config) = ds.extract_default(users)
        disable_root = util.get_cfg_option_bool(cfg, "disable_root", True)
        disable_root_opts = util.get_cfg_option_str(cfg, "disable_root_opts",
                                                    DISABLE_ROOT_OPTS)

        keys = cloud.get_public_ssh_keys() or []
        if "ssh_authorized_keys" in cfg:
            cfgkeys = cfg["ssh_authorized_keys"]
            keys.extend(cfgkeys)

        apply_credentials(keys, user, disable_root, disable_root_opts)
    except Exception:
        util.logexc(log, "Applying ssh credentials failed!")
Exemplo n.º 56
0
    def get_data(self):
        # azure removes/ejects the cdrom containing the ovf-env.xml
        # file on reboot.  So, in order to successfully reboot we
        # need to look in the datadir and consider that valid
        ddir = self.ds_cfg['data_dir']

        candidates = [self.seed_dir]
        candidates.extend(list_possible_azure_ds_devs())
        if ddir:
            candidates.append(ddir)

        found = None

        for cdev in candidates:
            try:
                if cdev.startswith("/dev/"):
                    ret = util.mount_cb(cdev, load_azure_ds_dir)
                else:
                    ret = load_azure_ds_dir(cdev)

            except NonAzureDataSource:
                continue
            except BrokenAzureDataSource as exc:
                raise exc
            except util.MountFailedError:
                LOG.warn("%s was not mountable", cdev)
                continue

            (md, self.userdata_raw, cfg, files) = ret
            self.seed = cdev
            self.metadata = util.mergemanydict([md, DEFAULT_METADATA])
            self.cfg = util.mergemanydict([cfg, BUILTIN_CLOUD_CONFIG])
            found = cdev

            LOG.debug("found datasource in %s", cdev)
            break

        if not found:
            return False

        if found == ddir:
            LOG.debug("using files cached in %s", ddir)

        # azure / hyper-v provides random data here
        seed = util.load_file("/sys/firmware/acpi/tables/OEM0",
                              quiet=True, decode=False)
        if seed:
            self.metadata['random_seed'] = seed

        # now update ds_cfg to reflect contents passed in config
        user_ds_cfg = util.get_cfg_by_path(self.cfg, DS_CFG_PATH, {})
        self.ds_cfg = util.mergemanydict([user_ds_cfg, self.ds_cfg])

        if found != ddir:
            cached_ovfenv = util.load_file(
                os.path.join(ddir, 'ovf-env.xml'), quiet=True, decode=False)
            if cached_ovfenv != files['ovf-env.xml']:
                # source was not walinux-agent's datadir, so we have to clean
                # up so 'wait_for_files' doesn't return early due to stale data
                cleaned = []
                for f in [os.path.join(ddir, f) for f in DATA_DIR_CLEAN_LIST]:
                    if os.path.exists(f):
                        util.del_file(f)
                        cleaned.append(f)
                if cleaned:
                    LOG.info("removed stale file(s) in '%s': %s",
                             ddir, str(cleaned))

        # walinux agent writes files world readable, but expects
        # the directory to be protected.
        write_files(ddir, files, dirmode=0o700)

        if self.ds_cfg['agent_command'] == '__builtin__':
            metadata_func = get_metadata_from_fabric
        else:
            metadata_func = self.get_metadata_from_agent
        try:
            fabric_data = metadata_func()
        except Exception as exc:
            LOG.info("Error communicating with Azure fabric; assume we aren't"
                     " on Azure.", exc_info=True)
            return False

        self.metadata.update(fabric_data)

        found_ephemeral = find_fabric_formatted_ephemeral_disk()
        if found_ephemeral:
            self.ds_cfg['disk_aliases']['ephemeral0'] = found_ephemeral
            LOG.debug("using detected ephemeral0 of %s", found_ephemeral)

        cc_modules_override = support_new_ephemeral(self.sys_cfg)
        if cc_modules_override:
            self.cfg['cloud_config_modules'] = cc_modules_override

        return True
Exemplo n.º 57
0
def handle(_name, cfg, cloud, log, _args):

    # remove the static keys from the pristine image
    if cfg.get("ssh_deletekeys", True):
        key_pth = os.path.join("/etc/ssh/", "ssh_host_*key*")
        for f in glob.glob(key_pth):
            try:
                util.del_file(f)
            except Exception:
                util.logexc(log, "Failed deleting key file %s", f)

    if "ssh_keys" in cfg:
        # if there are keys in cloud-config, use them
        for (key, val) in cfg["ssh_keys"].items():
            if key in CONFIG_KEY_TO_FILE:
                tgt_fn = CONFIG_KEY_TO_FILE[key][0]
                tgt_perms = CONFIG_KEY_TO_FILE[key][1]
                util.write_file(tgt_fn, val, tgt_perms)

        for (priv, pub) in PRIV_TO_PUB.items():
            if pub in cfg['ssh_keys'] or priv not in cfg['ssh_keys']:
                continue
            pair = (CONFIG_KEY_TO_FILE[priv][0], CONFIG_KEY_TO_FILE[pub][0])
            cmd = ['sh', '-xc', KEY_GEN_TPL % pair]
            try:
                # TODO(harlowja): Is this guard needed?
                with util.SeLinuxGuard("/etc/ssh", recursive=True):
                    subp.subp(cmd, capture=False)
                log.debug("Generated a key for %s from %s", pair[0], pair[1])
            except Exception:
                util.logexc(log, "Failed generated a key for %s from %s",
                            pair[0], pair[1])
    else:
        # if not, generate them
        genkeys = util.get_cfg_option_list(cfg, 'ssh_genkeytypes',
                                           GENERATE_KEY_NAMES)
        lang_c = os.environ.copy()
        lang_c['LANG'] = 'C'
        for keytype in genkeys:
            keyfile = KEY_FILE_TPL % (keytype)
            if os.path.exists(keyfile):
                continue
            util.ensure_dir(os.path.dirname(keyfile))
            cmd = ['ssh-keygen', '-t', keytype, '-N', '', '-f', keyfile]

            # TODO(harlowja): Is this guard needed?
            with util.SeLinuxGuard("/etc/ssh", recursive=True):
                try:
                    out, err = subp.subp(cmd, capture=True, env=lang_c)
                    sys.stdout.write(util.decode_binary(out))
                except subp.ProcessExecutionError as e:
                    err = util.decode_binary(e.stderr).lower()
                    if (e.exit_code == 1
                            and err.lower().startswith("unknown key")):
                        log.debug("ssh-keygen: unknown key type '%s'", keytype)
                    else:
                        util.logexc(
                            log, "Failed generating key type %s to "
                            "file %s", keytype, keyfile)

    if "ssh_publish_hostkeys" in cfg:
        host_key_blacklist = util.get_cfg_option_list(
            cfg["ssh_publish_hostkeys"], "blacklist",
            HOST_KEY_PUBLISH_BLACKLIST)
        publish_hostkeys = util.get_cfg_option_bool(
            cfg["ssh_publish_hostkeys"], "enabled", PUBLISH_HOST_KEYS)
    else:
        host_key_blacklist = HOST_KEY_PUBLISH_BLACKLIST
        publish_hostkeys = PUBLISH_HOST_KEYS

    if publish_hostkeys:
        hostkeys = get_public_host_keys(blacklist=host_key_blacklist)
        try:
            cloud.datasource.publish_host_keys(hostkeys)
        except Exception:
            util.logexc(log, "Publishing host keys failed!")

    try:
        (users, _groups) = ug_util.normalize_users_groups(cfg, cloud.distro)
        (user, _user_config) = ug_util.extract_default(users)
        disable_root = util.get_cfg_option_bool(cfg, "disable_root", True)
        disable_root_opts = util.get_cfg_option_str(cfg, "disable_root_opts",
                                                    ssh_util.DISABLE_USER_OPTS)

        keys = []
        if util.get_cfg_option_bool(cfg, 'allow_public_ssh_keys', True):
            keys = cloud.get_public_ssh_keys() or []
        else:
            log.debug('Skipping import of public SSH keys per '
                      'config setting: allow_public_ssh_keys=False')

        if "ssh_authorized_keys" in cfg:
            cfgkeys = cfg["ssh_authorized_keys"]
            keys.extend(cfgkeys)

        apply_credentials(keys, user, disable_root, disable_root_opts)
    except Exception:
        util.logexc(log, "Applying SSH credentials failed!")
Exemplo n.º 58
0
def handle(_name, cfg, cloud, log, _args):

    # remove the static keys from the pristine image
    if cfg.get("ssh_deletekeys", True):
        key_pth = os.path.join("/etc/ssh/", "ssh_host_*key*")
        for f in glob.glob(key_pth):
            try:
                util.del_file(f)
            except Exception:
                util.logexc(log, "Failed deleting key file %s", f)

    if "ssh_keys" in cfg:
        # if there are keys in cloud-config, use them
        for (key, val) in cfg["ssh_keys"].iteritems():
            if key in KEY_2_FILE:
                tgt_fn = KEY_2_FILE[key][0]
                tgt_perms = KEY_2_FILE[key][1]
                util.write_file(tgt_fn, val, tgt_perms)

        for (priv, pub) in PRIV_2_PUB.iteritems():
            if pub in cfg['ssh_keys'] or priv not in cfg['ssh_keys']:
                continue
            pair = (KEY_2_FILE[priv][0], KEY_2_FILE[pub][0])
            cmd = ['sh', '-xc', KEY_GEN_TPL % pair]
            try:
                # TODO(harlowja): Is this guard needed?
                with util.SeLinuxGuard("/etc/ssh", recursive=True):
                    util.subp(cmd, capture=False)
                log.debug("Generated a key for %s from %s", pair[0], pair[1])
            except Exception:
                util.logexc(log, "Failed generating a key for %s from %s",
                            pair[0], pair[1])
    else:
        # if not, generate them
        genkeys = util.get_cfg_option_list(cfg,
                                           'ssh_genkeytypes',
                                           GENERATE_KEY_NAMES)
        for keytype in genkeys:
            keyfile = KEY_FILE_TPL % (keytype)
            util.ensure_dir(os.path.dirname(keyfile))
            if not os.path.exists(keyfile):
                cmd = ['ssh-keygen', '-t', keytype, '-N', '', '-f', keyfile]
                try:
                    # TODO(harlowja): Is this guard needed?
                    with util.SeLinuxGuard("/etc/ssh", recursive=True):
                        util.subp(cmd, capture=False)
                except Exception:
                    util.logexc(log, "Failed generating key type %s to "
                                "file %s", keytype, keyfile)

    try:
        (users, _groups) = ds.normalize_users_groups(cfg, cloud.distro)
        (user, _user_config) = ds.extract_default(users)
        disable_root = util.get_cfg_option_bool(cfg, "disable_root", True)
        disable_root_opts = util.get_cfg_option_str(cfg, "disable_root_opts",
                                                    DISABLE_ROOT_OPTS)

        keys = cloud.get_public_ssh_keys() or []
        if "ssh_authorized_keys" in cfg:
            cfgkeys = cfg["ssh_authorized_keys"]
            keys.extend(cfgkeys)

        apply_credentials(keys, user, disable_root, disable_root_opts)
    except Exception:
        util.logexc(log, "Applying ssh credentials failed!")