Example #1
def install_chef(cloud, chef_cfg, log):
    # If chef is not installed, we install chef based on 'install_type'
    install_type = util.get_cfg_option_str(chef_cfg, 'install_type',
                                           'packages')
    run = util.get_cfg_option_bool(chef_cfg, 'exec', default=False)
    if install_type == "gems":
        # This will install and run the chef-client from gems
        chef_version = util.get_cfg_option_str(chef_cfg, 'version', None)
        ruby_version = util.get_cfg_option_str(chef_cfg, 'ruby_version',
                                               RUBY_VERSION_DEFAULT)
        install_chef_from_gems(ruby_version, chef_version, cloud.distro)
        # Retain backwards compat, by preferring True instead of False
        # when not provided/overridden...
        run = util.get_cfg_option_bool(chef_cfg, 'exec', default=True)
    elif install_type == 'packages':
        # This will install and run the chef-client from packages
        cloud.distro.install_packages(('chef', ))
    elif install_type == 'omnibus':
        omnibus_version = util.get_cfg_option_str(chef_cfg, "omnibus_version")
        install_chef_from_omnibus(
            url=util.get_cfg_option_str(chef_cfg, "omnibus_url"),
            retries=util.get_cfg_option_int(chef_cfg, "omnibus_url_retries"),
            omnibus_version=omnibus_version)
    else:
        log.warn("Unknown chef install type '%s'", install_type)
        run = False
    return run
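Most of the examples in this collection revolve around the util.get_cfg_option_bool / get_cfg_option_str / get_cfg_option_int helpers. As background, here is a minimal, self-contained sketch of what such helpers typically do, inferred only from how they are called in these excerpts; it is not the actual cloud-init util implementation and the exact coercion rules are assumptions.

def get_cfg_option_str(cfg, key, default=None):
    # Sketch: dict lookup with a default, coerced to str unless missing/None.
    val = cfg.get(key, default)
    return val if val is None else str(val)


def get_cfg_option_bool(cfg, key, default=False):
    # Sketch: pass None/bool through, loosely interpret strings, else bool().
    val = cfg.get(key, default)
    if val is None or isinstance(val, bool):
        return val
    if isinstance(val, str):
        return val.lower() in ('true', '1', 'on', 'yes')
    return bool(val)


if __name__ == '__main__':
    chef_cfg = {'install_type': 'omnibus', 'exec': 'yes'}
    print(get_cfg_option_str(chef_cfg, 'install_type', 'packages'))  # omnibus
    print(get_cfg_option_bool(chef_cfg, 'exec', default=False))      # True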
Example #2
def handle(name, cfg, cloud, log, _args):
    if util.get_cfg_option_bool(cfg, "preserve_hostname", False):
        log.debug(
            "Configuration option 'preserve_hostname' is set,"
            " not updating the hostname in module %s",
            name,
        )
        return

    # Set prefer_fqdn_over_hostname value in distro
    hostname_fqdn = util.get_cfg_option_bool(cfg, "prefer_fqdn_over_hostname",
                                             None)
    if hostname_fqdn is not None:
        cloud.distro.set_option("prefer_fqdn_over_hostname", hostname_fqdn)

    (hostname, fqdn, is_default) = util.get_hostname_fqdn(cfg, cloud)
    if is_default and hostname == "localhost":
        # https://github.com/systemd/systemd/commit/d39079fcaa05e23540d2b1f0270fa31c22a7e9f1
        log.debug("Hostname is localhost. Let other services handle this.")
        return

    try:
        prev_fn = os.path.join(cloud.get_cpath("data"), "previous-hostname")
        log.debug("Updating hostname to %s (%s)", fqdn, hostname)
        cloud.distro.update_hostname(hostname, fqdn, prev_fn)
    except Exception:
        util.logexc(log, "Failed to update the hostname to %s (%s)", fqdn,
                    hostname)
        raise
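For reference, a hypothetical cfg dict exercising the options this handler reads. The 'preserve_hostname' and 'prefer_fqdn_over_hostname' names come from the calls above; the 'hostname'/'fqdn' entries are assumptions about what util.get_hostname_fqdn() consumes and are not shown in the excerpt.

cfg = {
    'preserve_hostname': False,
    'prefer_fqdn_over_hostname': True,
    'hostname': 'node01',            # assumed input to util.get_hostname_fqdn
    'fqdn': 'node01.example.com',    # assumed input to util.get_hostname_fqdn
}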
Example #3
def handle(name, cfg, cloud, log, _args):
    if util.get_cfg_option_bool(cfg, "preserve_hostname", False):
        log.debug(
            "Configuration option 'preserve_hostname' is set,"
            " not updating the hostname in module %s",
            name,
        )
        return

    # Set prefer_fqdn_over_hostname value in distro
    hostname_fqdn = util.get_cfg_option_bool(
        cfg, "prefer_fqdn_over_hostname", None
    )
    if hostname_fqdn is not None:
        cloud.distro.set_option("prefer_fqdn_over_hostname", hostname_fqdn)

    (hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud)
    try:
        prev_fn = os.path.join(cloud.get_cpath("data"), "previous-hostname")
        log.debug("Updating hostname to %s (%s)", fqdn, hostname)
        cloud.distro.update_hostname(hostname, fqdn, prev_fn)
    except Exception:
        util.logexc(
            log, "Failed to update the hostname to %s (%s)", fqdn, hostname
        )
        raise
Example #4
def install_chef(cloud, chef_cfg, log):
    # If chef is not installed, we install chef based on 'install_type'
    install_type = util.get_cfg_option_str(chef_cfg, 'install_type',
                                           'packages')
    run = util.get_cfg_option_bool(chef_cfg, 'exec', default=False)
    if install_type == "gems":
        # This will install and run the chef-client from gems
        chef_version = util.get_cfg_option_str(chef_cfg, 'version', None)
        ruby_version = util.get_cfg_option_str(chef_cfg, 'ruby_version',
                                               RUBY_VERSION_DEFAULT)
        install_chef_from_gems(ruby_version, chef_version, cloud.distro)
        # Retain backwards compat, by preferring True instead of False
        # when not provided/overridden...
        run = util.get_cfg_option_bool(chef_cfg, 'exec', default=True)
    elif install_type == 'packages':
        # This will install and run the chef-client from packages
        cloud.distro.install_packages(('chef',))
    elif install_type == 'omnibus':
        omnibus_version = util.get_cfg_option_str(chef_cfg, "omnibus_version")
        install_chef_from_omnibus(
            url=util.get_cfg_option_str(chef_cfg, "omnibus_url"),
            retries=util.get_cfg_option_int(chef_cfg, "omnibus_url_retries"),
            omnibus_version=omnibus_version)
    else:
        log.warn("Unknown chef install type '%s'", install_type)
        run = False
    return run
Example #5
def install_chef(cloud, chef_cfg, log):
    # If chef is not installed, we install chef based on 'install_type'
    install_type = util.get_cfg_option_str(chef_cfg, 'install_type',
                                           'packages')
    run = util.get_cfg_option_bool(chef_cfg, 'exec', default=False)
    if install_type == "gems":
        # This will install and run the chef-client from gems
        chef_version = util.get_cfg_option_str(chef_cfg, 'version', None)
        ruby_version = util.get_cfg_option_str(chef_cfg, 'ruby_version',
                                               RUBY_VERSION_DEFAULT)
        install_chef_from_gems(cloud.distro, ruby_version, chef_version)
        # Retain backwards compat, by preferring True instead of False
        # when not provided/overridden...
        run = util.get_cfg_option_bool(chef_cfg, 'exec', default=True)
    elif install_type == 'packages':
        # This will install and run the chef-client from packages
        cloud.distro.install_packages(('chef',))
    elif install_type == 'omnibus':
        # This will install as a omnibus unified package
        url = util.get_cfg_option_str(chef_cfg, "omnibus_url", OMNIBUS_URL)
        retries = max(0, util.get_cfg_option_int(chef_cfg,
                                                 "omnibus_url_retries",
                                                 default=OMNIBUS_URL_RETRIES))
        content = url_helper.readurl(url=url, retries=retries)
        with util.tempdir() as tmpd:
            # Use tmpdir over tmpfile to avoid 'text file busy' on execute
            tmpf = "%s/chef-omnibus-install" % tmpd
            util.write_file(tmpf, str(content), mode=0700)
            util.subp([tmpf], capture=False)
    else:
        log.warn("Unknown chef install type '%s'", install_type)
        run = False
    return run
Example #6
def install_chef(cloud, chef_cfg, log):
    # If chef is not installed, we install chef based on 'install_type'
    install_type = util.get_cfg_option_str(chef_cfg, 'install_type',
                                           'packages')
    run = util.get_cfg_option_bool(chef_cfg, 'exec', default=False)
    if install_type == "gems":
        # This will install and run the chef-client from gems
        chef_version = util.get_cfg_option_str(chef_cfg, 'version', None)
        ruby_version = util.get_cfg_option_str(chef_cfg, 'ruby_version',
                                               RUBY_VERSION_DEFAULT)
        install_chef_from_gems(ruby_version, chef_version, cloud.distro)
        # Retain backwards compat, by preferring True instead of False
        # when not provided/overridden...
        run = util.get_cfg_option_bool(chef_cfg, 'exec', default=True)
    elif install_type == 'packages':
        # This will install and run the chef-client from packages
        cloud.distro.install_packages(('chef', ))
    elif install_type == 'omnibus':
        # This will install as a omnibus unified package
        url = util.get_cfg_option_str(chef_cfg, "omnibus_url", OMNIBUS_URL)
        retries = max(
            0,
            util.get_cfg_option_int(chef_cfg,
                                    "omnibus_url_retries",
                                    default=OMNIBUS_URL_RETRIES))
        content = url_helper.readurl(url=url, retries=retries)
        with util.tempdir() as tmpd:
            # Use tmpdir over tmpfile to avoid 'text file busy' on execute
            tmpf = "%s/chef-omnibus-install" % tmpd
            util.write_file(tmpf, content, mode=0o700)
            util.subp([tmpf], capture=False)
    else:
        log.warn("Unknown chef install type '%s'", install_type)
        run = False
    return run
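To make the branches of install_chef() concrete, here is a hypothetical chef_cfg dict covering the keys the function reads. The key names are taken from the code above; every value is illustrative only.

chef_cfg = {
    'install_type': 'omnibus',   # one of 'gems', 'packages', 'omnibus'
    'exec': True,                # whether chef-client should run afterwards
    'version': '1.2.3',          # read by the 'gems' branch (illustrative)
    'ruby_version': '1.9.3',     # read by the 'gems' branch (illustrative)
    'omnibus_url': 'https://example.com/chef/install.sh',  # illustrative URL
    'omnibus_url_retries': 5,
    'omnibus_version': '17.0.0',  # illustrative
}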
Example #7
def handle(name, cfg, cloud, log, args):
    update = util.get_cfg_option_bool(cfg, 'repo_update', False)
    # map the various possible upgrade level choices to well known ones
    upgrade_val = util.get_cfg_option_str(cfg, 'repo_upgrade')
    upgrade = util.UPGRADE_NONE
    if upgrade_val:
        if upgrade_val.lower() in ['security', 'critical']:
            upgrade = util.UPGRADE_SECURITY
        elif upgrade_val.lower() in ['fixes', 'bugs', 'bugfix', 'bugfixes']:
            upgrade = util.UPGRADE_BUGFIX
        elif upgrade_val.lower() in ['true', '1', 'on', 'yes', 'all']:
            upgrade = util.UPGRADE_ALL

    dist = cloudinit.DistAction.DistAction("/etc/cloud/dist-defs.cfg")

    if not util.get_cfg_option_bool(cfg, 'repo_preserve', True):
        repo_cfg = dist.get_config_section('repo')

        if "repo_mirror" in cfg:
            repo_cfg['mirror'] = cfg["repo_mirror"]
        else:
            # May build mirror from availability zone information:
            availability_zone = cloud.datasource.get_availability_zone()
            repo_cfg['ec2_az'] = availability_zone[:-1]
        log.debug("Generating default repo files")
        dist.repo_generate(repo_cfg)

        # Make this part of repo_generate??  (TODO)
        #old_mir = util.get_cfg_option_str(cfg,'repo_old_mirror', \
        #                                  mirror)
        #rename_repo(old_mir, mirror)

    # Equivalent to 'apt_sources': add a new package repository
    if 'repo_additions' in cfg:
        log.debug("Adding repo files from config")
        errors = dist.repo_add(cfg['repo_additions'])
        for e in errors:
            log.warn("Source Error: %s\n" % ':'.join(e))

    pkglist = []
    if 'packages' in cfg:
        if isinstance(cfg['packages'], list):
            pkglist = cfg['packages']
        else:
            pkglist.append(cfg['packages'])

    if update or upgrade or pkglist:
        log.debug("Running update on repo")
        dist.repo_update()

    if upgrade:
        log.debug("Running upgrade on repo")
        dist.repo_upgrade(upgrade)

    if pkglist:
        log.debug("Installing packages from repo")
        dist.repo_install(pkglist)

    return True
Example #8
def handle(name, cfg, _cloud, log, _args):
    """
    Handler for resolv.conf

    @param name: The module name "resolv-conf" from cloud.cfg
    @param cfg: A nested dict containing the entire cloud config contents.
    @param cloud: The L{CloudInit} object in use.
    @param log: Pre-initialized Python logger object to use for logging.
    @param args: Any module arguments from cloud.cfg
    """
    if "manage_resolv_conf" not in cfg:
        log.debug(("Skipping module named %s,"
                   " no 'manage_resolv_conf' key in configuration"), name)
        return

    if not util.get_cfg_option_bool(cfg, "manage_resolv_conf", False):
        log.debug(("Skipping module named %s,"
                   " 'manage_resolv_conf' present but set to False"), name)
        return

    if not "resolv_conf" in cfg:
        log.warn("manage_resolv_conf True but no parameters provided!")

    generate_resolv_conf(_cloud, log, cfg["resolv_conf"])
    return
Example #9
def handle(name, cfg, _cloud, log, _args):
    disabled = util.get_cfg_option_bool(cfg, "disable_ec2_metadata", False)
    if disabled:
        util.subp(REJECT_CMD, capture=False)
    else:
        log.debug(("Skipping module named %s,"
                   " disabling the ec2 route not enabled"), name)
Example #10
def handle(name, cfg, cloud, log, _args):
    """
    Handler for resolv.conf

    @param name: The module name "resolv-conf" from cloud.cfg
    @param cfg: A nested dict containing the entire cloud config contents.
    @param cloud: The L{CloudInit} object in use.
    @param log: Pre-initialized Python logger object to use for logging.
    @param args: Any module arguments from cloud.cfg
    """
    if "manage_resolv_conf" not in cfg:
        log.debug(("Skipping module named %s,"
                   " no 'manage_resolv_conf' key in configuration"), name)
        return

    if not util.get_cfg_option_bool(cfg, "manage_resolv_conf", False):
        log.debug(("Skipping module named %s,"
                   " 'manage_resolv_conf' present but set to False"), name)
        return

    if "resolv_conf" not in cfg:
        log.warn("manage_resolv_conf True but no parameters provided!")

    template_fn = cloud.get_template_filename('resolv.conf')
    if not template_fn:
        log.warn("No template found, not rendering /etc/resolv.conf")
        return

    generate_resolv_conf(template_fn=template_fn, params=cfg["resolv_conf"])
    return
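A hypothetical cfg for the resolv.conf handlers: 'manage_resolv_conf' and 'resolv_conf' are the keys the code checks, while the nested fields are assumptions about the template parameters (the handler only passes the dict through to generate_resolv_conf()).

cfg = {
    'manage_resolv_conf': True,
    'resolv_conf': {
        'nameservers': ['10.0.0.2'],             # assumed template parameter
        'searchdomains': ['example.internal'],   # assumed template parameter
    },
}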
Example #11
def handle(name, cfg, cloud, log, _args):
    if util.get_cfg_option_bool(cfg, "preserve_hostname", False):
        log.debug(("Configuration option 'preserve_hostname' is set,"
                   " not setting the hostname in module %s"), name)
        return
    (hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud)
    # Check for previous successful invocation of set-hostname

    # set-hostname artifact file accounts for both hostname and fqdn
    # deltas. As such, its format is different from cc_update_hostname's
    # previous-hostname file which only contains the base hostname.
    # TODO consolidate previous-hostname and set-hostname artifact files and
    # distro._read_hostname implementation so we only validate one artifact.
    prev_fn = os.path.join(cloud.get_cpath('data'), "set-hostname")
    prev_hostname = {}
    if os.path.exists(prev_fn):
        prev_hostname = util.load_json(util.load_file(prev_fn))
    hostname_changed = (hostname != prev_hostname.get('hostname')
                        or fqdn != prev_hostname.get('fqdn'))
    if not hostname_changed:
        log.debug('No hostname changes. Skipping set-hostname')
        return
    log.debug("Setting the hostname to %s (%s)", fqdn, hostname)
    try:
        cloud.distro.set_hostname(hostname, fqdn)
    except Exception as e:
        msg = "Failed to set the hostname to %s (%s)" % (fqdn, hostname)
        util.logexc(log, msg)
        raise SetHostnameError("%s: %s" % (msg, e))
    write_json(prev_fn, {'hostname': hostname, 'fqdn': fqdn})
Example #12
def handle(name, cfg, cloud, log, _args):
    """
    Handler for resolv.conf

    @param name: The module name "resolv-conf" from cloud.cfg
    @param cfg: A nested dict containing the entire cloud config contents.
    @param cloud: The L{CloudInit} object in use.
    @param log: Pre-initialized Python logger object to use for logging.
    @param args: Any module arguments from cloud.cfg
    """
    if "manage_resolv_conf" not in cfg:
        log.debug(("Skipping module named %s,"
                   " no 'manage_resolv_conf' key in configuration"), name)
        return

    if not util.get_cfg_option_bool(cfg, "manage_resolv_conf", False):
        log.debug(("Skipping module named %s,"
                   " 'manage_resolv_conf' present but set to False"), name)
        return

    if "resolv_conf" not in cfg:
        log.warning("manage_resolv_conf True but no parameters provided!")
        return

    try:
        template_fn = cloud.get_template_filename(
            RESOLVE_CONFIG_TEMPLATE_MAP[cloud.distro.resolve_conf_fn])
    except KeyError:
        log.warning("No template found, not rendering resolve configs")
        return

    generate_resolv_conf(template_fn=template_fn,
                         params=cfg["resolv_conf"],
                         target_fname=cloud.distro.resolve_conf_fn)
    return
Example #13
def get_template_params(iid, chef_cfg, log):
    params = CHEF_RB_TPL_DEFAULTS.copy()
    # Allow users to overwrite any of the keys they want (if they so choose);
    # when a value is None, it is kept as None and no boolean or string
    # version is populated...
    for (k, v) in chef_cfg.items():
        if k not in CHEF_RB_TPL_KEYS:
            log.debug("Skipping unknown chef template key '%s'", k)
            continue
        if v is None:
            params[k] = None
        else:
            # This will make the value a boolean or string...
            if k in CHEF_RB_TPL_BOOL_KEYS:
                params[k] = util.get_cfg_option_bool(chef_cfg, k)
            else:
                params[k] = util.get_cfg_option_str(chef_cfg, k)
    # These ones are overwritten to be exact values...
    params.update({
        'generated_by': util.make_header(),
        'node_name': util.get_cfg_option_str(chef_cfg, 'node_name',
                                             default=iid),
        'environment': util.get_cfg_option_str(chef_cfg, 'environment',
                                               default='_default'),
        # These two are mandatory...
        'server_url': chef_cfg['server_url'],
        'validation_name': chef_cfg['validation_name'],
    })
    return params
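get_template_params() relies on several module-level constants that are not part of this excerpt. Purely to make the control flow readable, here is a hedged sketch of their assumed shapes; the real definitions live elsewhere in the module and will differ.

# Assumed shapes only, not the real constants.
CHEF_RB_TPL_DEFAULTS = {
    'log_level': ':info',                          # illustrative
    'show_time': True,                             # illustrative boolean option
    'validation_key': '/etc/chef/validation.pem',  # illustrative
}
CHEF_RB_TPL_BOOL_KEYS = frozenset(['show_time'])
CHEF_RB_TPL_KEYS = frozenset(
    list(CHEF_RB_TPL_DEFAULTS)
    + ['server_url', 'node_name', 'environment', 'validation_name']
)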
Example #14
def get_template_params(iid, chef_cfg, log):
    params = CHEF_RB_TPL_DEFAULTS.copy()
    # Allow users to overwrite any of the keys they want (if they so choose);
    # when a value is None, it is kept as None and no boolean or string
    # version is populated...
    for (k, v) in chef_cfg.items():
        if k not in CHEF_RB_TPL_KEYS:
            log.debug("Skipping unknown chef template key '%s'", k)
            continue
        if v is None:
            params[k] = None
        else:
            # This will make the value a boolean or string...
            if k in CHEF_RB_TPL_BOOL_KEYS:
                params[k] = util.get_cfg_option_bool(chef_cfg, k)
            else:
                params[k] = util.get_cfg_option_str(chef_cfg, k)
    # These ones are overwritten to be exact values...
    params.update({
        'generated_by':
        util.make_header(),
        'node_name':
        util.get_cfg_option_str(chef_cfg, 'node_name', default=iid),
        'environment':
        util.get_cfg_option_str(chef_cfg, 'environment', default='_default'),
        # These two are mandatory...
        'server_url':
        chef_cfg['server_url'],
        'validation_name':
        chef_cfg['validation_name'],
    })
    return params
Example #15
def handle(name, cfg, _cloud, log, _args):
    disabled = util.get_cfg_option_bool(cfg, "disable_ec2_metadata", False)
    if disabled:
        util.subp(REJECT_CMD, capture=False)
    else:
        log.debug(("Skipping module named %s,"
                   " disabling the ec2 route not enabled"), name)
Example #16
    def consume_data(self, frequency=PER_INSTANCE):
        # Consume the userdata first, because we want to let the part
        # handlers run first (for merging stuff)
        with events.ReportEventStack(
            "consume-user-data",
            "reading and applying user-data",
            parent=self.reporter,
        ):
            if util.get_cfg_option_bool(self.cfg, "allow_userdata", True):
                self._consume_userdata(frequency)
            else:
                LOG.debug("allow_userdata = False: discarding user-data")

        with events.ReportEventStack(
            "consume-vendor-data",
            "reading and applying vendor-data",
            parent=self.reporter,
        ):
            self._consume_vendordata("vendordata", frequency)

        with events.ReportEventStack(
            "consume-vendor-data2",
            "reading and applying vendor-data2",
            parent=self.reporter,
        ):
            self._consume_vendordata("vendordata2", frequency)

        # Perform post-consumption adjustments so that
        # modules that run during the init stage reflect
        # this consumed set.
        #
        # They will be recreated on future access...
        self._reset()
Example #17
def handle(name, cfg, cloud, log, _args):
    if util.get_cfg_option_bool(cfg, "preserve_hostname", False):
        log.debug(("Configuration option 'preserve_hostname' is set,"
                   " not setting the hostname in module %s"), name)
        return
    (hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud)
    # Check for previous successful invocation of set-hostname

    # set-hostname artifact file accounts for both hostname and fqdn
    # deltas. As such, its format is different from cc_update_hostname's
    # previous-hostname file which only contains the base hostname.
    # TODO consolidate previous-hostname and set-hostname artifact files and
    # distro._read_hostname implementation so we only validate one artifact.
    prev_fn = os.path.join(cloud.get_cpath('data'), "set-hostname")
    prev_hostname = {}
    if os.path.exists(prev_fn):
        prev_hostname = util.load_json(util.load_file(prev_fn))
    hostname_changed = (hostname != prev_hostname.get('hostname') or
                        fqdn != prev_hostname.get('fqdn'))
    if not hostname_changed:
        log.debug('No hostname changes. Skipping set-hostname')
        return
    log.debug("Setting the hostname to %s (%s)", fqdn, hostname)
    try:
        cloud.distro.set_hostname(hostname, fqdn)
    except Exception as e:
        msg = "Failed to set the hostname to %s (%s)" % (fqdn, hostname)
        util.logexc(log, msg)
        raise SetHostnameError("%s: %s" % (msg, e))
    write_json(prev_fn, {'hostname': hostname, 'fqdn': fqdn})
Example #18
def handle(name, cfg, cloud, log, _args):
    if util.get_cfg_option_bool(cfg, "preserve_hostname", False):
        log.debug(
            "Configuration option 'preserve_hostname' is set,"
            " not setting the hostname in module %s",
            name,
        )
        return

    # Set prefer_fqdn_over_hostname value in distro
    hostname_fqdn = util.get_cfg_option_bool(
        cfg, "prefer_fqdn_over_hostname", None
    )
    if hostname_fqdn is not None:
        cloud.distro.set_option("prefer_fqdn_over_hostname", hostname_fqdn)

    (hostname, fqdn, is_default) = util.get_hostname_fqdn(cfg, cloud)
    # Check for previous successful invocation of set-hostname

    # set-hostname artifact file accounts for both hostname and fqdn
    # deltas. As such, its format is different from cc_update_hostname's
    # previous-hostname file which only contains the base hostname.
    # TODO consolidate previous-hostname and set-hostname artifact files and
    # distro._read_hostname implementation so we only validate one artifact.
    prev_fn = os.path.join(cloud.get_cpath("data"), "set-hostname")
    prev_hostname = {}
    if os.path.exists(prev_fn):
        prev_hostname = util.load_json(util.load_file(prev_fn))
    hostname_changed = hostname != prev_hostname.get(
        "hostname"
    ) or fqdn != prev_hostname.get("fqdn")
    if not hostname_changed:
        log.debug("No hostname changes. Skipping set-hostname")
        return
    if is_default and hostname == "localhost":
        # https://github.com/systemd/systemd/commit/d39079fcaa05e23540d2b1f0270fa31c22a7e9f1
        log.debug("Hostname is localhost. Let other services handle this.")
        return
    log.debug("Setting the hostname to %s (%s)", fqdn, hostname)
    try:
        cloud.distro.set_hostname(hostname, fqdn)
    except Exception as e:
        msg = "Failed to set the hostname to %s (%s)" % (fqdn, hostname)
        util.logexc(log, msg)
        raise SetHostnameError("%s: %s" % (msg, e)) from e
    write_json(prev_fn, {"hostname": hostname, "fqdn": fqdn})
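The set-hostname artifact that both variants above write is plain JSON. A small sketch of the write/read round trip they rely on, using the json module directly instead of the write_json/util.load_json helpers from the excerpt; the path is illustrative.

import json
import os

prev_fn = '/var/lib/cloud/data/set-hostname'  # illustrative cpath("data") location


def write_artifact(path, hostname, fqdn):
    # Same shape as write_json(prev_fn, {'hostname': ..., 'fqdn': ...}) above.
    with open(path, 'w') as fh:
        json.dump({'hostname': hostname, 'fqdn': fqdn}, fh)


def read_artifact(path):
    if not os.path.exists(path):
        return {}
    with open(path) as fh:
        return json.load(fh)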
Example #19
    def network_config(self):
        """Return a network config dict for rendering ENI or netplan files."""
        if self._network_config != sources.UNSET:
            return self._network_config

        if self.metadata is None:
            # this would happen if get_data hadn't been called. leave as UNSET
            LOG.warning(
                "Unexpected call to network_config when metadata is None.")
            return None

        result = None
        no_network_metadata_on_aws = bool(
            "network" not in self.metadata
            and self.cloud_name == CloudNames.AWS)
        if no_network_metadata_on_aws:
            LOG.debug("Metadata 'network' not present:"
                      " Refreshing stale metadata from prior to upgrade.")
            util.log_time(
                logfunc=LOG.debug,
                msg="Re-crawl of metadata service",
                func=self.get_data,
            )

        iface = self.fallback_interface
        net_md = self.metadata.get("network")
        if isinstance(net_md, dict):
            # SRU_BLOCKER: xenial, bionic and eoan should default
            # apply_full_imds_network_config to False to retain original
            # behavior on those releases.
            result = convert_ec2_metadata_network_config(
                net_md,
                fallback_nic=iface,
                full_network_config=util.get_cfg_option_bool(
                    self.ds_cfg, "apply_full_imds_network_config", True),
            )

            # RELEASE_BLOCKER: xenial should drop the below if statement,
            # because the issue being addressed doesn't exist pre-netplan.
            # (This datasource doesn't implement check_instance_id() so the
            # datasource object is recreated every boot; this means we don't
            # need to modify update_events on cloud-init upgrade.)

            # Non-VPC (aka Classic) Ec2 instances need to rewrite the
            # network config file every boot due to MAC address change.
            if self.is_classic_instance():
                self.default_update_events = copy.deepcopy(
                    self.default_update_events)
                self.default_update_events[EventScope.NETWORK].add(
                    EventType.BOOT)
                self.default_update_events[EventScope.NETWORK].add(
                    EventType.BOOT_LEGACY)
        else:
            LOG.warning("Metadata 'network' key not valid: %s.", net_md)
        self._network_config = result

        return self._network_config
Example #20
 def _select_hostname(self, hostname, fqdn):
     # Prefer the short hostname over the long
     # fully qualified domain name
     if util.get_cfg_option_bool(self._cfg, "prefer_fqdn_over_hostname",
                                 self.prefer_fqdn) and fqdn:
         return fqdn
     if not hostname:
         return fqdn
     return hostname
Example #21
def handle(name, cfg, cloud, log, _args):
    if util.is_false(cfg.get('apt_configure_enabled', True)):
        log.debug("Skipping module named %s, disabled by config.", name)
        return

    release = get_release()
    mirrors = find_apt_mirror_info(cloud, cfg)
    if not mirrors or "primary" not in mirrors:
        log.debug(("Skipping module named %s,"
                   " no package 'mirror' located"), name)
        return

    # backwards compatibility
    mirror = mirrors["primary"]
    mirrors["mirror"] = mirror

    log.debug("Mirror info: %s" % mirrors)

    if not util.get_cfg_option_bool(cfg,
                                    'apt_preserve_sources_list', False):
        generate_sources_list(release, mirrors, cloud, log)
        old_mirrors = cfg.get('apt_old_mirrors',
                              {"primary": "archive.ubuntu.com/ubuntu",
                               "security": "security.ubuntu.com/ubuntu"})
        rename_apt_lists(old_mirrors, mirrors)

    try:
        apply_apt_config(cfg, APT_PROXY_FN, APT_CONFIG_FN)
    except Exception as e:
        log.warn("failed to proxy or apt config info: %s", e)

    # Process 'apt_sources'
    if 'apt_sources' in cfg:
        params = mirrors
        params['RELEASE'] = release
        params['MIRROR'] = mirror

        matchcfg = cfg.get('add_apt_repo_match', ADD_APT_REPO_MATCH)
        if matchcfg:
            matcher = re.compile(matchcfg).search
        else:
            def matcher(x):
                return False

        errors = add_sources(cfg['apt_sources'], params,
                             aa_repo_match=matcher)
        for e in errors:
            log.warn("Add source error: %s", ':'.join(e))

    dconf_sel = util.get_cfg_option_str(cfg, 'debconf_selections', False)
    if dconf_sel:
        log.debug("Setting debconf selections per cloud config")
        try:
            util.subp(('debconf-set-selections', '-'), dconf_sel)
        except Exception:
            util.logexc(log, "Failed to run debconf-set-selections")
Example #22
 def _write_to_cache(self):
     if self.datasource is NULL_DATA_SOURCE:
         return False
     if util.get_cfg_option_bool(self.cfg, 'manual_cache_clean', False):
         # The empty file in instance/ dir indicates manual cleaning,
         # and can be read by ds-identify.
         util.write_file(self.paths.get_ipath_cur("manual_clean_marker"),
                         omode="w",
                         content="")
     return _pkl_store(self.datasource, self.paths.get_ipath_cur("obj_pkl"))
Example #23
 def _write_to_cache(self):
     if self.datasource is NULL_DATA_SOURCE:
         return False
     if util.get_cfg_option_bool(self.cfg, 'manual_cache_clean', False):
         # The empty file in instance/ dir indicates manual cleaning,
         # and can be read by ds-identify.
         util.write_file(
             self.paths.get_ipath_cur("manual_clean_marker"),
             omode="w", content="")
     return _pkl_store(self.datasource, self.paths.get_ipath_cur("obj_pkl"))
Example #24
def handle(_name, cfg, cloud, log, _args):
    if util.get_cfg_option_bool(cfg, "preserve_hostname", False):
        log.debug("preserve_hostname is set. not updating hostname")
        return

    (hostname, _fqdn) = util.get_hostname_fqdn(cfg, cloud)
    try:
        prev = "%s/%s" % (cloud.get_cpath('data'), "previous-hostname")
        update_hostname(hostname, prev, log)
    except Exception:
        log.warn("failed to set hostname\n")
        raise
Example #25
def handle(name, cfg, _cloud, log, _args):
    validate_cloudconfig_schema(cfg, schema)
    file_list = cfg.get('write_files', [])
    filtered_files = [
        f for f in file_list if util.get_cfg_option_bool(f,
                                                         'defer',
                                                         DEFAULT_DEFER)
    ]
    if not filtered_files:
        log.debug(("Skipping module named %s,"
                   " no deferred file defined in configuration"), name)
        return
    write_files(name, filtered_files)
Example #26
def handle(_name, cfg, cloud, log, _args):
    if util.get_cfg_option_bool(cfg, "preserve_hostname", False):
        log.debug("preserve_hostname is set. not setting hostname")
        return (True)

    (hostname, _fqdn) = util.get_hostname_fqdn(cfg, cloud)
    try:
        set_hostname(hostname, log)
    except Exception:
        util.logexc(log)
        log.warn("failed to set hostname to %s\n", hostname)

    return (True)
Example #27
def handle(_name, cfg, cloud, log, _args):
    if util.get_cfg_option_bool(cfg, "preserve_hostname", False):
        log.debug("preserve_hostname is set. not setting hostname")
        return(True)

    (hostname, _fqdn) = util.get_hostname_fqdn(cfg, cloud)
    try:
        set_hostname(hostname, log)
    except Exception:
        util.logexc(log)
        log.warn("failed to set hostname to %s\n", hostname)

    return(True)
Example #28
def handle(name, cfg, cloud, log, _args):
    """
    Call to handle apk_repos sections in cloud-config file.

    @param name: The module name "apk-configure" from cloud.cfg
    @param cfg: A nested dict containing the entire cloud config contents.
    @param cloud: The CloudInit object in use.
    @param log: Pre-initialized Python logger object to use for logging.
    @param _args: Any module arguments from cloud.cfg
    """

    # If there is no "apk_repos" section in the configuration
    # then do nothing.
    apk_section = cfg.get("apk_repos")
    if not apk_section:
        LOG.debug("Skipping module named %s, no 'apk_repos' section found",
                  name)
        return

    validate_cloudconfig_schema(cfg, schema)

    # If "preserve_repositories" is explicitly set to True in
    # the configuration do nothing.
    if util.get_cfg_option_bool(apk_section, "preserve_repositories", False):
        LOG.debug("Skipping module named %s, 'preserve_repositories' is set",
                  name)
        return

    # If there is no "alpine_repo" subsection of "apk_repos" present in the
    # configuration then do nothing, as at least "version" is required to
    # create valid repositories entries.
    alpine_repo = apk_section.get("alpine_repo")
    if not alpine_repo:
        LOG.debug(
            "Skipping module named %s, no 'alpine_repo' configuration found",
            name,
        )
        return

    # If there is no "version" value present in configuration then do nothing.
    alpine_version = alpine_repo.get("version")
    if not alpine_version:
        LOG.debug(
            "Skipping module named %s, 'version' not specified in alpine_repo",
            name,
        )
        return

    local_repo = apk_section.get("local_repo_base_url", "")

    _write_repositories_file(alpine_repo, alpine_version, local_repo)
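A hypothetical cfg matching what this apk handler looks for; the key names come from the code above and the values are illustrative.

cfg = {
    'apk_repos': {
        'preserve_repositories': False,
        'alpine_repo': {
            'version': 'v3.12',  # illustrative; further keys are consumed by
                                 # _write_repositories_file()
        },
        'local_repo_base_url': '',  # optional, defaults to '' above
    },
}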
Example #29
def handle(name, cfg, _cloud, log, _args):
    file_list = cfg.get("write_files", [])
    filtered_files = [
        f for f in file_list
        if not util.get_cfg_option_bool(f, "defer", DEFAULT_DEFER)
    ]
    if not filtered_files:
        log.debug(
            "Skipping module named %s,"
            " no/empty 'write_files' key in configuration",
            name,
        )
        return
    write_files(name, filtered_files)
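This handler and the deferred variant shown earlier split a single 'write_files' list on the per-entry 'defer' flag. A hypothetical cfg with one entry for each pass; the field names match what write_files() reads and the values are illustrative.

cfg = {
    'write_files': [
        {
            'path': '/etc/example.conf',  # illustrative target
            'content': 'hello\n',
            'permissions': '0644',
            'owner': 'root:root',
            'append': False,
            # no 'defer' key: written by the non-deferred pass above
        },
        {
            'path': '/home/user/.token',  # illustrative target
            'content': 'secret\n',
            'defer': True,                # picked up by the deferred pass
        },
    ],
}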
Example #30
def handle(name, cfg, cloud, log, _args):
    if util.get_cfg_option_bool(cfg, "preserve_hostname", False):
        log.debug(("Configuration option 'preserve_hostname' is set,"
                    " not setting the hostname in module %s"), name)
        return

    (hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud)
    try:
        log.debug("Setting the hostname to %s (%s)", fqdn, hostname)
        cloud.distro.set_hostname(hostname, fqdn)
    except Exception:
        util.logexc(log, "Failed to set the hostname to %s (%s)", fqdn,
                    hostname)
        raise
Example #31
def handle(name, cfg, cloud, log, _args):
    if util.get_cfg_option_bool(cfg, "preserve_hostname", False):
        log.debug(("Configuration option 'preserve_hostname' is set,"
                    " not setting the hostname in module %s"), name)
        return

    (hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud)
    try:
        metadata = cloud.datasource.metadata
        hostname = metadata['meta']['hostname']
        fqdn = hostname
        log.debug("Setting the hostname to %s (%s)", fqdn, hostname)
        cloud.distro.set_hostname(hostname, fqdn)
    except Exception:
        util.logexc(log, "Failed to set the hostname to %s (%s)", fqdn,
                    hostname)
        raise
Example #32
def handle(name, cfg, cloud, log, _args):
    if util.get_cfg_option_bool(cfg, "preserve_hostname", False):
        log.debug(("Configuration option 'preserve_hostname' is set,"
                   " not setting the hostname in module %s"), name)
        return

    (hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud)
    try:
        log.debug("Setting the hostname to %s (%s)", fqdn, hostname)
        cloud.distro.set_hostname(hostname, fqdn)
    except Exception:
        util.logexc(log, "Failed to set the hostname to %s (%s)", fqdn,
                    hostname)
        raise
Example #33
def handle(name, cfg, cloud, log, _args):
    if util.get_cfg_option_bool(cfg, "preserve_hostname", False):
        log.debug(("Configuration option 'preserve_hostname' is set,"
                   " not updating the hostname in module %s"), name)
        return

    (hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud)
    try:
        prev_fn = os.path.join(cloud.get_cpath('data'), "previous-hostname")
        log.debug("Updating hostname to %s (%s)", fqdn, hostname)
        cloud.distro.update_hostname(hostname, fqdn, prev_fn)
    except Exception:
        util.logexc(log, "Failed to update the hostname to %s (%s)", fqdn,
                    hostname)
        raise
Example #34
def handle(name, cfg, _cloud, log, _args):
    disabled = util.get_cfg_option_bool(cfg, "disable_ec2_metadata", False)
    if disabled:
        reject_cmd = None
        if subp.which('ip'):
            reject_cmd = REJECT_CMD_IP
        elif subp.which('ifconfig'):
            reject_cmd = REJECT_CMD_IF
        else:
            log.error(('Neither "ip" nor "ifconfig" command found, unable to '
                       'manipulate routing table'))
            return
        subp.subp(reject_cmd, capture=False)
    else:
        log.debug(("Skipping module named %s,"
                   " disabling the ec2 route not enabled"), name)
Example #35
def handle(name, cfg, _cloud, log, _args):
    disabled = util.get_cfg_option_bool(cfg, "disable_ec2_metadata", False)
    if disabled:
        reject_cmd = None
        if util.which('ip'):
            reject_cmd = REJECT_CMD_IP
        elif util.which('ifconfig'):
            reject_cmd = REJECT_CMD_IF
        else:
            log.error(('Neither "ip" nor "ifconfig" command found, unable to '
                       'manipulate routing table'))
            return
        util.subp(reject_cmd, capture=False)
    else:
        log.debug(("Skipping module named %s,"
                   " disabling the ec2 route not enabled"), name)
Example #36
def find_apt_mirror_info(cloud, cfg):
    """find an apt_mirror given the cloud and cfg provided."""

    mirror = None

    # this is the less preferred way of specifying a mirror; preferred would
    # be to use the distro's search or package_mirror.
    mirror = cfg.get("apt_mirror", None)

    search = cfg.get("apt_mirror_search", None)
    if not mirror and search:
        mirror = util.search_for_mirror(search)

    if (not mirror
            and util.get_cfg_option_bool(cfg, "apt_mirror_search_dns", False)):
        mydom = ""
        doms = []

        # if we have a fqdn, then search its domain portion first
        (_hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud)
        mydom = ".".join(fqdn.split(".")[1:])
        if mydom:
            doms.append(".%s" % mydom)

        doms.extend((
            ".localdomain",
            "",
        ))

        mirror_list = []
        distro = cloud.distro.name
        mirrorfmt = "http://%s-mirror%s/%s" % (distro, "%s", distro)
        for post in doms:
            mirror_list.append(mirrorfmt % (post))

        mirror = util.search_for_mirror(mirror_list)

    mirror_info = cloud.datasource.get_package_mirror_info()

    # this is a bit strange.
    # if mirror is set, then one of the legacy options above set it
    # but they do not cover security. so we need to get that from
    # get_package_mirror_info
    if mirror:
        mirror_info.update({'primary': mirror})

    return mirror_info
Example #37
def write_files(name, files):
    if not files:
        return

    for (i, f_info) in enumerate(files):
        path = f_info.get('path')
        if not path:
            LOG.warning("No path provided to write for entry %s in module %s",
                        i + 1, name)
            continue
        path = os.path.abspath(path)
        extractions = canonicalize_extraction(f_info.get('encoding'))
        contents = extract_contents(f_info.get('content', ''), extractions)
        (u, g) = util.extract_usergroup(f_info.get('owner', DEFAULT_OWNER))
        perms = decode_perms(f_info.get('permissions'), DEFAULT_PERMS)
        omode = 'ab' if util.get_cfg_option_bool(f_info, 'append') else 'wb'
        util.write_file(path, contents, omode=omode, mode=perms)
        util.chownbyname(path, u, g)
Example #38
def write_files(name, files):
    if not files:
        return

    for (i, f_info) in enumerate(files):
        path = f_info.get('path')
        if not path:
            LOG.warning("No path provided to write for entry %s in module %s",
                        i + 1, name)
            continue
        path = os.path.abspath(path)
        extractions = canonicalize_extraction(f_info.get('encoding'))
        contents = extract_contents(f_info.get('content', ''), extractions)
        (u, g) = util.extract_usergroup(f_info.get('owner', DEFAULT_OWNER))
        perms = decode_perms(f_info.get('permissions'), DEFAULT_PERMS)
        omode = 'ab' if util.get_cfg_option_bool(f_info, 'append') else 'wb'
        util.write_file(path, contents, omode=omode, mode=perms)
        util.chownbyname(path, u, g)
Example #39
def find_apt_mirror_info(cloud, cfg):
    """find an apt_mirror given the cloud and cfg provided."""

    mirror = None

    # this is the less preferred way of specifying a mirror; preferred would
    # be to use the distro's search or package_mirror.
    mirror = cfg.get("apt_mirror", None)

    search = cfg.get("apt_mirror_search", None)
    if not mirror and search:
        mirror = util.search_for_mirror(search)

    if (not mirror and
            util.get_cfg_option_bool(cfg, "apt_mirror_search_dns", False)):
        mydom = ""
        doms = []

        # if we have a fqdn, then search its domain portion first
        (_hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud)
        mydom = ".".join(fqdn.split(".")[1:])
        if mydom:
            doms.append(".%s" % mydom)

        doms.extend((".localdomain", "",))

        mirror_list = []
        distro = cloud.distro.name
        mirrorfmt = "http://%s-mirror%s/%s" % (distro, "%s", distro)
        for post in doms:
            mirror_list.append(mirrorfmt % (post))

        mirror = util.search_for_mirror(mirror_list)

    mirror_info = cloud.datasource.get_package_mirror_info()

    # this is a bit strange.
    # if mirror is set, then one of the legacy options above set it
    # but they do not cover security. so we need to get that from
    # get_package_mirror_info
    if mirror:
        mirror_info.update({'primary': mirror})

    return mirror_info
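A hypothetical cfg illustrating the three mirror-selection paths find_apt_mirror_info() tries in order: an explicit apt_mirror, an apt_mirror_search list of candidates, and finally DNS-based search. Key names are from the code; values are illustrative, and normally only one mechanism would be configured.

cfg = {
    'apt_mirror': 'http://archive.ubuntu.com/ubuntu',        # illustrative
    'apt_mirror_search': ['http://mirror.internal/ubuntu'],  # illustrative
    'apt_mirror_search_dns': False,  # derive http://<distro>-mirror<domain>/<distro>
}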
Example #40
def handle(name, _cfg, _cloud, log, _args):
    default_interface = 'eth0'
    system_info = util.system_info()
    if 'aix' in system_info['platform'].lower():
        default_interface = 'en0'

    interface = util.get_cfg_option_str(_cfg,
                                        'set_hostname_from_interface',
                                        default=default_interface)
    log.debug('Setting hostname based on interface %s' % interface)
    set_hostname = False
    fqdn = None
    # Look up the IP address on the interface
    # and then reverse lookup the hostname in DNS
    info = netinfo.netdev_info()
    if interface in info:
        set_short = util.get_cfg_option_bool(_cfg, "set_dns_shortname", False)
        if 'addr' in info[interface] and info[interface]['addr']:
            # Handle IPv4 address
            set_hostname =_set_hostname(_cfg, _cloud, log,
                                        info[interface]['addr'], set_short)
        elif 'addr6' in info[interface] and info[interface]['addr6']:
            # Handle IPv6 addresses
            for ipaddr in info[interface]['addr6']:
                ipaddr = ipaddr.split('/')[0]
                set_hostname = _set_hostname(_cfg, _cloud, log, ipaddr,
                                             set_short)
                if set_hostname:
                    break
    else:
        log.warning('Interface %s was not found on the system. '
                    'Interfaces found on system: %s' % (interface,
                                                        info.keys()))

    # Reverse lookup failed, fall back to cc_set_hostname way.
    if not set_hostname:
        (short_hostname, fqdn) = util.get_hostname_fqdn(_cfg, _cloud)
        try:
            log.info('Fall back to setting hostname on VM as %s' % fqdn)
            _cloud.distro.set_hostname(short_hostname, fqdn=fqdn)
        except Exception:
            util.logexc(log, "Failed to set the hostname to %s", fqdn)
            raise
Example #41
def handle(name, _cfg, _cloud, log, _args):
    default_interface = 'eth0'
    system_info = util.system_info()
    if 'aix' in system_info['platform'].lower():
        default_interface = 'en0'

    interface = util.get_cfg_option_str(_cfg,
                                        'set_hostname_from_interface',
                                        default=default_interface)
    log.debug('Setting hostname based on interface %s' % interface)
    set_hostname = False
    fqdn = None
    # Look up the IP address on the interface
    # and then reverse lookup the hostname in DNS
    info = netinfo.netdev_info()
    if interface in info:
        set_short = util.get_cfg_option_bool(_cfg, "set_dns_shortname", False)
        if 'addr' in info[interface] and info[interface]['addr']:
            # Handle IPv4 address
            set_hostname = _set_hostname(_cfg, _cloud, log,
                                         info[interface]['addr'], set_short)
        elif 'addr6' in info[interface] and info[interface]['addr6']:
            # Handle IPv6 addresses
            for ipaddr in info[interface]['addr6']:
                ipaddr = ipaddr.split('/')[0]
                set_hostname = _set_hostname(_cfg, _cloud, log, ipaddr,
                                             set_short)
                if set_hostname:
                    break
    else:
        log.warning('Interface %s was not found on the system. '
                    'Interfaces found on system: %s' %
                    (interface, info.keys()))

    # Reverse lookup failed, fall back to cc_set_hostname way.
    if not set_hostname:
        (short_hostname, fqdn) = util.get_hostname_fqdn(_cfg, _cloud)
        try:
            log.info('Fall back to setting hostname on VM as %s' % fqdn)
            _cloud.distro.set_hostname(short_hostname, fqdn=fqdn)
        except Exception:
            util.logexc(log, "Failed to set the hostname to %s", fqdn)
            raise
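The two interface-based hostname variants above read only two config options beyond the interface data; a hypothetical cfg with key names from the code and illustrative values.

cfg = {
    'set_hostname_from_interface': 'eth0',  # default; 'en0' on AIX per the code
    'set_dns_shortname': False,             # passed to _set_hostname() as set_short
}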
Example #42
def handle(name, cfg, cloud, log, _args):
    release = get_release()
    mirrors = find_apt_mirror_info(cloud, cfg)
    if not mirrors or "primary" not in mirrors:
        log.debug(("Skipping module named %s," " no package 'mirror' located"), name)
        return

    # backwards compatibility
    mirror = mirrors["primary"]
    mirrors["mirror"] = mirror

    log.debug("Mirror info: %s" % mirrors)

    if not util.get_cfg_option_bool(cfg, "apt_preserve_sources_list", False):
        generate_sources_list(release, mirrors, cloud, log)
        old_mirrors = cfg.get(
            "apt_old_mirrors", {"primary": "archive.ubuntu.com/ubuntu", "security": "security.ubuntu.com/ubuntu"}
        )
        rename_apt_lists(old_mirrors, mirrors)

    try:
        apply_apt_config(cfg, APT_PROXY_FN, APT_CONFIG_FN)
    except Exception as e:
        log.warn("failed to proxy or apt config info: %s", e)

    # Process 'apt_sources'
    if "apt_sources" in cfg:
        params = mirrors
        params["RELEASE"] = release
        params["MIRROR"] = mirror
        errors = add_sources(cfg["apt_sources"], params)
        for e in errors:
            log.warn("Add source error: %s", ":".join(e))

    dconf_sel = util.get_cfg_option_str(cfg, "debconf_selections", False)
    if dconf_sel:
        log.debug("Setting debconf selections per cloud config")
        try:
            util.subp(("debconf-set-selections", "-"), dconf_sel)
        except Exception:
            util.logexc(log, "Failed to run debconf-set-selections")
Example #43
def write_files(name, files):
    if not files:
        return

    for (i, f_info) in enumerate(files):
        path = f_info.get("path")
        if not path:
            LOG.warning(
                "No path provided to write for entry %s in module %s",
                i + 1,
                name,
            )
            continue
        path = os.path.abspath(path)
        extractions = canonicalize_extraction(f_info.get("encoding"))
        contents = extract_contents(f_info.get("content", ""), extractions)
        (u, g) = util.extract_usergroup(f_info.get("owner", DEFAULT_OWNER))
        perms = decode_perms(f_info.get("permissions"), DEFAULT_PERMS)
        omode = "ab" if util.get_cfg_option_bool(f_info, "append") else "wb"
        util.write_file(path, contents, omode=omode, mode=perms)
        util.chownbyname(path, u, g)
Example #44
def handle(name, cfg, cloud, log, args):
    # remove the static keys from the pristine image
    for f in glob.glob("/etc/ssh/ssh_host_*_key*"):
        try:
            os.unlink(f)
        except:
            pass

    if cfg.has_key("ssh_keys"):
        # if there are keys in cloud-config, use them
        key2file = {
            "rsa_private": ("/etc/ssh/ssh_host_rsa_key", 0600),
            "rsa_public": ("/etc/ssh/ssh_host_rsa_key.pub", 0644),
            "dsa_private": ("/etc/ssh/ssh_host_dsa_key", 0600),
            "dsa_public": ("/etc/ssh/ssh_host_dsa_key.pub", 0644)
        }

        for key, val in cfg["ssh_keys"].items():
            if key2file.has_key(key):
                util.write_file(key2file[key][0], val, key2file[key][1])
    else:
        # if not, generate them
        genkeys = 'ssh-keygen -f /etc/ssh/ssh_host_rsa_key -t rsa -N ""; '
        genkeys += 'ssh-keygen -f /etc/ssh/ssh_host_dsa_key -t dsa -N ""; '
        subprocess.call(('sh', '-c', "{ %s } </dev/null" % (genkeys)))

    try:
        user = util.get_cfg_option_str(cfg, 'user')
        disable_root = util.get_cfg_option_bool(cfg, "disable_root", True)
        keys = cloud.get_public_ssh_keys()

        if cfg.has_key("ssh_authorized_keys"):
            cfgkeys = cfg["ssh_authorized_keys"]
            keys.extend(cfgkeys)

        apply_credentials(keys, user, disable_root)
    except:
        log.warn("applying credentials failed!\n")

    send_ssh_keys_to_console()
Example #45
def handle(name,cfg,cloud,log,args):
    # remove the static keys from the pristine image
    for f in glob.glob("/etc/ssh/ssh_host_*_key*"):
        try: os.unlink(f)
        except: pass

    if cfg.has_key("ssh_keys"):
        # if there are keys in cloud-config, use them
        key2file = {
            "rsa_private" : ("/etc/ssh/ssh_host_rsa_key", 0600),
            "rsa_public"  : ("/etc/ssh/ssh_host_rsa_key.pub", 0644),
            "dsa_private" : ("/etc/ssh/ssh_host_dsa_key", 0600),
            "dsa_public"  : ("/etc/ssh/ssh_host_dsa_key.pub", 0644)
        }

        for key,val in cfg["ssh_keys"].items():
            if key2file.has_key(key):
                util.write_file(key2file[key][0],val,key2file[key][1])
    else:
        # if not, generate them
        genkeys ='ssh-keygen -f /etc/ssh/ssh_host_rsa_key -t rsa -N ""; '
        genkeys+='ssh-keygen -f /etc/ssh/ssh_host_dsa_key -t dsa -N ""; '
        subprocess.call(('sh', '-c', "{ %s } </dev/null" % (genkeys)))

    try:
        user = util.get_cfg_option_str(cfg,'user')
        disable_root = util.get_cfg_option_bool(cfg, "disable_root", True)
        keys = cloud.get_public_ssh_keys()

        if cfg.has_key("ssh_authorized_keys"):
            cfgkeys = cfg["ssh_authorized_keys"]
            keys.extend(cfgkeys)

        apply_credentials(keys,user,disable_root)
    except:
        log.warn("applying credentials failed!\n")

    send_ssh_keys_to_console()
Example #46
def _genrepo(cfg, cloud, log):
    """Generate yum repo files from provided templates."""
    # The repo_preserve option is used to disable this feature
    if util.get_cfg_option_bool(cfg, 'repo_preserve', False):
        log.info("Not generating yum repo files, per configuration.")
        return
    log.debug("Generating default repo files")

    # get the repo dir from a legacy option (see cc_yum_add_repo.py)
    # TODO: get it from a more sensible path, or from yum?
    reposdir = util.get_cfg_option_str(cfg, 'yum_repo_dir', '/etc/yum.repos.d')

    # This function gets the mirror url from the config, with the region
    # name interpolated into it.
    mirror_info = cloud.datasource.get_package_mirror_info()
    log.debug("mirror_info: %r", mirror_info)

    if 'regional' not in mirror_info:
        log.debug('No mirror info found; ignoring.')
        return
    # It would be better to get 'name' from the config, but I'm not sure
    # where to put it in there that might end up being standard
    params = {'name': 'amzn', 'mirror': mirror_info['regional']}
    log.debug("Using mirror: %s", params['mirror'])

    repo_templates = glob(cloud.paths.template_tpl % 'amzn-*.repo')

    # extract the prefix and suffix from the template filename so we can
    # extract the filename later
    (tpl_prefix, tpl_suffix) = cloud.paths.template_tpl.split('%s', 1)

    for template_fn in repo_templates:
        out_fn = os.path.join(
            reposdir,
            # extract the filename from the template path
            template_fn[len(tpl_prefix):-len(tpl_suffix)])
        templater.render_to_file(template_fn, out_fn, params)
Example #47
def main_init(name, args):
    deps = [sources.DEP_FILESYSTEM, sources.DEP_NETWORK]
    if args.local:
        deps = [sources.DEP_FILESYSTEM]

    if not args.local:
        # See doc/kernel-cmdline.txt
        #
        # This is used in maas datasource, in "ephemeral" (read-only root)
        # environment where the instance netboots to iscsi ro root.
        # and the entity that controls the pxe config has to configure
        # the maas datasource.
        #
        # Could be used elsewhere, only works on network based (not local).
        root_name = "%s.d" % (CLOUD_CONFIG)
        target_fn = os.path.join(root_name, "91_kernel_cmdline_url.cfg")
        util.read_write_cmdline_url(target_fn)

    # Cloud-init 'init' stage is broken up into the following sub-stages
    # 1. Ensure that the init object fetches its config without errors
    # 2. Setup logging/output redirections with resultant config (if any)
    # 3. Initialize the cloud-init filesystem
    # 4. Check if we can stop early by looking for various files
    # 5. Fetch the datasource
    # 6. Connect to the current instance location + update the cache
    # 7. Consume the userdata (handlers get activated here)
    # 8. Construct the modules object
    # 9. Adjust any subsequent logging/output redirections using the modules
    #    objects config as it may be different from init object
    # 10. Run the modules for the 'init' stage
    # 11. Done!
    if not args.local:
        w_msg = welcome_format(name)
    else:
        w_msg = welcome_format("%s-local" % (name))
    init = stages.Init(ds_deps=deps, reporter=args.reporter)
    # Stage 1
    init.read_cfg(extract_fns(args))
    # Stage 2
    outfmt = None
    errfmt = None
    try:
        LOG.debug("Closing stdin")
        util.close_stdin()
        (outfmt, errfmt) = util.fixup_output(init.cfg, name)
    except Exception:
        util.logexc(LOG, "Failed to setup output redirection!")
        print_exc("Failed to setup output redirection!")
    if args.debug:
        # Reset so that all the debug handlers are closed out
        LOG.debug(("Logging being reset, this logger may no"
                   " longer be active shortly"))
        logging.resetLogging()
    logging.setupLogging(init.cfg)
    apply_reporting_cfg(init.cfg)

    # Any log usage prior to setupLogging above did not have local user log
    # config applied.  We send the welcome message now, as stderr/out have
    # been redirected and log now configured.
    welcome(name, msg=w_msg)

    # Stage 3
    try:
        init.initialize()
    except Exception:
        util.logexc(LOG, "Failed to initialize, likely bad things to come!")
    # Stage 4
    path_helper = init.paths
    mode = sources.DSMODE_LOCAL if args.local else sources.DSMODE_NETWORK

    if mode == sources.DSMODE_NETWORK:
        existing = "trust"
        sys.stderr.write("%s\n" % (netinfo.debug_info()))
        LOG.debug(("Checking to see if files that we need already"
                   " exist from a previous run that would allow us"
                   " to stop early."))
        # no-net is written by upstart cloud-init-nonet when network failed
        # to come up
        stop_files = [
            os.path.join(path_helper.get_cpath("data"), "no-net"),
        ]
        existing_files = []
        for fn in stop_files:
            if os.path.isfile(fn):
                existing_files.append(fn)

        if existing_files:
            LOG.debug("[%s] Exiting. stop file %s existed",
                      mode, existing_files)
            return (None, [])
        else:
            LOG.debug("Execution continuing, no previous run detected that"
                      " would allow us to stop early.")
    else:
        existing = "check"
        if util.get_cfg_option_bool(init.cfg, 'manual_cache_clean', False):
            existing = "trust"

        init.purge_cache()
        # Delete the non-net file as well
        util.del_file(os.path.join(path_helper.get_cpath("data"), "no-net"))

    # Stage 5
    try:
        init.fetch(existing=existing)
        # if in network mode, and the datasource is local
        # then work was done at that stage.
        if mode == sources.DSMODE_NETWORK and init.datasource.dsmode != mode:
            LOG.debug("[%s] Exiting. datasource %s in local mode",
                      mode, init.datasource)
            return (None, [])
    except sources.DataSourceNotFoundException:
        # In the case of 'cloud-init init' without '--local' it is a bit
        # more likely that the user would consider it a failure if nothing
        # was found. When using upstart, the console log will also mention
        # a job failure if the exit code is != 0.
        if mode == sources.DSMODE_LOCAL:
            LOG.debug("No local datasource found")
        else:
            util.logexc(LOG, ("No instance datasource found!"
                              " Likely bad things to come!"))
        if not args.force:
            init.apply_network_config(bring_up=not args.local)
            LOG.debug("[%s] Exiting without datasource in local mode", mode)
            if mode == sources.DSMODE_LOCAL:
                return (None, [])
            else:
                return (None, ["No instance datasource found."])
        else:
            LOG.debug("[%s] barreling on in force mode without datasource",
                      mode)

    # Stage 6
    iid = init.instancify()
    LOG.debug("[%s] %s will now be targeting instance id: %s. new=%s",
              mode, name, iid, init.is_new_instance())

    init.apply_network_config(bring_up=bool(mode != sources.DSMODE_LOCAL))

    if mode == sources.DSMODE_LOCAL:
        if init.datasource.dsmode != mode:
            LOG.debug("[%s] Exiting. datasource %s not in local mode.",
                      mode, init.datasource)
            return (init.datasource, [])
        else:
            LOG.debug("[%s] %s is in local mode, will apply init modules now.",
                      mode, init.datasource)

    # update fully realizes user-data (pulling in #include if necessary)
    init.update()
    # Stage 7
    try:
        # Attempt to consume the data per instance.
        # This may run user-data handlers and/or perform
        # url downloads and such as needed.
        (ran, _results) = init.cloudify().run('consume_data',
                                              init.consume_data,
                                              args=[PER_INSTANCE],
                                              freq=PER_INSTANCE)
        if not ran:
            # Just consume anything that is set to run per-always
            # if nothing ran in the per-instance code
            #
            # See: https://bugs.launchpad.net/bugs/819507 for a little
            # reason behind this...
            init.consume_data(PER_ALWAYS)
    except Exception:
        util.logexc(LOG, "Consuming user data failed!")
        return (init.datasource, ["Consuming user data failed!"])

    apply_reporting_cfg(init.cfg)

    # Stage 8 - re-read and apply relevant cloud-config to include user-data
    mods = stages.Modules(init, extract_fns(args), reporter=args.reporter)
    # Stage 9
    try:
        outfmt_orig = outfmt
        errfmt_orig = errfmt
        (outfmt, errfmt) = util.get_output_cfg(mods.cfg, name)
        if outfmt_orig != outfmt or errfmt_orig != errfmt:
            LOG.warn("Stdout, stderr changing to (%s, %s)", outfmt, errfmt)
            (outfmt, errfmt) = util.fixup_output(mods.cfg, name)
    except Exception:
        util.logexc(LOG, "Failed to re-adjust output redirection!")
    logging.setupLogging(mods.cfg)

    # Stage 10
    return (init.datasource, run_module_section(mods, name, name))
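The Stage 4 'stop early' logic above boils down to scanning a list of sentinel paths. A standalone sketch of that pattern (the file names here are illustrative, not the full set cloud-init checks):

import os

def find_stop_files(data_dir, names=("no-net",)):
    # Return whichever sentinel files already exist; a non-empty result
    # means a previous run (or the init system) asked us to stop early.
    candidates = [os.path.join(data_dir, name) for name in names]
    return [fn for fn in candidates if os.path.isfile(fn)]

# Usage sketch:
#   if find_stop_files("/var/lib/cloud/data"):
#       return (None, [])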
Example #48
0
def handle(_name, cfg, cloud, log, _args):
    update = util.get_cfg_option_bool(cfg, 'apt_update', False)
    upgrade = util.get_cfg_option_bool(cfg, 'apt_upgrade', False)

    release = get_release()

    mirror = find_apt_mirror(cloud, cfg)

    log.debug("selected mirror at: %s" % mirror)

    if not util.get_cfg_option_bool(cfg, 'apt_preserve_sources_list',
                                    False):
        generate_sources_list(release, mirror)
        old_mir = util.get_cfg_option_str(cfg, 'apt_old_mirror',
                                          "archive.ubuntu.com/ubuntu")
        rename_apt_lists(old_mir, mirror)

    # set up proxy
    proxy = cfg.get("apt_proxy", None)
    proxy_filename = "/etc/apt/apt.conf.d/95cloud-init-proxy"
    if proxy:
        try:
            contents = "Acquire::HTTP::Proxy \"%s\";\n"
            with open(proxy_filename, "w") as fp:
                fp.write(contents % proxy)
        except Exception as e:
            log.warn("Failed to write proxy to %s: %s" % (proxy_filename, e))
    elif os.path.isfile(proxy_filename):
        os.unlink(proxy_filename)

    # process 'apt_sources'
    if 'apt_sources' in cfg:
        errors = add_sources(cfg['apt_sources'],
                             {'MIRROR': mirror, 'RELEASE': release})
        for e in errors:
            log.warn("Source Error: %s\n" % ':'.join(e))

    dconf_sel = util.get_cfg_option_str(cfg, 'debconf_selections', False)
    if dconf_sel:
        log.debug("setting debconf selections per cloud config")
        try:
            util.subp(('debconf-set-selections', '-'), dconf_sel)
        except:
            log.error("Failed to run debconf-set-selections")
            log.debug(traceback.format_exc())

    pkglist = util.get_cfg_option_list_or_str(cfg, 'packages', [])

    errors = []
    if update or len(pkglist) or upgrade:
        try:
            cc.update_package_sources()
        except subprocess.CalledProcessError as e:
            log.warn("apt-get update failed")
            log.debug(traceback.format_exc())
            errors.append(e)

    if upgrade:
        try:
            cc.apt_get("upgrade")
        except subprocess.CalledProcessError as e:
            log.warn("apt upgrade failed")
            log.debug(traceback.format_exc())
            errors.append(e)

    if len(pkglist):
        try:
            cc.install_packages(pkglist)
        except subprocess.CalledProcessError as e:
            log.warn("Failed to install packages: %s " % pkglist)
            log.debug(traceback.format_exc())
            errors.append(e)

    if len(errors):
        raise errors[0]

    return True
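For reference, a hedged sketch of the kind of configuration dictionary this handler consumes. The top-level key names come from the code above; the values, and the shape of each apt_sources entry, are assumptions for illustration only:

cfg = {
    'apt_update': True,
    'apt_upgrade': False,
    'apt_preserve_sources_list': False,
    'apt_proxy': 'http://proxy.example.com:3128',  # hypothetical proxy
    # The entry shape below is an assumption about what add_sources expects.
    'apt_sources': [{'source': 'deb $MIRROR $RELEASE multiverse'}],
    'packages': ['htop', 'tmux'],
}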
Example #49
0
def _multi_cfg_bool_get(cfg, *keys):
    for k in keys:
        if util.get_cfg_option_bool(cfg, k, False):
            return True
    return False
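A usage sketch for the helper above, ORing a few alternate config keys the way callers typically do (the key names are illustrative, and util.get_cfg_option_bool from the snippets above is assumed to be importable):

cfg = {'package_upgrade': True}

# True if any of the listed keys is set to a truthy value in cfg.
do_upgrade = _multi_cfg_bool_get(cfg, 'apt_upgrade', 'package_upgrade')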
Example #50
0
def handle(name, cfg, cloud, log, _args):

    # If there isn't a chef key in the configuration don't do anything
    if 'chef' not in cfg:
        log.debug(("Skipping module named %s,"
                  " no 'chef' key in configuration"), name)
        return
    chef_cfg = cfg['chef']

    # Ensure the chef directories we use exist
    for d in CHEF_DIRS:
        util.ensure_dir(d)

    # Set the validation key based on the presence of either 'validation_key'
    # or 'validation_cert'. In the case where both exist, 'validation_key'
    # takes precedence
    for key in ('validation_key', 'validation_cert'):
        if key in chef_cfg and chef_cfg[key]:
            util.write_file('/etc/chef/validation.pem', chef_cfg[key])
            break

    # Create the chef config from template
    template_fn = cloud.get_template_filename('chef_client.rb')
    if template_fn:
        iid = str(cloud.datasource.get_instance_id())
        params = {
            'server_url': chef_cfg['server_url'],
            'node_name': util.get_cfg_option_str(chef_cfg, 'node_name', iid),
            'environment': util.get_cfg_option_str(chef_cfg, 'environment',
                                                   '_default'),
            'validation_name': chef_cfg['validation_name']
        }
        templater.render_to_file(template_fn, '/etc/chef/client.rb', params)
    else:
        log.warn("No template found, not rendering to /etc/chef/client.rb")

    # set the firstboot json
    initial_json = {}
    if 'run_list' in chef_cfg:
        initial_json['run_list'] = chef_cfg['run_list']
    if 'initial_attributes' in chef_cfg:
        initial_attributes = chef_cfg['initial_attributes']
        for k in list(initial_attributes.keys()):
            initial_json[k] = initial_attributes[k]
    util.write_file('/etc/chef/firstboot.json', json.dumps(initial_json))

    # If chef is not installed, we install chef based on 'install_type'
    if (not os.path.isfile('/usr/bin/chef-client') or
            util.get_cfg_option_bool(chef_cfg,
                'force_install', default=False)):

        install_type = util.get_cfg_option_str(chef_cfg, 'install_type',
                                               'packages')
        if install_type == "gems":
            # this will install and run the chef-client from gems
            chef_version = util.get_cfg_option_str(chef_cfg, 'version', None)
            ruby_version = util.get_cfg_option_str(chef_cfg, 'ruby_version',
                                                   RUBY_VERSION_DEFAULT)
            install_chef_from_gems(cloud.distro, ruby_version, chef_version)
            # and finally, run chef-client
            log.debug('Running chef-client')
            util.subp(['/usr/bin/chef-client',
                       '-d', '-i', '1800', '-s', '20'], capture=False)
        elif install_type == 'packages':
            # this will install and run the chef-client from packages
            cloud.distro.install_packages(('chef',))
        elif install_type == 'omnibus':
            url = util.get_cfg_option_str(chef_cfg, "omnibus_url", OMNIBUS_URL)
            content = url_helper.readurl(url=url, retries=5)
            with util.tempdir() as tmpd:
                # use tmpd over tmpfile to avoid 'Text file busy' on execute
                tmpf = "%s/chef-omnibus-install" % tmpd
                util.write_file(tmpf, str(content), mode=0o700)
                util.subp([tmpf], capture=False)
        else:
            log.warn("Unknown chef install type %s", install_type)
Example #51
0
def handle(_name, cfg, cloud, log, args):
    if len(args) != 0:
        # if run from the command line with args given, wipe chpasswd['list']
        password = args[0]
        if 'chpasswd' in cfg and 'list' in cfg['chpasswd']:
            del cfg['chpasswd']['list']
    else:
        password = util.get_cfg_option_str(cfg, "password", None)

    expire = True
    plist = None

    if 'chpasswd' in cfg:
        chfg = cfg['chpasswd']
        plist = util.get_cfg_option_str(chfg, 'list', plist)
        expire = util.get_cfg_option_bool(chfg, 'expire', expire)

    if not plist and password:
        (users, _groups) = ds.normalize_users_groups(cfg, cloud.distro)
        (user, _user_config) = ds.extract_default(users)
        if user:
            plist = "%s:%s" % (user, password)
        else:
            log.warn("No default or defined user to change password for.")

    errors = []
    if plist:
        plist_in = []
        randlist = []
        users = []
        for line in plist.splitlines():
            u, p = line.split(':', 1)
            if p == "R" or p == "RANDOM":
                p = rand_user_password()
                randlist.append("%s:%s" % (u, p))
            plist_in.append("%s:%s" % (u, p))
            users.append(u)

        ch_in = '\n'.join(plist_in) + '\n'
        try:
            log.debug("Changing password for %s:", users)
            util.subp(['chpasswd'], ch_in)
        except Exception as e:
            errors.append(e)
            util.logexc(log, "Failed to set passwords with chpasswd for %s",
                        users)

        if len(randlist):
            blurb = ("Set the following 'random' passwords\n",
                     '\n'.join(randlist))
            sys.stderr.write("%s\n%s\n" % blurb)

        if expire:
            expired_users = []
            for u in users:
                try:
                    util.subp(['passwd', '--expire', u])
                    expired_users.append(u)
                except Exception as e:
                    errors.append(e)
                    util.logexc(log, "Failed to set 'expire' for %s", u)
            if expired_users:
                log.debug("Expired passwords for: %s users", expired_users)

    change_pwauth = False
    pw_auth = None
    if 'ssh_pwauth' in cfg:
        if util.is_true(cfg['ssh_pwauth']):
            change_pwauth = True
            pw_auth = 'yes'
        elif util.is_false(cfg['ssh_pwauth']):
            change_pwauth = True
            pw_auth = 'no'
        elif str(cfg['ssh_pwauth']).lower() == 'unchanged':
            log.debug('Leaving auth line unchanged')
            change_pwauth = False
        elif not str(cfg['ssh_pwauth']).strip():
            log.debug('Leaving auth line unchanged')
            change_pwauth = False
        elif not cfg['ssh_pwauth']:
            log.debug('Leaving auth line unchanged')
            change_pwauth = False
        else:
            msg = 'Unrecognized value %s for ssh_pwauth' % cfg['ssh_pwauth']
            util.logexc(log, msg)

    if change_pwauth:
        replaced_auth = False

        # See: man sshd_config
        old_lines = ssh_util.parse_ssh_config(ssh_util.DEF_SSHD_CFG)
        new_lines = []
        i = 0
        for (i, line) in enumerate(old_lines):
            # Keywords are case-insensitive and arguments are case-sensitive
            if line.key == 'passwordauthentication':
                log.debug("Replacing auth line %s with %s", i + 1, pw_auth)
                replaced_auth = True
                line.value = pw_auth
            new_lines.append(line)

        if not replaced_auth:
            log.debug("Adding new auth line %s", i + 1)
            replaced_auth = True
            new_lines.append(ssh_util.SshdConfigLine('',
                                                     'PasswordAuthentication',
                                                     pw_auth))

        lines = [str(l) for l in new_lines]
        util.write_file(ssh_util.DEF_SSHD_CFG, "\n".join(lines))

        try:
            cmd = cloud.distro.init_cmd  # Default service
            cmd.append(cloud.distro.get_option('ssh_svcname', 'ssh'))
            cmd.append('restart')
            if 'systemctl' in cmd:  # Switch action ordering
                cmd[1], cmd[2] = cmd[2], cmd[1]
            cmd = filter(None, cmd)  # Remove empty arguments
            util.subp(cmd)
            log.debug("Restarted the ssh daemon")
        except:
            util.logexc(log, "Restarting of the ssh daemon failed")

    if len(errors):
        log.debug("%s errors occured, re-raising the last one", len(errors))
        raise errors[-1]
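The 'user:password' lines fed to chpasswd above follow a simple format. A minimal sketch of building that stdin payload, with the random-password generator stubbed out as an assumption:

def build_chpasswd_input(plist, rand_pw=lambda: "changeme"):
    # plist is newline-separated 'user:password' pairs; a password of 'R'
    # or 'RANDOM' means "generate one".
    lines = []
    for line in plist.splitlines():
        user, password = line.split(':', 1)
        if password in ("R", "RANDOM"):
            password = rand_pw()
        lines.append("%s:%s" % (user, password))
    return '\n'.join(lines) + '\n'

# Usage sketch:
#   stdin_text = build_chpasswd_input("alice:RANDOM\nbob:secret")
#   util.subp(['chpasswd'], stdin_text)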
Example #52
0
def handle(name, cfg, cloud, log, _args):
    """Handler method activated by cloud-init."""

    # If there isn't a chef key in the configuration don't do anything
    if 'chef' not in cfg:
        log.debug(("Skipping module named %s,"
                  " no 'chef' key in configuration"), name)
        return
    chef_cfg = cfg['chef']

    # Ensure the chef directories we use exist
    chef_dirs = util.get_cfg_option_list(chef_cfg, 'directories')
    if not chef_dirs:
        chef_dirs = list(CHEF_DIRS)
    for d in itertools.chain(chef_dirs, REQUIRED_CHEF_DIRS):
        util.ensure_dir(d)

    # Set the validation key based on the presence of either 'validation_key'
    # or 'validation_cert'. In the case where both exist, 'validation_key'
    # takes precedence
    for key in ('validation_key', 'validation_cert'):
        if key in chef_cfg and chef_cfg[key]:
            util.write_file(CHEF_VALIDATION_PEM_PATH, chef_cfg[key])
            break

    # Create the chef config from template
    template_fn = cloud.get_template_filename('chef_client.rb')
    if template_fn:
        iid = str(cloud.datasource.get_instance_id())
        params = get_template_params(iid, chef_cfg, log)
        # Make a best-effort attempt to ensure that the template values
        # that are associated with paths have their parent directories
        # created before they are used by the chef-client itself.
        param_paths = set()
        for (k, v) in params.items():
            if k in CHEF_RB_TPL_PATH_KEYS and v:
                param_paths.add(os.path.dirname(v))
        util.ensure_dirs(param_paths)
        templater.render_to_file(template_fn, CHEF_RB_PATH, params)
    else:
        log.warn("No template found, not rendering to %s",
                 CHEF_RB_PATH)

    # Set the firstboot json
    fb_filename = util.get_cfg_option_str(chef_cfg, 'firstboot_path',
                                          default=CHEF_FB_PATH)
    if not fb_filename:
        log.info("First boot path empty, not writing first boot json file")
    else:
        initial_json = {}
        if 'run_list' in chef_cfg:
            initial_json['run_list'] = chef_cfg['run_list']
        if 'initial_attributes' in chef_cfg:
            initial_attributes = chef_cfg['initial_attributes']
            for k in list(initial_attributes.keys()):
                initial_json[k] = initial_attributes[k]
        util.write_file(fb_filename, json.dumps(initial_json))

    # Try to install chef, if it's not already installed...
    force_install = util.get_cfg_option_bool(chef_cfg,
                                             'force_install', default=False)
    if not is_installed() or force_install:
        run = install_chef(cloud, chef_cfg, log)
    elif is_installed():
        run = util.get_cfg_option_bool(chef_cfg, 'exec', default=False)
    else:
        run = False
    if run:
        run_chef(chef_cfg, log)
        post_run_chef(chef_cfg, log)
Example #53
0
def handle(_name, cfg, cloud, log, _args):

    # remove the static keys from the pristine image
    if cfg.get("ssh_deletekeys", True):
        key_pth = os.path.join("/etc/ssh/", "ssh_host_*key*")
        for f in glob.glob(key_pth):
            try:
                util.del_file(f)
            except:
                util.logexc(log, "Failed deleting key file %s", f)

    if "ssh_keys" in cfg:
        # if there are keys in cloud-config, use them
        for (key, val) in cfg["ssh_keys"].items():
            if key in CONFIG_KEY_TO_FILE:
                tgt_fn = CONFIG_KEY_TO_FILE[key][0]
                tgt_perms = CONFIG_KEY_TO_FILE[key][1]
                util.write_file(tgt_fn, val, tgt_perms)

        for (priv, pub) in PRIV_TO_PUB.items():
            if pub in cfg['ssh_keys'] or priv not in cfg['ssh_keys']:
                continue
            pair = (CONFIG_KEY_TO_FILE[priv][0], CONFIG_KEY_TO_FILE[pub][0])
            cmd = ['sh', '-xc', KEY_GEN_TPL % pair]
            try:
                # TODO(harlowja): Is this guard needed?
                with util.SeLinuxGuard("/etc/ssh", recursive=True):
                    util.subp(cmd, capture=False)
                log.debug("Generated a key for %s from %s", pair[0], pair[1])
            except:
                util.logexc(log, "Failed generated a key for %s from %s",
                            pair[0], pair[1])
    else:
        # if not, generate them
        genkeys = util.get_cfg_option_list(cfg,
                                           'ssh_genkeytypes',
                                           GENERATE_KEY_NAMES)
        lang_c = os.environ.copy()
        lang_c['LANG'] = 'C'
        for keytype in genkeys:
            keyfile = KEY_FILE_TPL % (keytype)
            if os.path.exists(keyfile):
                continue
            util.ensure_dir(os.path.dirname(keyfile))
            cmd = ['ssh-keygen', '-t', keytype, '-N', '', '-f', keyfile]

            # TODO(harlowja): Is this guard needed?
            with util.SeLinuxGuard("/etc/ssh", recursive=True):
                try:
                    out, err = util.subp(cmd, capture=True, env=lang_c)
                    sys.stdout.write(util.decode_binary(out))
                except util.ProcessExecutionError as e:
                    err = util.decode_binary(e.stderr).lower()
                    if (e.exit_code == 1 and
                            err.lower().startswith("unknown key")):
                        log.debug("ssh-keygen: unknown key type '%s'", keytype)
                    else:
                        util.logexc(log, "Failed generating key type %s to "
                                    "file %s", keytype, keyfile)

    try:
        (users, _groups) = ds.normalize_users_groups(cfg, cloud.distro)
        (user, _user_config) = ds.extract_default(users)
        disable_root = util.get_cfg_option_bool(cfg, "disable_root", True)
        disable_root_opts = util.get_cfg_option_str(cfg, "disable_root_opts",
                                                    DISABLE_ROOT_OPTS)

        keys = cloud.get_public_ssh_keys() or []
        if "ssh_authorized_keys" in cfg:
            cfgkeys = cfg["ssh_authorized_keys"]
            keys.extend(cfgkeys)

        apply_credentials(keys, user, disable_root, disable_root_opts)
    except:
        util.logexc(log, "Applying ssh credentials failed!")
Example #54
0
def post_run_chef(chef_cfg, log):
    delete_pem = util.get_cfg_option_bool(chef_cfg,
                                          'delete_validation_post_exec',
                                          default=False)
    if delete_pem and os.path.isfile(CHEF_VALIDATION_PEM_PATH):
        os.unlink(CHEF_VALIDATION_PEM_PATH)
Example #55
0
def handle(name, cfg, cloud, log, _args):
    """Handler method activated by cloud-init."""

    # If there isn't a chef key in the configuration don't do anything
    if 'chef' not in cfg:
        log.debug(("Skipping module named %s,"
                  " no 'chef' key in configuration"), name)
        return
    chef_cfg = cfg['chef']

    # Ensure the chef directories we use exist
    chef_dirs = util.get_cfg_option_list(chef_cfg, 'directories')
    if not chef_dirs:
        chef_dirs = list(CHEF_DIRS)
    for d in itertools.chain(chef_dirs, REQUIRED_CHEF_DIRS):
        util.ensure_dir(d)

    vkey_path = chef_cfg.get('validation_key', CHEF_VALIDATION_PEM_PATH)
    vcert = chef_cfg.get('validation_cert')
    if vcert and vcert != "system":
        # Re-flow a key that was provided on a single space-separated line
        # back into proper PEM format with real newlines.
        vcert = ('-----BEGIN RSA PRIVATE KEY-----\n' +
                 '\n'.join(
                     re.sub(' -----END RSA PRIVATE KEY-----$', '',
                            re.sub('^-----BEGIN RSA PRIVATE KEY----- ', '',
                                   vcert)).split(' ')) +
                 '\n-----END RSA PRIVATE KEY-----')
    # special value 'system' means do not overwrite the file
    # but still render the template to contain 'validation_key'
    if vcert:
        if vcert != "system":
            util.write_file(vkey_path, vcert)
        elif not os.path.isfile(vkey_path):
            log.warn("chef validation_cert provided as 'system', but "
                     "validation_key path '%s' does not exist.",
                     vkey_path)

    # Create the chef config from template
    template_fn = cloud.get_template_filename('chef_client.rb')
    if template_fn:
        iid = str(cloud.datasource.get_instance_id())
        params = get_template_params(iid, chef_cfg, log)
        # Make a best-effort attempt to ensure that the template values
        # that are associated with paths have their parent directories
        # created before they are used by the chef-client itself.
        param_paths = set()
        for (k, v) in params.items():
            if k in CHEF_RB_TPL_PATH_KEYS and v:
                param_paths.add(os.path.dirname(v))
        util.ensure_dirs(param_paths)
        templater.render_to_file(template_fn, CHEF_RB_PATH, params)
    else:
        log.warn("No template found, not rendering to %s",
                 CHEF_RB_PATH)

    # Set the firstboot json
    fb_filename = util.get_cfg_option_str(chef_cfg, 'firstboot_path',
                                          default=CHEF_FB_PATH)
    if not fb_filename:
        log.info("First boot path empty, not writing first boot json file")
    else:
        initial_json = {}
        if 'run_list' in chef_cfg:
            initial_json['run_list'] = chef_cfg['run_list']
        if 'initial_attributes' in chef_cfg:
            initial_attributes = chef_cfg['initial_attributes']
            for k in list(initial_attributes.keys()):
                initial_json[k] = initial_attributes[k]
        util.write_file(fb_filename, json.dumps(initial_json))

    # Try to install chef, if it's not already installed...
    force_install = util.get_cfg_option_bool(chef_cfg,
                                             'force_install', default=False)
    if not is_installed() or force_install:
        run = install_chef(cloud, chef_cfg, log)
    elif is_installed():
        run = util.get_cfg_option_bool(chef_cfg, 'exec', default=False)
    else:
        run = False
    if run:
        run_chef(chef_cfg, log)
        post_run_chef(chef_cfg, log)
Example #56
0
def handle(name, cfg, cloud, log, args):
    if util.get_cfg_option_bool(cfg, "disable_ec2_metadata", False):
        fwall = "route add -host 169.254.169.254 reject"
        subprocess.call(fwall.split(' '))
Example #57
0
def handle(name, cfg, cloud, log, _args):
    # If there isn't a puppet key in the configuration don't do anything
    if 'puppet' not in cfg:
        log.debug(("Skipping module named %s,"
                   " no 'puppet' configuration found"), name)
        return

    puppet_cfg = cfg['puppet']
    # Start by installing the puppet package if necessary...
    install = util.get_cfg_option_bool(puppet_cfg, 'install', True)
    version = util.get_cfg_option_str(puppet_cfg, 'version', None)
    package_name = util.get_cfg_option_str(
        puppet_cfg, 'package_name', PUPPET_PACKAGE_NAME)
    conf_file = util.get_cfg_option_str(
        puppet_cfg, 'conf_file', PUPPET_CONF_PATH)
    ssl_dir = util.get_cfg_option_str(puppet_cfg, 'ssl_dir', PUPPET_SSL_DIR)

    p_constants = PuppetConstants(conf_file, ssl_dir, log)
    if not install and version:
        log.warn(("Puppet install set false but version supplied,"
                  " doing nothing."))
    elif install:
        log.debug(("Attempting to install puppet %s,"),
                  version if version else 'latest')

        cloud.distro.install_packages((package_name, version))

    # ... and then update the puppet configuration
    if 'conf' in puppet_cfg:
        # Add all sections from the conf object to puppet.conf
        contents = util.load_file(p_constants.conf_path)
        # Create object for reading puppet.conf values
        puppet_config = helpers.DefaultingConfigParser()
        # Read the puppet.conf values from the original file so the new
        # settings can be merged in. First clean them up
        # (TODO(harlowja) is this really needed??)
        cleaned_lines = [i.lstrip() for i in contents.splitlines()]
        cleaned_contents = '\n'.join(cleaned_lines)
        # Move to puppet_config.read_file when dropping py2.7
        puppet_config.readfp(   # pylint: disable=W1505
            StringIO(cleaned_contents),
            filename=p_constants.conf_path)
        for (cfg_name, cfg) in puppet_cfg['conf'].items():
            # Cert configuration is a special case
            # Dump the puppet master ca certificate in the correct place
            if cfg_name == 'ca_cert':
                # Puppet ssl sub-directory isn't created yet
                # Create it with the proper permissions and ownership
                util.ensure_dir(p_constants.ssl_dir, 0o771)
                util.chownbyname(p_constants.ssl_dir, 'puppet', 'root')
                util.ensure_dir(p_constants.ssl_cert_dir)

                util.chownbyname(p_constants.ssl_cert_dir, 'puppet', 'root')
                util.write_file(p_constants.ssl_cert_path, cfg)
                util.chownbyname(p_constants.ssl_cert_path, 'puppet', 'root')
            else:
                # Iterate through the config items, we'll use ConfigParser.set
                # to overwrite or create new items as needed
                for (o, v) in cfg.items():
                    if o == 'certname':
                        # Expand %f as the fqdn
                        # TODO(harlowja) should this use the cloud fqdn??
                        v = v.replace("%f", socket.getfqdn())
                        # Expand %i as the instance id
                        v = v.replace("%i", cloud.get_instance_id())
                        # certname needs to be downcased
                        v = v.lower()
                    puppet_config.set(cfg_name, o, v)
        # Now that we have all the config we want, rename the previous
        # puppet.conf and write our new one in its place
        util.rename(p_constants.conf_path, "%s.old"
                    % (p_constants.conf_path))
        util.write_file(p_constants.conf_path, puppet_config.stringify())

    # Set it up so it autostarts
    _autostart_puppet(log)

    # Start puppetd
    util.subp(['service', 'puppet', 'start'], capture=False)
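_autostart_puppet is not shown here. A hedged sketch of what such a helper typically does across the common init systems (the exact commands and paths are assumptions):

def _autostart_puppet_sketch(log):
    # Enable the puppet agent so it starts on subsequent boots, trying the
    # mechanisms of whichever init system appears to be present.
    if os.path.exists('/etc/default/puppet'):
        util.subp(['sed', '-i', '-e', 's/^START=.*/START=yes/',
                   '/etc/default/puppet'], capture=False)
    elif os.path.exists('/bin/systemctl'):
        util.subp(['/bin/systemctl', 'enable', 'puppet.service'],
                  capture=False)
    elif os.path.exists('/sbin/chkconfig'):
        util.subp(['/sbin/chkconfig', 'puppet', 'on'], capture=False)
    else:
        log.warn("Could not enable puppet to autostart")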