def _init_base64_keys(self, reset=False):
        if reset:
            self.base64_keys = None
            self.base64_all = None

        keys = None
        if self.base64_all is None:
            keys = self.list()
            if 'base64_all' in keys:
                self.base64_all = util.is_true(self._get("base64_all"))
            else:
                self.base64_all = False

        if self.base64_all:
            # short circuit if base64_all is true
            return

        if self.base64_keys is None:
            if keys is None:
                keys = self.list()
            b64_keys = set()
            if 'base64_keys' in keys:
                b64_keys = set(self._get("base64_keys").split(","))

            # now add any b64-<keyname> that has a true value
            for key in [k[3:] for k in keys if k.startswith("b64-")]:
                if util.is_true(self._get(key)):
                    b64_keys.add(key)
                else:
                    if key in b64_keys:
                        b64_keys.remove(key)

            self.base64_keys = b64_keys
    def _init_base64_keys(self, reset=False):
        if reset:
            self.base64_keys = None
            self.base64_all = None

        keys = None
        if self.base64_all is None:
            keys = self.list()
            if "base64_all" in keys:
                self.base64_all = util.is_true(self._get("base64_all"))
            else:
                self.base64_all = False

        if self.base64_all:
            # short circuit if base64_all is true
            return

        if self.base64_keys is None:
            if keys is None:
                keys = self.list()
            b64_keys = set()
            if "base64_keys" in keys:
                b64_keys = set(self._get("base64_keys").split(","))

            # now add any b64-<keyname> that has a true value
            for key in [k[3:] for k in keys if k.startswith("b64-")]:
                if util.is_true(self._get(key)):
                    b64_keys.add(key)
                else:
                    if key in b64_keys:
                        b64_keys.remove(key)

            self.base64_keys = b64_keys
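All of these snippets hinge on a truthy-string helper (util.is_true) plus a "b64-<keyname>" naming convention. As a rough, self-contained sketch (the helper below is a stand-in with assumed accepted strings, not cloud-init's actual util.is_true, and the demo store is hypothetical):

def is_true_sketch(val):
    # Stand-in truthy check: accepts True plus a few common true-ish strings.
    if val is True:
        return True
    return str(val).lower().strip() in ("true", "1", "on", "yes")


def select_base64_keys(store):
    """Return the set of key names flagged as base64 in a flat dict store."""
    b64_keys = set()
    if "base64_keys" in store:
        b64_keys = set(store["base64_keys"].split(","))
    # Keys named "b64-<keyname>" toggle <keyname> in or out of the set.
    for key in [k[len("b64-"):] for k in store if k.startswith("b64-")]:
        if is_true_sketch(store["b64-" + key]):
            b64_keys.add(key)
        else:
            b64_keys.discard(key)
    return b64_keys


demo = {"base64_keys": "root_authorized_keys", "b64-user-data": "yes"}
print(select_base64_keys(demo))  # {'root_authorized_keys', 'user-data'}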
Example #3
    def handle_physical(self, command):
        """
        command = {
            'type': 'physical',
            'mac_address': 'c0:d6:9f:2c:e8:80',
            'name': 'eth0',
            'subnets': [
                {'type': 'dhcp4'}
             ],
            'accept-ra': 'true'
        }
        """

        interfaces = self._network_state.get("interfaces", {})
        iface = interfaces.get(command["name"], {})
        for param, val in command.get("params", {}).items():
            iface.update({param: val})

        # convert subnet ipv6 netmask to cidr as needed
        subnets = _normalize_subnets(command.get("subnets"))

        # automatically set 'use_ipv6' if any addresses are ipv6
        if not self.use_ipv6:
            for subnet in subnets:
                if subnet.get("type").endswith("6") or is_ipv6_address(
                    subnet.get("address")
                ):
                    self.use_ipv6 = True
                    break

        accept_ra = command.get("accept-ra", None)
        if accept_ra is not None:
            accept_ra = util.is_true(accept_ra)
        wakeonlan = command.get("wakeonlan", None)
        if wakeonlan is not None:
            wakeonlan = util.is_true(wakeonlan)
        iface.update(
            {
                "name": command.get("name"),
                "type": command.get("type"),
                "mac_address": command.get("mac_address"),
                "inet": "inet",
                "mode": "manual",
                "mtu": command.get("mtu"),
                "address": None,
                "gateway": None,
                "subnets": subnets,
                "accept-ra": accept_ra,
                "wakeonlan": wakeonlan,
            }
        )
        self._network_state["interfaces"].update({command.get("name"): iface})
        self.dump_network_state()
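A minimal sketch of the flag normalization above: 'accept-ra' and 'wakeonlan' arrive as strings in the command dict and are coerced to booleans only when present (is_true_sketch is an assumed stand-in, not cloud-init's util.is_true):

def is_true_sketch(val):
    return val is True or str(val).lower().strip() in ("true", "1", "on", "yes")


def normalize_flags(command):
    # None means "not specified"; anything else collapses to True/False.
    out = {}
    for flag in ("accept-ra", "wakeonlan"):
        val = command.get(flag, None)
        out[flag] = None if val is None else is_true_sketch(val)
    return out


print(normalize_flags({"name": "eth0", "accept-ra": "true"}))
# {'accept-ra': True, 'wakeonlan': None}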
Example #4
    def handle_physical(self, command):
        '''
        command = {
            'type': 'physical',
            'mac_address': 'c0:d6:9f:2c:e8:80',
            'name': 'eth0',
            'subnets': [
                {'type': 'dhcp4'}
             ],
            'accept-ra': 'true'
        }
        '''

        interfaces = self._network_state.get('interfaces', {})
        iface = interfaces.get(command['name'], {})
        for param, val in command.get('params', {}).items():
            iface.update({param: val})

        # convert subnet ipv6 netmask to cidr as needed
        subnets = _normalize_subnets(command.get('subnets'))

        # automatically set 'use_ipv6' if any addresses are ipv6
        if not self.use_ipv6:
            for subnet in subnets:
                if (subnet.get('type').endswith('6')
                        or is_ipv6_addr(subnet.get('address'))):
                    self.use_ipv6 = True
                    break

        accept_ra = command.get('accept-ra', None)
        if accept_ra is not None:
            accept_ra = util.is_true(accept_ra)
        wakeonlan = command.get('wakeonlan', None)
        if wakeonlan is not None:
            wakeonlan = util.is_true(wakeonlan)
        iface.update({
            'name': command.get('name'),
            'type': command.get('type'),
            'mac_address': command.get('mac_address'),
            'inet': 'inet',
            'mode': 'manual',
            'mtu': command.get('mtu'),
            'address': None,
            'gateway': None,
            'subnets': subnets,
            'accept-ra': accept_ra,
            'wakeonlan': wakeonlan,
        })
        self._network_state['interfaces'].update({command.get('name'): iface})
        self.dump_network_state()
Example #5
    def _consume_vendordata(self, vendor_source, frequency=PER_INSTANCE):
        """
        Consume the vendordata and run the part handlers on it
        """

        # User-data should have been consumed first.
        # So we merge the other available cloud-configs (everything except
        # vendor provided), and check whether or not we should consume
        # vendor data at all. That gives user or system a chance to override.
        if vendor_source == 'vendordata':
            if not self.datasource.get_vendordata_raw():
                LOG.debug("no vendordata from datasource")
                return
            cfg_name = 'vendor_data'
        elif vendor_source == 'vendordata2':
            if not self.datasource.get_vendordata2_raw():
                LOG.debug("no vendordata2 from datasource")
                return
            cfg_name = 'vendor_data2'
        else:
            raise RuntimeError("vendor_source arg must be either 'vendordata'"
                               " or 'vendordata2'")

        _cc_merger = helpers.ConfigMerger(paths=self._paths,
                                          datasource=self.datasource,
                                          additional_fns=[],
                                          base_cfg=self.cfg,
                                          include_vendor=False)
        vdcfg = _cc_merger.cfg.get(cfg_name, {})

        if not isinstance(vdcfg, dict):
            vdcfg = {'enabled': False}
            LOG.warning("invalid %s setting. resetting to: %s",
                        cfg_name, vdcfg)

        enabled = vdcfg.get('enabled')
        no_handlers = vdcfg.get('disabled_handlers', None)

        if not util.is_true(enabled):
            LOG.debug("%s consumption is disabled.", vendor_source)
            return

        LOG.debug("%s will be consumed. disabled_handlers=%s",
                  vendor_source, no_handlers)

        # Ensure vendordata source fetched before activation (just in case.)

        # c_handlers_list keeps track of all the active handlers, while
        # excluding what the user doesn't want run, i.e. boot_hook,
        # cloud_config, shell_script
        if vendor_source == 'vendordata':
            vendor_data_msg = self.datasource.get_vendordata()
            c_handlers_list = self._default_vendordata_handlers()
        else:
            vendor_data_msg = self.datasource.get_vendordata2()
            c_handlers_list = self._default_vendordata2_handlers()

        # Run the handlers
        self._do_handlers(vendor_data_msg, c_handlers_list, frequency,
                          excluded=no_handlers)
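The gate in _consume_vendordata() boils down to one truthy check on the merged config. A small sketch under the same config shape (stand-in truthy helper, hypothetical input):

def is_true_sketch(val):
    return val is True or str(val).lower().strip() in ("true", "1", "on", "yes")


def should_consume(vdcfg):
    # Non-dict settings are treated as "disabled", mirroring the reset above.
    if not isinstance(vdcfg, dict):
        vdcfg = {"enabled": False}
    return is_true_sketch(vdcfg.get("enabled")), vdcfg.get("disabled_handlers")


print(should_consume({"enabled": "yes", "disabled_handlers": ["shell_script"]}))
# (True, ['shell_script'])
print(should_consume("bogus"))  # (False, None)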
Example #6
    def _verify_keys(self):
        '''
        Checks that the keys in the rh_subscription dict from the user-data
        are what we expect.
        '''

        for k in self.rhel_cfg:
            if k not in self.valid_rh_keys:
                bad_key = "{0} is not a valid key for rh_subscription. "\
                          "Valid keys are: "\
                          "{1}".format(k, ', '.join(self.valid_rh_keys))
                return False, bad_key

        # Check for bad auto-attach value
        if (self.auto_attach is not None) and \
                not (util.is_true(self.auto_attach) or
                     util.is_false(self.auto_attach)):
            not_bool = "The key auto-attach must be a boolean value "\
                       "(True/False "
            return False, not_bool

        if (self.servicelevel is not None) and ((not self.auto_attach) or
           (util.is_false(str(self.auto_attach)))):
            no_auto = ("The service-level key must be used in conjunction "
                       "with the auto-attach key.  Please re-run with "
                       "auto-attach: True")
            return False, no_auto
        return True, None
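The auto-attach check above rejects anything that does not read as an explicit true or false. A sketch of that idea with assumed true/false string sets (not cloud-init's util.is_true/is_false):

TRUE_STRINGS = ("true", "1", "on", "yes")
FALSE_STRINGS = ("false", "0", "off", "no")


def is_bool_like(val):
    # True only if the value parses as an explicit boolean either way.
    s = str(val).lower().strip()
    return s in TRUE_STRINGS or s in FALSE_STRINGS


for candidate in (True, "False", "maybe"):
    print(candidate, is_bool_like(candidate))
# True True / False True / maybe False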
Example #7
def handle(name, cfg, cloud, log, _args):

    apk_section = cfg.get('apk_repos')
    if not apk_section:
        log.debug(("Skipping module named %s,"
                   " no 'apk_repos' section found"), name)
        return

    if util.is_true(apk_section.get('preserve_repositories'), False):
        log.debug(("Skipping module named %s,"
                   " 'preserve_repositories' is set"), name)
        return

    alpine_repo = apk_section.get('alpine_repo')
    if not alpine_repo:
        log.debug(("Skipping module named %s,"
                   " no 'alpine_repo' configuration found"), name)
        return

    alpine_version = alpine_repo.get('version')
    if not alpine_version:
        log.debug(("Skipping module named %s,"
                   " 'version' not specified in alpine_repo"), name)
        return

    local_repo = apk_section.get('local_repo', {})
    local_baseurl = local_repo.get('base_url')

    _write_repositories_file(alpine_repo, local_baseurl, log)
def handle_ssh_pwauth(pw_auth, distro):
    """Apply sshd PasswordAuthentication changes.

    @param pw_auth: config setting from 'pw_auth'.
                    Best given as True, False, or "unchanged".
    @param distro: an instance of the distro class for the target distribution

    @return: None"""
    cfg_name = "PasswordAuthentication"

    if util.is_true(pw_auth):
        cfg_val = "yes"
    elif util.is_false(pw_auth):
        cfg_val = "no"
    else:
        bmsg = "Leaving SSH config '%s' unchanged." % cfg_name
        if pw_auth is None or pw_auth.lower() == "unchanged":
            LOG.debug("%s ssh_pwauth=%s", bmsg, pw_auth)
        else:
            LOG.warning("%s Unrecognized value: ssh_pwauth=%s", bmsg, pw_auth)
        return

    updated = update_ssh_config({cfg_name: cfg_val})
    if not updated:
        LOG.debug("No need to restart SSH service, %s not updated.", cfg_name)
        return

    distro.manage_service("restart", distro.get_option("ssh_svcname", "ssh"))
    LOG.debug("Restarted the SSH daemon.")
Example #9
def _get_wrapper_prefix(cmd, mode):
    if isinstance(cmd, str):
        cmd = [str(cmd)]

    if util.is_true(mode) or (str(mode).lower() == "auto" and cmd[0] and util.which(cmd[0])):
        return cmd
    else:
        return []
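A self-contained sketch of the wrapper-prefix decision above, substituting shutil.which for util.which and an assumed truthy helper: a truthy mode always returns the wrapper, "auto" returns it only if the binary is on PATH, anything else drops it.

import shutil


def is_true_sketch(val):
    return val is True or str(val).lower().strip() in ("true", "1", "on", "yes")


def wrapper_prefix(cmd, mode):
    if isinstance(cmd, str):
        cmd = [cmd]
    if is_true_sketch(mode):
        return cmd
    if str(mode).lower() == "auto" and cmd and cmd[0] and shutil.which(cmd[0]):
        return cmd
    return []


print(wrapper_prefix("eatmydata", False))   # []
print(wrapper_prefix("eatmydata", "auto"))  # [] unless eatmydata is on PATH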
Example #10
def handle(name, cfg, cloud, log, args):
    """Handler method activated by cloud-init."""

    if not isinstance(cloud.distro, ubuntu.Distro):
        log.debug("%s: distro is '%s', not ubuntu. returning",
                  name, cloud.distro.__class__)
        return

    cfg = util.mergemanydict([cfg, DEFAULT_CONFIG])
    target = cfg['init_switch']['target']
    reboot = cfg['init_switch']['reboot']

    if len(args) != 0:
        target = args[0]
        if len(args) > 1:
            reboot = util.is_true(args[1])

    if not target:
        log.debug("%s: target=%s. nothing to do", name, target)
        return

    if not util.which('dpkg'):
        log.warn("%s: 'dpkg' not available. Assuming not ubuntu", name)
        return

    supported = ('upstart', 'systemd')
    if target not in supported:
        log.warn("%s: target set to %s, expected one of: %s",
                 name, target, str(supported))

    if os.path.exists("/run/systemd/system"):
        current = "systemd"
    else:
        current = "upstart"

    if current == target:
        log.debug("%s: current = target = %s. nothing to do", name, target)
        return

    try:
        util.subp(['sh', '-s', target], data=SWITCH_INIT)
    except util.ProcessExecutionError as e:
        log.warn("%s: Failed to switch to init '%s'. %s", name, target, e)
        return

    if util.is_false(reboot):
        log.info("%s: switched '%s' to '%s'. reboot=false, not rebooting.",
                 name, current, target)
        return

    try:
        log.warn("%s: switched '%s' to '%s'. rebooting.",
                 name, current, target)
        logging.flushLoggers(log)
        _fire_reboot(log, wait_attempts=4, initial_sleep=4)
    except Exception as e:
        util.logexc(log, "Requested reboot did not happen!")
        raise
Example #12
    def get_metadata_from_agent(self):
        temp_hostname = self.metadata.get('local-hostname')
        hostname_command = self.ds_cfg['hostname_bounce']['hostname_command']
        with temporary_hostname(temp_hostname, self.ds_cfg,
                                hostname_command=hostname_command) \
                as previous_hostname:
            if (previous_hostname is not None
                    and util.is_true(self.ds_cfg.get('set_hostname'))):
                cfg = self.ds_cfg['hostname_bounce']
                try:
                    perform_hostname_bounce(hostname=temp_hostname,
                                            cfg=cfg,
                                            prev_hostname=previous_hostname)
                except Exception as e:
                    LOG.warn("Failed publishing hostname: %s", e)
                    util.logexc(LOG, "handling set_hostname failed")

            try:
                invoke_agent(self.ds_cfg['agent_command'])
            except util.ProcessExecutionError:
                # claim the datasource even if the command failed
                util.logexc(LOG, "agent command '%s' failed.",
                            self.ds_cfg['agent_command'])

            ddir = self.ds_cfg['data_dir']
            shcfgxml = os.path.join(ddir, "SharedConfig.xml")
            wait_for = [shcfgxml]

            fp_files = []
            key_value = None
            for pk in self.cfg.get('_pubkeys', []):
                if pk.get('value', None):
                    key_value = pk['value']
                    LOG.debug("ssh authentication: using value from fabric")
                else:
                    bname = str(pk['fingerprint'] + ".crt")
                    fp_files += [os.path.join(ddir, bname)]
                    LOG.debug(
                        "ssh authentication: using fingerprint from fabirc")

            missing = util.log_time(logfunc=LOG.debug,
                                    msg="waiting for files",
                                    func=wait_for_files,
                                    args=(wait_for + fp_files, ))
        if len(missing):
            LOG.warn("Did not find files, but going on: %s", missing)

        metadata = {}
        if shcfgxml in missing:
            LOG.warn("SharedConfig.xml missing, using static instance-id")
        else:
            try:
                metadata['instance-id'] = iid_from_shared_config(shcfgxml)
            except ValueError as e:
                LOG.warn("failed to get instance id in %s: %s", shcfgxml, e)

        metadata['public-keys'] = key_value or pubkeys_from_crt_files(fp_files)
        return metadata
Example #13
def _get_wrapper_prefix(cmd, mode):
    if isinstance(cmd, str):
        cmd = [str(cmd)]

    if (util.is_true(mode) or
        (str(mode).lower() == "auto" and cmd[0] and util.which(cmd[0]))):
        return cmd
    else:
        return []
Example #14
def query_data(noun, seed_device, seed_timeout, strip=False, default=None,
               b64=None):
    """Makes a request to via the serial console via "GET <NOUN>"

        In the response, the first line is the status, while subsequent lines
        are is the value. A blank line with a "." is used to indicate end of
        response.

        If the response is expected to be base64 encoded, then set b64encoded
        to true. Unfortantely, there is no way to know if something is 100%
        encoded, so this method relies on being told if the data is base64 or
        not.
    """

    if not noun:
        return False

    ser = get_serial(seed_device, seed_timeout)
    ser.write("GET %s\n" % noun.rstrip())
    status = str(ser.readline()).rstrip()
    response = []
    eom_found = False

    if 'SUCCESS' not in status:
        ser.close()
        return default

    while not eom_found:
        m = ser.readline()
        if m.rstrip() == ".":
            eom_found = True
        else:
            response.append(m)

    ser.close()

    if b64 is None:
        b64 = query_data('b64-%s' % noun, seed_device=seed_device,
                            seed_timeout=seed_timeout, b64=False,
                            default=False, strip=True)
        b64 = util.is_true(b64)

    resp = None
    if b64 or strip:
        resp = "".join(response).rstrip()
    else:
        resp = "".join(response)

    if b64:
        try:
            return base64.b64decode(resp)
        except TypeError:
            LOG.warn("Failed base64 decoding key '%s'", noun)
            return resp

    return resp
Example #16
def handle(name, cfg, cloud, log, _args):
    if util.is_true(cfg.get('no_ssh_fingerprints', False)):
        log.debug(("Skipping module named %s, "
                   "logging of SSH fingerprints disabled"), name)
        return

    hash_meth = util.get_cfg_option_str(cfg, "authkey_hash", "md5")
    (users, _groups) = ug_util.normalize_users_groups(cfg, cloud.distro)
    for (user_name, _cfg) in users.items():
        (key_fn, key_entries) = ssh_util.extract_authorized_keys(user_name)
        _pprint_key_entries(user_name, key_fn, key_entries, hash_meth)
Example #17
def handle(name, cfg, cloud, log, args):
    cfgin = cfg.get("snap", {})
    if not cfgin:
        LOG.debug(
            "Skipping module named %s, no 'snap' key in configuration", name
        )
        return

    if util.is_true(cfgin.get("squashfuse_in_container", False)):
        maybe_install_squashfuse(cloud)
    add_assertions(cfgin.get("assertions", []))
    run_commands(cfgin.get("commands", []))
Example #18
def handle(name, cfg, cloud, log, _args):
    if util.is_true(cfg.get('no_ssh_fingerprints', False)):
        log.debug(("Skipping module named %s, "
                   "logging of ssh fingerprints disabled"), name)
        return

    hash_meth = util.get_cfg_option_str(cfg, "authkey_hash", "md5")
    (users, _groups) = ug_util.normalize_users_groups(cfg, cloud.distro)
    for (user_name, _cfg) in users.items():
        (key_fn, key_entries) = ssh_util.extract_authorized_keys(user_name)
        _pprint_key_entries(user_name, key_fn,
                            key_entries, hash_meth)
Example #19
def handle(name, cfg, cloud, log, args):
    cfgin = cfg.get('snap', {})
    if not cfgin:
        LOG.debug(("Skipping module named %s,"
                   " no 'snap' key in configuration"), name)
        return

    validate_cloudconfig_schema(cfg, schema)
    if util.is_true(cfgin.get('squashfuse_in_container', False)):
        maybe_install_squashfuse(cloud)
    add_assertions(cfgin.get('assertions', []))
    run_commands(cfgin.get('commands', []))
Example #20
def handle(name, cfg, cloud, log, args):
    cfgin = cfg.get('snap', {})
    if not cfgin:
        LOG.debug(("Skipping module named %s,"
                   " no 'snap' key in configuration"), name)
        return

    validate_cloudconfig_schema(cfg, schema)
    if util.is_true(cfgin.get('squashfuse_in_container', False)):
        maybe_install_squashfuse(cloud)
    add_assertions(cfgin.get('assertions', []))
    run_commands(cfgin.get('commands', []))
def handle_set_hostname(enabled, hostname, cfg):
    if not util.is_true(enabled):
        return

    if not hostname:
        LOG.warn("set_hostname was true but no local-hostname")
        return

    apply_hostname_bounce(hostname=hostname, policy=cfg['policy'],
                          interface=cfg['interface'],
                          command=cfg['command'],
                          hostname_command=cfg['hostname_command'])
def query_data(noun,
               seed_device,
               seed_timeout,
               strip=False,
               default=None,
               b64=None):
    """Makes a request to via the serial console via "GET <NOUN>"

        In the response, the first line is the status, while subsequent lines
        are is the value. A blank line with a "." is used to indicate end of
        response.

        If the response is expected to be base64 encoded, then set b64encoded
        to true. Unfortantely, there is no way to know if something is 100%
        encoded, so this method relies on being told if the data is base64 or
        not.
    """
    if not noun:
        return False

    with contextlib.closing(get_serial(seed_device, seed_timeout)) as ser:
        client = JoyentMetadataClient(ser)
        response = client.get_metadata(noun)

    if response is None:
        return default

    if b64 is None:
        b64 = query_data('b64-%s' % noun,
                         seed_device=seed_device,
                         seed_timeout=seed_timeout,
                         b64=False,
                         default=False,
                         strip=True)
        b64 = util.is_true(b64)

    resp = None
    if b64 or strip:
        resp = "".join(response).rstrip()
    else:
        resp = "".join(response)

    if b64:
        try:
            return util.b64d(resp)
        # Bogus input produces different errors in Python 2 and 3; catch both.
        except (TypeError, binascii.Error):
            LOG.warn("Failed base64 decoding key '%s'", noun)
            return resp

    return resp
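The base64 handling above follows a sibling-key convention: when the caller does not say whether <noun> is encoded, the metadata key "b64-<noun>" is consulted, and decode failures fall back to the raw string. A sketch against an in-memory dict instead of a serial client (names and store are hypothetical):

import base64
import binascii


def is_true_sketch(val):
    return val is True or str(val).lower().strip() in ("true", "1", "on", "yes")


def query_sketch(store, noun):
    raw = store.get(noun)
    if raw is None:
        return None
    if is_true_sketch(store.get("b64-%s" % noun, False)):
        try:
            return base64.b64decode(raw)
        except (TypeError, binascii.Error):
            return raw  # not actually base64; hand back the raw value
    return raw


store = {"user-data": "aGVsbG8=", "b64-user-data": "true"}
print(query_sketch(store, "user-data"))  # b'hello'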
Example #24
    def get_metadata_from_agent(self):
        temp_hostname = self.metadata.get("local-hostname")
        hostname_command = self.ds_cfg["hostname_bounce"]["hostname_command"]
        with temporary_hostname(temp_hostname, self.ds_cfg, hostname_command=hostname_command) as previous_hostname:
            if previous_hostname is not None and util.is_true(self.ds_cfg.get("set_hostname")):
                cfg = self.ds_cfg["hostname_bounce"]
                try:
                    perform_hostname_bounce(hostname=temp_hostname, cfg=cfg, prev_hostname=previous_hostname)
                except Exception as e:
                    LOG.warn("Failed publishing hostname: %s", e)
                    util.logexc(LOG, "handling set_hostname failed")

            try:
                invoke_agent(self.ds_cfg["agent_command"])
            except util.ProcessExecutionError:
                # claim the datasource even if the command failed
                util.logexc(LOG, "agent command '%s' failed.", self.ds_cfg["agent_command"])

            ddir = self.ds_cfg["data_dir"]
            shcfgxml = os.path.join(ddir, "SharedConfig.xml")
            wait_for = [shcfgxml]

            fp_files = []
            key_value = None
            for pk in self.cfg.get("_pubkeys", []):
                if pk.get("value", None):
                    key_value = pk["value"]
                    LOG.debug("ssh authentication: using value from fabric")
                else:
                    bname = str(pk["fingerprint"] + ".crt")
                    fp_files += [os.path.join(ddir, bname)]
                    LOG.debug("ssh authentication: using fingerprint from fabirc")

            missing = util.log_time(
                logfunc=LOG.debug, msg="waiting for files", func=wait_for_files, args=(wait_for + fp_files,)
            )
        if len(missing):
            LOG.warn("Did not find files, but going on: %s", missing)

        metadata = {}
        if shcfgxml in missing:
            LOG.warn("SharedConfig.xml missing, using static instance-id")
        else:
            try:
                metadata["instance-id"] = iid_from_shared_config(shcfgxml)
            except ValueError as e:
                LOG.warn("failed to get instance id in %s: %s", shcfgxml, e)

        metadata["public-keys"] = key_value or pubkeys_from_crt_files(fp_files)
        return metadata
    def get_metadata_from_agent(self):
        temp_hostname = self.metadata.get('local-hostname')
        hostname_command = self.ds_cfg['hostname_bounce']['hostname_command']
        with temporary_hostname(temp_hostname, self.ds_cfg,
                                hostname_command=hostname_command) \
                as previous_hostname:
            if (previous_hostname is not None
                    and util.is_true(self.ds_cfg.get('set_hostname'))):
                cfg = self.ds_cfg['hostname_bounce']
                try:
                    perform_hostname_bounce(hostname=temp_hostname,
                                            cfg=cfg,
                                            prev_hostname=previous_hostname)
                except Exception as e:
                    LOG.warn("Failed publishing hostname: %s", e)
                    util.logexc(LOG, "handling set_hostname failed")

            try:
                invoke_agent(self.ds_cfg['agent_command'])
            except util.ProcessExecutionError:
                # claim the datasource even if the command failed
                util.logexc(LOG, "agent command '%s' failed.",
                            self.ds_cfg['agent_command'])

            ddir = self.ds_cfg['data_dir']
            shcfgxml = os.path.join(ddir, "SharedConfig.xml")
            wait_for = [shcfgxml]

            fp_files = []
            for pk in self.cfg.get('_pubkeys', []):
                bname = str(pk['fingerprint'] + ".crt")
                fp_files += [os.path.join(ddir, bname)]

            missing = util.log_time(logfunc=LOG.debug, msg="waiting for files",
                                    func=wait_for_files,
                                    args=(wait_for + fp_files,))
        if len(missing):
            LOG.warn("Did not find files, but going on: %s", missing)

        metadata = {}
        if shcfgxml in missing:
            LOG.warn("SharedConfig.xml missing, using static instance-id")
        else:
            try:
                metadata['instance-id'] = iid_from_shared_config(shcfgxml)
            except ValueError as e:
                LOG.warn("failed to get instance id in %s: %s", shcfgxml, e)
        metadata['public-keys'] = pubkeys_from_crt_files(fp_files)
        return metadata
Example #26
def handle(name, cfg, cloud, log, _args):
    if util.is_true(cfg.get("no_ssh_fingerprints", False)):
        log.debug(
            "Skipping module named %s, logging of SSH fingerprints disabled",
            name,
        )
        return

    hash_meth = util.get_cfg_option_str(cfg, "authkey_hash", "sha256")
    (users, _groups) = ug_util.normalize_users_groups(cfg, cloud.distro)
    for (user_name, _cfg) in users.items():
        if _cfg.get("no_create_home") or _cfg.get("system"):
            continue

        (key_fn, key_entries) = ssh_util.extract_authorized_keys(user_name)
        _pprint_key_entries(user_name, key_fn, key_entries, hash_meth)
Example #27
def handle(name, cfg, cloud, log, _args):
    """
    Call to handle apk_repos sections in cloud-config file.

    @param name: The module name "apk-configure" from cloud.cfg
    @param cfg: A nested dict containing the entire cloud config contents.
    @param cloud: The L{CloudInit} object in use.
    @param log: Pre-initialized Python logger object to use for logging.
    @param _args: Any module arguments from cloud.cfg
    """

    # If there is no "apk_repos" section in the configuration
    # then do nothing.
    apk_section = cfg.get('apk_repos')
    if not apk_section:
        LOG.debug(("Skipping module named %s,"
                   " no 'apk_repos' section found"), name)
        return

    validate_cloudconfig_schema(cfg, schema)

    # If "preserve_repositories" is explicitly set to True in
    # the configuration do nothing.
    if util.is_true(apk_section.get('preserve_repositories'), False):
        LOG.debug(("Skipping module named %s,"
                   " 'preserve_repositories' is set"), name)
        return

    # If there is no "alpine_repo" subsection of "apk_repos" present in the
    # configuration then do nothing, as at least "version" is required to
    # create valid repositories entries.
    alpine_repo = apk_section.get('alpine_repo')
    if not alpine_repo:
        LOG.debug(("Skipping module named %s,"
                   " no 'alpine_repo' configuration found"), name)
        return

    # If there is no "version" value present in configuration then do nothing.
    alpine_version = alpine_repo.get('version')
    if not alpine_version:
        LOG.debug(("Skipping module named %s,"
                   " 'version' not specified in alpine_repo"), name)
        return

    local_repo = apk_section.get('local_repo_base_url', '')

    _write_repositories_file(alpine_repo, alpine_version, local_repo)
    def get_metadata_from_agent(self):
        temp_hostname = self.metadata.get('local-hostname')
        hostname_command = self.ds_cfg['hostname_bounce']['hostname_command']
        with temporary_hostname(temp_hostname, self.ds_cfg,
                                hostname_command=hostname_command) \
                as previous_hostname:
            if (previous_hostname is not None and
               util.is_true(self.ds_cfg.get('set_hostname'))):
                cfg = self.ds_cfg['hostname_bounce']
                try:
                    perform_hostname_bounce(hostname=temp_hostname,
                                            cfg=cfg,
                                            prev_hostname=previous_hostname)
                except Exception as e:
                    LOG.warn("Failed publishing hostname: %s", e)
                    util.logexc(LOG, "handling set_hostname failed")

            try:
                invoke_agent(self.ds_cfg['agent_command'])
            except util.ProcessExecutionError:
                # claim the datasource even if the command failed
                util.logexc(LOG, "agent command '%s' failed.",
                            self.ds_cfg['agent_command'])

            ddir = self.ds_cfg['data_dir']

            fp_files = []
            key_value = None
            for pk in self.cfg.get('_pubkeys', []):
                if pk.get('value', None):
                    key_value = pk['value']
                    LOG.debug("ssh authentication: using value from fabric")
                else:
                    bname = str(pk['fingerprint'] + ".crt")
                    fp_files += [os.path.join(ddir, bname)]
                    LOG.debug("ssh authentication: "
                              "using fingerprint from fabirc")

            missing = util.log_time(logfunc=LOG.debug, msg="waiting for files",
                                    func=wait_for_files,
                                    args=(fp_files,))
        if len(missing):
            LOG.warn("Did not find files, but going on: %s", missing)

        metadata = {}
        metadata['public-keys'] = key_value or pubkeys_from_crt_files(fp_files)
        return metadata
Example #29
    def get_data(self):
        md = {}
        ud = ""

        if not os.path.exists(self.seed):
            LOG.debug("Host does not appear to be on SmartOS")
            return False
        self.seed = self.seed

        dmi_info = dmi_data()
        if dmi_info is False:
            LOG.debug("No dmidata utility found")
            return False

        system_uuid, system_type = dmi_info
        if 'smartdc' not in system_type.lower():
            LOG.debug("Host is not on SmartOS. system_type=%s", system_type)
            return False
        self.is_smartdc = True
        md['instance-id'] = system_uuid

        b64_keys = self.query('base64_keys', strip=True, b64=False)
        if b64_keys is not None:
            self.b64_keys = [k.strip() for k in str(b64_keys).split(',')]

        b64_all = self.query('base64_all', strip=True, b64=False)
        if b64_all is not None:
            self.b64_all = util.is_true(b64_all)

        for ci_noun, attribute in SMARTOS_ATTRIB_MAP.iteritems():
            smartos_noun, strip = attribute
            md[ci_noun] = self.query(smartos_noun, strip=strip)

        if not md['local-hostname']:
            md['local-hostname'] = system_uuid

        ud = None
        if md['user-data']:
            ud = md['user-data']
        elif md['user-script']:
            ud = md['user-script']

        self.metadata = md
        self.userdata_raw = ud
        return True
Example #30
    def get_data(self):
        md = {}
        ud = ""

        if not os.path.exists(self.seed):
            LOG.debug("Host does not appear to be on SmartOS")
            return False
        self.seed = self.seed

        dmi_info = dmi_data()
        if dmi_info is False:
            LOG.debug("No dmidata utility found")
            return False

        system_uuid, system_type = dmi_info
        if 'smartdc' not in system_type.lower():
            LOG.debug("Host is not on SmartOS. system_type=%s", system_type)
            return False
        self.is_smartdc = True
        md['instance-id'] = system_uuid

        b64_keys = self.query('base64_keys', strip=True, b64=False)
        if b64_keys is not None:
            self.b64_keys = [k.strip() for k in str(b64_keys).split(',')]

        b64_all = self.query('base64_all', strip=True, b64=False)
        if b64_all is not None:
            self.b64_all = util.is_true(b64_all)

        for ci_noun, attribute in SMARTOS_ATTRIB_MAP.iteritems():
            smartos_noun, strip = attribute
            md[ci_noun] = self.query(smartos_noun, strip=strip)

        if not md['local-hostname']:
            md['local-hostname'] = system_uuid

        ud = None
        if md['user-data']:
            ud = md['user-data']
        elif md['user-script']:
            ud = md['user-script']

        self.metadata = md
        self.userdata_raw = ud
        return True
Example #31
def temporary_hostname(temp_hostname, cfg, hostname_command='hostname'):
    """
    Set a temporary hostname, restoring the previous hostname on exit.

    Will have the value of the previous hostname when used as a context
    manager, or None if the hostname was not changed.
    """
    policy = cfg['hostname_bounce']['policy']
    previous_hostname = get_hostname(hostname_command)
    if (not util.is_true(cfg.get('set_hostname')) or util.is_false(policy)
            or (previous_hostname == temp_hostname and policy != 'force')):
        yield None
        return
    set_hostname(temp_hostname, hostname_command)
    try:
        yield previous_hostname
    finally:
        set_hostname(previous_hostname, hostname_command)
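temporary_hostname() is a restore-on-exit context manager: it yields the previous value (or None if nothing changed) and always puts the original back in the finally block. The same pattern in miniature, with an in-memory value standing in for the real hostname helpers:

import contextlib

_state = {"hostname": "original"}


@contextlib.contextmanager
def temporary_value(temp):
    previous = _state["hostname"]
    if previous == temp:
        yield None  # nothing to change, mirroring the early "yield None" above
        return
    _state["hostname"] = temp
    try:
        yield previous
    finally:
        _state["hostname"] = previous


with temporary_value("bounce-me") as prev:
    print(_state["hostname"], "was", prev)  # bounce-me was original
print(_state["hostname"])                   # original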
def query_data(noun, seed_device, seed_timeout, strip=False, default=None,
               b64=None):
    """Makes a request to via the serial console via "GET <NOUN>"

        In the response, the first line is the status, while subsequent lines
        are is the value. A blank line with a "." is used to indicate end of
        response.

        If the response is expected to be base64 encoded, then set b64encoded
        to true. Unfortantely, there is no way to know if something is 100%
        encoded, so this method relies on being told if the data is base64 or
        not.
    """
    if not noun:
        return False

    with contextlib.closing(get_serial(seed_device, seed_timeout)) as ser:
        client = JoyentMetadataClient(ser)
        response = client.get_metadata(noun)

    if response is None:
        return default

    if b64 is None:
        b64 = query_data('b64-%s' % noun, seed_device=seed_device,
                         seed_timeout=seed_timeout, b64=False,
                         default=False, strip=True)
        b64 = util.is_true(b64)

    resp = None
    if b64 or strip:
        resp = "".join(response).rstrip()
    else:
        resp = "".join(response)

    if b64:
        try:
            return util.b64d(resp)
        # Bogus input produces different errors in Python 2 and 3; catch both.
        except (TypeError, binascii.Error):
            LOG.warn("Failed base64 decoding key '%s'", noun)
            return resp

    return resp
Example #33
    def _consume_vendordata(self, frequency=PER_INSTANCE):
        """
        Consume the vendordata and run the part handlers on it
        """
        # User-data should have been consumed first.
        # So we merge the other available cloud-configs (everything except
        # vendor provided), and check whether or not we should consume
        # vendor data at all. That gives user or system a chance to override.
        if not self.datasource.get_vendordata_raw():
            LOG.debug("no vendordata from datasource")
            return

        _cc_merger = helpers.ConfigMerger(paths=self._paths,
                                          datasource=self.datasource,
                                          additional_fns=[],
                                          base_cfg=self.cfg,
                                          include_vendor=False)
        vdcfg = _cc_merger.cfg.get('vendor_data', {})

        if not isinstance(vdcfg, dict):
            vdcfg = {'enabled': False}
            LOG.warning("invalid 'vendor_data' setting. resetting to: %s",
                        vdcfg)

        enabled = vdcfg.get('enabled')
        no_handlers = vdcfg.get('disabled_handlers', None)

        if not util.is_true(enabled):
            LOG.debug("vendordata consumption is disabled.")
            return

        LOG.debug("vendor data will be consumed. disabled_handlers=%s",
                  no_handlers)

        # Ensure vendordata source fetched before activation (just in case)
        vendor_data_msg = self.datasource.get_vendordata()

        # This keeps track of all the active handlers, while excluding what
        # the user doesn't want run, i.e. boot_hook, cloud_config, shell_script
        c_handlers_list = self._default_vendordata_handlers()

        # Run the handlers
        self._do_handlers(vendor_data_msg, c_handlers_list, frequency,
                          excluded=no_handlers)
def temporary_hostname(temp_hostname, cfg, hostname_command='hostname'):
    """
    Set a temporary hostname, restoring the previous hostname on exit.

    Will have the value of the previous hostname when used as a context
    manager, or None if the hostname was not changed.
    """
    policy = cfg['hostname_bounce']['policy']
    previous_hostname = get_hostname(hostname_command)
    if (not util.is_true(cfg.get('set_hostname')) or
       util.is_false(policy) or
       (previous_hostname == temp_hostname and policy != 'force')):
        yield None
        return
    set_hostname(temp_hostname, hostname_command)
    try:
        yield previous_hostname
    finally:
        set_hostname(previous_hostname, hostname_command)
Example #35
    def bounce_network_with_azure_hostname(self):
        # When using cloud-init to provision, we have to set the hostname from
        # the metadata and "bounce" the network to force DDNS to update via
        # dhclient
        azure_hostname = self.metadata.get('local-hostname')
        LOG.debug("Hostname in metadata is %s", azure_hostname)
        hostname_command = self.ds_cfg['hostname_bounce']['hostname_command']

        with temporary_hostname(azure_hostname, self.ds_cfg,
                                hostname_command=hostname_command) \
                as previous_hostname:
            if (previous_hostname is not None and
                    util.is_true(self.ds_cfg.get('set_hostname'))):
                cfg = self.ds_cfg['hostname_bounce']

                # "Bouncing" the network
                try:
                    perform_hostname_bounce(hostname=azure_hostname,
                                            cfg=cfg,
                                            prev_hostname=previous_hostname)
                except Exception as e:
                    LOG.warning("Failed publishing hostname: %s", e)
                    util.logexc(LOG, "handling set_hostname failed")
Example #37
def handle_ssh_pwauth(pw_auth, service_cmd=None, service_name="ssh"):
    """Apply sshd PasswordAuthentication changes.

    @param pw_auth: config setting from 'pw_auth'.
                    Best given as True, False, or "unchanged".
    @param service_cmd: The service command list (['service'])
    @param service_name: The name of the sshd service for the system.

    @return: None"""
    cfg_name = "PasswordAuthentication"
    if service_cmd is None:
        service_cmd = ["service"]

    if util.is_true(pw_auth):
        cfg_val = 'yes'
    elif util.is_false(pw_auth):
        cfg_val = 'no'
    else:
        bmsg = "Leaving ssh config '%s' unchanged." % cfg_name
        if pw_auth is None or pw_auth.lower() == 'unchanged':
            LOG.debug("%s ssh_pwauth=%s", bmsg, pw_auth)
        else:
            LOG.warning("%s Unrecognized value: ssh_pwauth=%s", bmsg, pw_auth)
        return

    updated = update_ssh_config({cfg_name: cfg_val})
    if not updated:
        LOG.debug("No need to restart ssh service, %s not updated.", cfg_name)
        return

    if 'systemctl' in service_cmd:
        cmd = list(service_cmd) + ["restart", service_name]
    else:
        cmd = list(service_cmd) + [service_name, "restart"]
    util.subp(cmd)
    LOG.debug("Restarted the ssh daemon.")
def handle_ssh_pwauth(pw_auth, distro: Distro):
    """Apply sshd PasswordAuthentication changes.

    @param pw_auth: config setting from 'pw_auth'.
                    Best given as True, False, or "unchanged".
    @param distro: an instance of the distro class for the target distribution

    @return: None"""
    service = distro.get_option("ssh_svcname", "ssh")
    restart_ssh = True
    try:
        distro.manage_service("status", service)
    except subp.ProcessExecutionError as e:
        uses_systemd = distro.uses_systemd()
        if not uses_systemd:
            LOG.debug(
                "Writing config 'ssh_pwauth: %s'. SSH service '%s'"
                " will not be restarted because it is not running or not"
                " available.",
                pw_auth,
                service,
            )
            restart_ssh = False
        elif e.exit_code == 3:
            # Service is not running. Write ssh config.
            LOG.debug(
                "Writing config 'ssh_pwauth: %s'. SSH service '%s'"
                " will not be restarted because it is stopped.",
                pw_auth,
                service,
            )
            restart_ssh = False
        elif e.exit_code == 4:
            # Service status is unknown
            LOG.warning(
                "Ignoring config 'ssh_pwauth: %s'."
                " SSH service '%s' is not installed.",
                pw_auth,
                service,
            )
            return
        else:
            LOG.warning(
                "Ignoring config 'ssh_pwauth: %s'."
                " SSH service '%s' is not available. Error: %s.",
                pw_auth,
                service,
                e,
            )
            return

    cfg_name = "PasswordAuthentication"

    if isinstance(pw_auth, str):
        LOG.warning(
            "DEPRECATION: The 'ssh_pwauth' config key should be set to "
            "a boolean value. The string format is deprecated and will be "
            "removed in a future version of cloud-init.")
    if util.is_true(pw_auth):
        cfg_val = "yes"
    elif util.is_false(pw_auth):
        cfg_val = "no"
    else:
        bmsg = "Leaving SSH config '%s' unchanged." % cfg_name
        if pw_auth is None or pw_auth.lower() == "unchanged":
            LOG.debug("%s ssh_pwauth=%s", bmsg, pw_auth)
        else:
            LOG.warning("%s Unrecognized value: ssh_pwauth=%s", bmsg, pw_auth)
        return

    updated = update_ssh_config({cfg_name: cfg_val})
    if not updated:
        LOG.debug("No need to restart SSH service, %s not updated.", cfg_name)
        return

    if restart_ssh:
        distro.manage_service("restart", service)
        LOG.debug("Restarted the SSH daemon.")
    else:
        LOG.debug("Not restarting SSH service: service is stopped.")
Example #39
def _normalize_users(u_cfg, def_user_cfg=None):
    if isinstance(u_cfg, dict):
        ad_ucfg = []
        for k, v in u_cfg.items():
            if isinstance(v, (bool, int, float, str)):
                if util.is_true(v):
                    ad_ucfg.append(str(k))
            elif isinstance(v, dict):
                v["name"] = k
                ad_ucfg.append(v)
            else:
                raise TypeError("Unmappable user value type %s for key %s" %
                                (type_utils.obj_name(v), k))
        u_cfg = ad_ucfg
    elif isinstance(u_cfg, str):
        u_cfg = util.uniq_merge_sorted(u_cfg)

    users = {}
    for user_config in u_cfg:
        if isinstance(user_config, (list, str)):
            for u in util.uniq_merge(user_config):
                if u and u not in users:
                    users[u] = {}
        elif isinstance(user_config, dict):
            n = user_config.pop("name", "default")
            prev_config = users.get(n) or {}
            users[n] = util.mergemanydict([prev_config, user_config])
        else:
            raise TypeError("User config must be dictionary/list or string "
                            " types only and not %s" %
                            (type_utils.obj_name(user_config)))

    # Ensure user options are in the right python friendly format
    if users:
        c_users = {}
        for uname, uconfig in users.items():
            c_uconfig = {}
            for k, v in uconfig.items():
                k = k.replace("-", "_").strip()
                if k:
                    c_uconfig[k] = v
            c_users[uname] = c_uconfig
        users = c_users

    # Fix the default user into the actual default user name and replace it.
    def_user = None
    if users and "default" in users:
        def_config = users.pop("default")
        if def_user_cfg:
            # Pickup what the default 'real name' is and any groups that are
            # provided by the default config
            def_user_cfg = def_user_cfg.copy()
            def_user = def_user_cfg.pop("name")
            def_groups = def_user_cfg.pop("groups", [])
            # Pick any config + groups for the user name that we may have
            # extracted previously
            parsed_config = users.pop(def_user, {})
            parsed_groups = parsed_config.get("groups", [])
            # Now merge the extracted groups with the default config provided
            users_groups = util.uniq_merge_sorted(parsed_groups, def_groups)
            parsed_config["groups"] = ",".join(users_groups)
            # The real config for the default user is the combination of the
            # default user config provided by the distro, the default user
            # config provided by the above merging for the user 'default' and
            # then the parsed config from the user's 'real name' which does not
            # have to be 'default' (but could be)
            users[def_user] = util.mergemanydict(
                [def_user_cfg, def_config, parsed_config])

    # Ensure that only the default user that we found (if any) is actually
    # marked as the default user
    for uname, uconfig in users.items():
        uconfig["default"] = uname == def_user if def_user else False

    return users
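The first branch of _normalize_users() turns the dict form of the users config into a list: a scalar truthy value selects the user by name, while a dict becomes that user's config. A sketch of just that step (stand-in truthy helper, hypothetical users):

def is_true_sketch(val):
    return val is True or str(val).lower().strip() in ("true", "1", "on", "yes")


def expand_users(u_cfg):
    expanded = []
    for name, val in u_cfg.items():
        if isinstance(val, (bool, int, float, str)):
            if is_true_sketch(val):
                expanded.append(str(name))
        elif isinstance(val, dict):
            expanded.append(dict(val, name=name))
    return expanded


print(expand_users({"alice": True, "bob": "no", "carol": {"groups": "sudo"}}))
# ['alice', {'groups': 'sudo', 'name': 'carol'}]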
Example #40
    def get_data(self):
        md = {}
        ud = ""

        if not device_exists(self.seed):
            LOG.debug("No metadata device '%s' found for SmartOS datasource",
                      self.seed)
            return False

        uname_arch = os.uname()[4]
        if uname_arch.startswith("arm") or uname_arch == "aarch64":
            # Disabling because dmidcode in dmi_data() crashes kvm process
            LOG.debug("Disabling SmartOS datasource on arm (LP: #1243287)")
            return False

        # SDC KVM instances will provide dmi data, LX-brand does not
        if self.smartos_type == 'kvm':
            dmi_info = dmi_data()
            if dmi_info is False:
                LOG.debug("No dmidata utility found")
                return False

            system_type = dmi_info
            if 'smartdc' not in system_type.lower():
                LOG.debug("Host is not on SmartOS. system_type=%s",
                          system_type)
                return False
            LOG.debug("Host is SmartOS, guest in KVM")

        seed_obj = self._get_seed_file_object()
        if seed_obj is None:
            LOG.debug('Seed file object not found.')
            return False
        with contextlib.closing(seed_obj) as seed:
            b64_keys = self.query('base64_keys', seed, strip=True, b64=False)
            if b64_keys is not None:
                self.b64_keys = [k.strip() for k in str(b64_keys).split(',')]

            b64_all = self.query('base64_all', seed, strip=True, b64=False)
            if b64_all is not None:
                self.b64_all = util.is_true(b64_all)

            for ci_noun, attribute in SMARTOS_ATTRIB_MAP.items():
                smartos_noun, strip = attribute
                md[ci_noun] = self.query(smartos_noun, seed, strip=strip)

        # @datadictionary: This key may contain a program that is written
        # to a file in the filesystem of the guest on each boot and then
        # executed. It may be of any format that would be considered
        # executable in the guest instance.
        #
        # We write 'user-script' and 'operator-script' into the
        # instance/data directory. The default vendor-data then handles
        # executing them later.
        data_d = os.path.join(self.paths.get_cpath(), 'instances',
                              md['instance-id'], 'data')
        user_script = os.path.join(data_d, 'user-script')
        u_script_l = "%s/user-script" % LEGACY_USER_D
        write_boot_content(md.get('user-script'),
                           content_f=user_script,
                           link=u_script_l,
                           shebang=True,
                           mode=0o700)

        operator_script = os.path.join(data_d, 'operator-script')
        write_boot_content(md.get('operator-script'),
                           content_f=operator_script,
                           shebang=False,
                           mode=0o700)

        # @datadictionary:  This key has no defined format, but its value
        # is written to the file /var/db/mdata-user-data on each boot prior
        # to the phase that runs user-script. This file is not to be executed.
        # This allows a configuration file of some kind to be injected into
        # the machine to be consumed by the user-script when it runs.
        u_data = md.get('legacy-user-data')
        u_data_f = "%s/mdata-user-data" % LEGACY_USER_D
        write_boot_content(u_data, u_data_f)

        # Handle the cloud-init regular meta
        if not md['local-hostname']:
            md['local-hostname'] = md['instance-id']

        ud = None
        if md['user-data']:
            ud = md['user-data']

        if not md['vendor-data']:
            md['vendor-data'] = BUILTIN_VENDOR_DATA % {
                'user_script':
                user_script,
                'operator_script':
                operator_script,
                'per_boot_d':
                os.path.join(self.paths.get_cpath("scripts"), 'per-boot'),
            }

        self.metadata = util.mergemanydict([md, self.metadata])
        self.userdata_raw = ud
        self.vendordata_raw = md['vendor-data']

        self._set_provisioned()
        return True
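write_boot_content is referenced above but not included in this snippet. A minimal sketch of what such a helper could look like, assuming it only needs to persist the content, optionally prepend a shebang, mark the file executable, and keep a legacy symlink (the names and the /bin/bash shebang are illustrative assumptions, not the real implementation):

import os


def write_script_sketch(content, content_f, link=None, shebang=False,
                        mode=0o700):
    # Persist 'content' to 'content_f'; optionally prepend a shebang when the
    # payload does not already carry one, then mark the file executable and
    # refresh a legacy symlink pointing at it.
    if not content:
        return
    if shebang and not content.startswith("#!"):
        content = "#!/bin/bash\n" + content
    target_dir = os.path.dirname(content_f)
    if target_dir:
        os.makedirs(target_dir, exist_ok=True)
    with open(content_f, "w") as fp:
        fp.write(content)
    os.chmod(content_f, mode)
    if link:
        if os.path.islink(link) or os.path.exists(link):
            os.unlink(link)
        os.symlink(content_f, link)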
Exemple #41
0
    def get_data(self):
        md = {}
        ud = ""

        if not os.path.exists(self.seed):
            LOG.debug("Host does not appear to be on SmartOS")
            return False

        dmi_info = dmi_data()
        if dmi_info is False:
            LOG.debug("No dmidata utility found")
            return False

        system_uuid, system_type = tuple(dmi_info)
        if 'smartdc' not in system_type.lower():
            LOG.debug("Host is not on SmartOS. system_type=%s", system_type)
            return False
        self.is_smartdc = True
        md['instance-id'] = system_uuid

        b64_keys = self.query('base64_keys', strip=True, b64=False)
        if b64_keys is not None:
            self.b64_keys = [k.strip() for k in str(b64_keys).split(',')]

        b64_all = self.query('base64_all', strip=True, b64=False)
        if b64_all is not None:
            self.b64_all = util.is_true(b64_all)

        for ci_noun, attribute in SMARTOS_ATTRIB_MAP.items():
            smartos_noun, strip = attribute
            md[ci_noun] = self.query(smartos_noun, strip=strip)

        # @datadictionary: This key may contain a program that is written
        # to a file in the filesystem of the guest on each boot and then
        # executed. It may be of any format that would be considered
        # executable in the guest instance.
        #
        # We write 'user-script' and 'operator-script' into the
        # instance/data directory. The default vendor-data then handles
        # executing them later.
        data_d = os.path.join(self.paths.get_cpath(), 'instances',
                              md['instance-id'], 'data')
        user_script = os.path.join(data_d, 'user-script')
        u_script_l = "%s/user-script" % LEGACY_USER_D
        write_boot_content(md.get('user-script'),
                           content_f=user_script,
                           link=u_script_l,
                           shebang=True,
                           mode=0o700)

        operator_script = os.path.join(data_d, 'operator-script')
        write_boot_content(md.get('operator-script'),
                           content_f=operator_script,
                           shebang=False,
                           mode=0o700)

        # @datadictionary:  This key has no defined format, but its value
        # is written to the file /var/db/mdata-user-data on each boot prior
        # to the phase that runs user-script. This file is not to be executed.
        # This allows a configuration file of some kind to be injected into
        # the machine to be consumed by the user-script when it runs.
        u_data = md.get('legacy-user-data')
        u_data_f = "%s/mdata-user-data" % LEGACY_USER_D
        write_boot_content(u_data, u_data_f)

        # Handle the cloud-init regular meta
        if not md['local-hostname']:
            md['local-hostname'] = system_uuid

        ud = None
        if md['user-data']:
            ud = md['user-data']

        if not md['vendor-data']:
            md['vendor-data'] = BUILTIN_VENDOR_DATA % {
                'user_script':
                user_script,
                'operator_script':
                operator_script,
                'per_boot_d':
                os.path.join(self.paths.get_cpath("scripts"), 'per-boot'),
            }

        self.metadata = util.mergemanydict([md, self.metadata])
        self.userdata_raw = ud
        self.vendordata_raw = md['vendor-data']
        return True
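SMARTOS_ATTRIB_MAP is not shown here, but the loop above only relies on it being a mapping of cloud-init nouns to (metadata key, strip-flag) pairs. A self-contained sketch of that pattern, with made-up map contents and an injected query callable:

EXAMPLE_ATTRIB_MAP = {
    # noun used by cloud-init: (key asked of the metadata source, strip flag)
    'local-hostname': ('hostname', True),
    'user-data': ('user-data', False),
}


def collect_metadata(query, attrib_map=EXAMPLE_ATTRIB_MAP):
    # 'query' is any callable accepting (key, strip=...) and returning a value.
    md = {}
    for ci_noun, (smartos_noun, strip) in attrib_map.items():
        md[ci_noun] = query(smartos_noun, strip=strip)
    return md


# e.g. collect_metadata(lambda key, strip=False: {'hostname': 'vm01'}.get(key))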
Exemple #42
0
    def get_data(self):
        md = {}
        ud = ""

        if not os.path.exists(self.seed):
            LOG.debug("Host does not appear to be on SmartOS")
            return False

        dmi_info = dmi_data()
        if dmi_info is False:
            LOG.debug("No dmidata utility found")
            return False

        system_uuid, system_type = dmi_info
        if 'smartdc' not in system_type.lower():
            LOG.debug("Host is not on SmartOS. system_type=%s", system_type)
            return False
        self.is_smartdc = True
        md['instance-id'] = system_uuid

        b64_keys = self.query('base64_keys', strip=True, b64=False)
        if b64_keys is not None:
            self.b64_keys = [k.strip() for k in str(b64_keys).split(',')]

        b64_all = self.query('base64_all', strip=True, b64=False)
        if b64_all is not None:
            self.b64_all = util.is_true(b64_all)

        for ci_noun, attribute in SMARTOS_ATTRIB_MAP.items():
            smartos_noun, strip = attribute
            md[ci_noun] = self.query(smartos_noun, strip=strip)

        # @datadictionary: This key may contain a program that is written
        # to a file in the filesystem of the guest on each boot and then
        # executed. It may be of any format that would be considered
        # executable in the guest instance.
        u_script = md.get('user-script')
        u_script_f = "%s/99_user_script" % self.user_script_d
        u_script_l = "%s/user-script" % LEGACY_USER_D
        write_boot_content(u_script, u_script_f, link=u_script_l, shebang=True,
                           mode=0o700)

        # @datadictionary:  This key has no defined format, but its value
        # is written to the file /var/db/mdata-user-data on each boot prior
        # to the phase that runs user-script. This file is not to be executed.
        # This allows a configuration file of some kind to be injected into
        # the machine to be consumed by the user-script when it runs.
        u_data = md.get('legacy-user-data')
        u_data_f = "%s/mdata-user-data" % LEGACY_USER_D
        write_boot_content(u_data, u_data_f)

        # Handle the cloud-init regular meta
        if not md['local-hostname']:
            md['local-hostname'] = system_uuid

        ud = None
        if md['user-data']:
            ud = md['user-data']

        self.metadata = util.mergemanydict([md, self.metadata])
        self.userdata_raw = ud
        self.vendordata_raw = md['vendordata']
        return True
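dmi_data() is also not part of this snippet. On Linux, one common way to obtain the DMI system type that the 'smartdc' check relies on is to read sysfs; this is an assumption about the environment, not the helper's actual implementation:

import os


def read_dmi_product_name(path="/sys/class/dmi/id/product_name"):
    # Returns the DMI product name string, or None if sysfs does not expose it.
    if not os.path.isfile(path):
        return None
    with open(path) as fp:
        return fp.read().strip()


# Rough equivalent of the detection above:
# 'smartdc' in (read_dmi_product_name() or '').lower()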
Exemple #43
0
def _extract_addresses(config, entry, ifname, features=None):
    """This method parse a cloudinit.net.network_state dictionary (config) and
       maps netstate keys/values into a dictionary (entry) to represent
       netplan yaml.

    An example config dictionary might look like:

    {'mac_address': '52:54:00:12:34:00',
     'name': 'interface0',
     'subnets': [
        {'address': '192.168.1.2/24',
         'mtu': 1501,
         'type': 'static'},
        {'address': '2001:4800:78ff:1b:be76:4eff:fe06:1000',
         'mtu': 1480,
         'netmask': 64,
         'type': 'static'}],
      'type': 'physical',
      'accept-ra': 'true'
    }

    An entry dictionary looks like:

    {'set-name': 'interface0',
     'match': {'macaddress': '52:54:00:12:34:00'},
     'mtu': 1501}

    After modification returns

    {'set-name': 'interface0',
     'match': {'macaddress': '52:54:00:12:34:00'},
     'mtu': 1501,
     'addresses': ['192.168.1.2/24', '2001:4800:78ff:1b:be76:4eff:fe06:1000'],
     'ipv6-mtu': 1480}

    """
    def _listify(obj, token=' '):
        "Helper to convert strings to list of strings, handle single string"
        if not obj or type(obj) not in [str]:
            return obj
        if token in obj:
            return obj.split(token)
        else:
            return [
                obj,
            ]

    if features is None:
        features = []
    addresses = []
    routes = []
    nameservers = []
    searchdomains = []
    subnets = config.get('subnets', [])
    if subnets is None:
        subnets = []
    for subnet in subnets:
        sn_type = subnet.get('type')
        if sn_type.startswith('dhcp'):
            if sn_type == 'dhcp':
                sn_type += '4'
            entry.update({sn_type: True})
        elif sn_type in IPV6_DYNAMIC_TYPES:
            entry.update({'dhcp6': True})
        elif sn_type in ['static', 'static6']:
            addr = "%s" % subnet.get('address')
            if 'prefix' in subnet:
                addr += "/%d" % subnet.get('prefix')
            if 'gateway' in subnet and subnet.get('gateway'):
                gateway = subnet.get('gateway')
                if ":" in gateway:
                    entry.update({'gateway6': gateway})
                else:
                    entry.update({'gateway4': gateway})
            if 'dns_nameservers' in subnet:
                nameservers += _listify(subnet.get('dns_nameservers', []))
            if 'dns_search' in subnet:
                searchdomains += _listify(subnet.get('dns_search', []))
            if 'mtu' in subnet:
                mtukey = 'mtu'
                if subnet_is_ipv6(subnet) and 'ipv6-mtu' in features:
                    mtukey = 'ipv6-mtu'
                entry.update({mtukey: subnet.get('mtu')})
            for route in subnet.get('routes', []):
                to_net = "%s/%s" % (route.get('network'), route.get('prefix'))
                new_route = {
                    'via': route.get('gateway'),
                    'to': to_net,
                }
                if 'metric' in route:
                    new_route.update({'metric': route.get('metric', 100)})
                routes.append(new_route)

            addresses.append(addr)

    if 'mtu' in config:
        entry_mtu = entry.get('mtu')
        if entry_mtu and config['mtu'] != entry_mtu:
            LOG.warning(
                "Network config: ignoring %s device-level mtu:%s because"
                " ipv4 subnet-level mtu:%s provided.", ifname, config['mtu'],
                entry_mtu)
        else:
            entry['mtu'] = config['mtu']
    if len(addresses) > 0:
        entry.update({'addresses': addresses})
    if len(routes) > 0:
        entry.update({'routes': routes})
    if len(nameservers) > 0:
        ns = {'addresses': nameservers}
        entry.update({'nameservers': ns})
    if len(searchdomains) > 0:
        ns = entry.get('nameservers', {})
        ns.update({'search': searchdomains})
        entry.update({'nameservers': ns})
    if 'accept-ra' in config and config['accept-ra'] is not None:
        entry.update({'accept-ra': util.is_true(config.get('accept-ra'))})
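A quick, self-contained illustration of the static-subnet address handling described in the docstring above; it covers only the 'address' plus optional 'prefix' join and ignores gateways, routes, DNS and MTU:

def collect_static_addresses(config):
    # Simplified version of the address-gathering step: join 'address' with
    # an optional 'prefix' for every static/static6 subnet.
    addresses = []
    for subnet in config.get('subnets') or []:
        if subnet.get('type') in ('static', 'static6'):
            addr = "%s" % subnet.get('address')
            if 'prefix' in subnet:
                addr += "/%d" % subnet['prefix']
            addresses.append(addr)
    return addresses


assert collect_static_addresses(
    {'subnets': [{'type': 'static', 'address': '192.168.1.2', 'prefix': 24},
                 {'type': 'dhcp4'}]}) == ['192.168.1.2/24']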
Exemple #44
0
def _normalize_users(u_cfg, def_user_cfg=None):
    if isinstance(u_cfg, dict):
        ad_ucfg = []
        for (k, v) in u_cfg.items():
            if isinstance(v, (bool, int, float) + six.string_types):
                if util.is_true(v):
                    ad_ucfg.append(str(k))
            elif isinstance(v, dict):
                v['name'] = k
                ad_ucfg.append(v)
            else:
                raise TypeError(("Unmappable user value type %s"
                                 " for key %s") % (type_utils.obj_name(v), k))
        u_cfg = ad_ucfg
    elif isinstance(u_cfg, six.string_types):
        u_cfg = util.uniq_merge_sorted(u_cfg)

    users = {}
    for user_config in u_cfg:
        if isinstance(user_config, (list,) + six.string_types):
            for u in util.uniq_merge(user_config):
                if u and u not in users:
                    users[u] = {}
        elif isinstance(user_config, dict):
            if 'name' in user_config:
                n = user_config.pop('name')
                prev_config = users.get(n) or {}
                users[n] = util.mergemanydict([prev_config,
                                               user_config])
            else:
                # Assume the default user then
                prev_config = users.get('default') or {}
                users['default'] = util.mergemanydict([prev_config,
                                                       user_config])
        else:
            raise TypeError(("User config must be dictionary/list "
                             " or string types only and not %s") %
                            type_utils.obj_name(user_config))

    # Ensure user options are in the right python friendly format
    if users:
        c_users = {}
        for (uname, uconfig) in users.items():
            c_uconfig = {}
            for (k, v) in uconfig.items():
                k = k.replace('-', '_').strip()
                if k:
                    c_uconfig[k] = v
            c_users[uname] = c_uconfig
        users = c_users

    # Fixup the default user into the real
    # default user name and replace it...
    def_user = None
    if users and 'default' in users:
        def_config = users.pop('default')
        if def_user_cfg:
            # Pickup what the default 'real name' is
            # and any groups that are provided by the
            # default config
            def_user_cfg = def_user_cfg.copy()
            def_user = def_user_cfg.pop('name')
            def_groups = def_user_cfg.pop('groups', [])
            # Pickup any config + groups for that user name
            # that we may have previously extracted
            parsed_config = users.pop(def_user, {})
            parsed_groups = parsed_config.get('groups', [])
            # Now merge our extracted groups with
            # anything the default config provided
            users_groups = util.uniq_merge_sorted(parsed_groups, def_groups)
            parsed_config['groups'] = ",".join(users_groups)
            # The real config for the default user is the
            # combination of the default user config provided
            # by the distro, the default user config provided
            # by the above merging for the user 'default' and
            # then the parsed config from the user's 'real name'
            # which does not have to be 'default' (but could be)
            users[def_user] = util.mergemanydict([def_user_cfg,
                                                  def_config,
                                                  parsed_config])

    # Ensure that only the default user that we
    # found (if any) is actually marked as being
    # the default user
    if users:
        for (uname, uconfig) in users.items():
            if def_user and uname == def_user:
                uconfig['default'] = True
            else:
                uconfig['default'] = False

    return users
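A small sketch of the dict-form user config handling at the top of this function. Plain truthiness stands in for util.is_true here, so string values such as 'no' behave differently than in the real code:

def normalize_user_mapping(u_cfg):
    # Truthy scalar values keep just the user name; dict values carry the
    # per-user options, with the key injected as 'name'.
    normalized = []
    for name, value in u_cfg.items():
        if isinstance(value, dict):
            normalized.append(dict(value, name=name))
        elif value:  # bool/int/float/str treated as "include this user"
            normalized.append(str(name))
    return normalized


# e.g. normalize_user_mapping({'bob': True, 'alice': {'groups': 'admin'}})
# -> ['bob', {'groups': 'admin', 'name': 'alice'}]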
Exemple #45
0
def handle(_name, cfg, cloud, log, args):
    if len(args) != 0:
        # if run from command line, and give args, wipe the chpasswd['list']
        password = args[0]
        if 'chpasswd' in cfg and 'list' in cfg['chpasswd']:
            del cfg['chpasswd']['list']
    else:
        password = util.get_cfg_option_str(cfg, "password", None)

    expire = True
    plist = None

    if 'chpasswd' in cfg:
        chfg = cfg['chpasswd']
        plist = util.get_cfg_option_str(chfg, 'list', plist)
        expire = util.get_cfg_option_bool(chfg, 'expire', expire)

    if not plist and password:
        (users, _groups) = ug_util.normalize_users_groups(cfg, cloud.distro)
        (user, _user_config) = ug_util.extract_default(users)
        if user:
            plist = "%s:%s" % (user, password)
        else:
            log.warn("No default or defined user to change password for.")

    errors = []
    if plist:
        plist_in = []
        randlist = []
        users = []
        for line in plist.splitlines():
            u, p = line.split(':', 1)
            if p == "R" or p == "RANDOM":
                p = rand_user_password()
                randlist.append("%s:%s" % (u, p))
            plist_in.append("%s:%s" % (u, p))
            users.append(u)

        ch_in = '\n'.join(plist_in) + '\n'
        try:
            log.debug("Changing password for %s:", users)
            util.subp(['chpasswd'], ch_in)
        except Exception as e:
            errors.append(e)
            util.logexc(log, "Failed to set passwords with chpasswd for %s",
                        users)

        if len(randlist):
            blurb = ("Set the following 'random' passwords\n",
                     '\n'.join(randlist))
            sys.stderr.write("%s\n%s\n" % blurb)

        if expire:
            expired_users = []
            for u in users:
                try:
                    util.subp(['passwd', '--expire', u])
                    expired_users.append(u)
                except Exception as e:
                    errors.append(e)
                    util.logexc(log, "Failed to set 'expire' for %s", u)
            if expired_users:
                log.debug("Expired passwords for: %s users", expired_users)

    change_pwauth = False
    pw_auth = None
    if 'ssh_pwauth' in cfg:
        if util.is_true(cfg['ssh_pwauth']):
            change_pwauth = True
            pw_auth = 'yes'
        elif util.is_false(cfg['ssh_pwauth']):
            change_pwauth = True
            pw_auth = 'no'
        elif str(cfg['ssh_pwauth']).lower() == 'unchanged':
            log.debug('Leaving auth line unchanged')
            change_pwauth = False
        elif not str(cfg['ssh_pwauth']).strip():
            log.debug('Leaving auth line unchanged')
            change_pwauth = False
        elif not cfg['ssh_pwauth']:
            log.debug('Leaving auth line unchanged')
            change_pwauth = False
        else:
            msg = 'Unrecognized value %s for ssh_pwauth' % cfg['ssh_pwauth']
            util.logexc(log, msg)

    if change_pwauth:
        replaced_auth = False

        # See: man sshd_config
        old_lines = ssh_util.parse_ssh_config(ssh_util.DEF_SSHD_CFG)
        new_lines = []
        i = 0
        for (i, line) in enumerate(old_lines):
            # Keywords are case-insensitive and arguments are case-sensitive
            if line.key == 'passwordauthentication':
                log.debug("Replacing auth line %s with %s", i + 1, pw_auth)
                replaced_auth = True
                line.value = pw_auth
            new_lines.append(line)

        if not replaced_auth:
            log.debug("Adding new auth line %s", i + 1)
            replaced_auth = True
            new_lines.append(ssh_util.SshdConfigLine('',
                                                     'PasswordAuthentication',
                                                     pw_auth))

        lines = [str(l) for l in new_lines]
        util.write_file(ssh_util.DEF_SSHD_CFG, "\n".join(lines))

        try:
            cmd = cloud.distro.init_cmd  # Default service
            cmd.append(cloud.distro.get_option('ssh_svcname', 'ssh'))
            cmd.append('restart')
            if 'systemctl' in cmd:  # Switch action ordering
                cmd[1], cmd[2] = cmd[2], cmd[1]
            cmd = filter(None, cmd)  # Remove empty arguments
            util.subp(cmd)
            log.debug("Restarted the ssh daemon")
        except Exception:
            util.logexc(log, "Restarting of the ssh daemon failed")

    if len(errors):
        log.debug("%s errors occured, re-raising the last one", len(errors))
        raise errors[-1]
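A standalone sketch of the 'user:password' list expansion above. rand_user_password is not shown in this snippet, so a stand-in generator based on the secrets module is used:

import secrets
import string


def rand_password(length=12):
    # Stand-in for rand_user_password(); not the real helper.
    alphabet = string.ascii_letters + string.digits
    return ''.join(secrets.choice(alphabet) for _ in range(length))


def expand_password_list(plist):
    # Returns (chpasswd stdin payload, users touched, generated passwords).
    plist_in, users, randlist = [], [], []
    for line in plist.splitlines():
        u, p = line.split(':', 1)
        if p in ("R", "RANDOM"):
            p = rand_password()
            randlist.append("%s:%s" % (u, p))
        plist_in.append("%s:%s" % (u, p))
        users.append(u)
    return '\n'.join(plist_in) + '\n', users, randlist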
Exemple #46
0
def handle(_name, cfg, cloud, log, args):
    if len(args) != 0:
        # if run from command line, and give args, wipe the chpasswd['list']
        password = args[0]
        if 'chpasswd' in cfg and 'list' in cfg['chpasswd']:
            del cfg['chpasswd']['list']
    else:
        password = util.get_cfg_option_str(cfg, "password", None)

    expire = True
    plist = None

    if 'chpasswd' in cfg:
        chfg = cfg['chpasswd']
        plist = util.get_cfg_option_str(chfg, 'list', plist)
        expire = util.get_cfg_option_bool(chfg, 'expire', expire)

    if not plist and password:
        (users, _groups) = ds.normalize_users_groups(cfg, cloud.distro)
        (user, _user_config) = ds.extract_default(users)
        if user:
            plist = "%s:%s" % (user, password)
        else:
            log.warn("No default or defined user to change password for.")

    errors = []
    if plist:
        plist_in = []
        randlist = []
        users = []
        for line in plist.splitlines():
            u, p = line.split(':', 1)
            if p == "R" or p == "RANDOM":
                p = rand_user_password()
                randlist.append("%s:%s" % (u, p))
            plist_in.append("%s:%s" % (u, p))
            users.append(u)

        ch_in = '\n'.join(plist_in) + '\n'
        try:
            log.debug("Changing password for %s:", users)
            util.subp(['chpasswd'], ch_in)
        except Exception as e:
            errors.append(e)
            util.logexc(log, "Failed to set passwords with chpasswd for %s",
                        users)

        if len(randlist):
            blurb = ("Set the following 'random' passwords\n",
                     '\n'.join(randlist))
            sys.stderr.write("%s\n%s\n" % blurb)

        if expire:
            expired_users = []
            for u in users:
                try:
                    util.subp(['passwd', '--expire', u])
                    expired_users.append(u)
                except Exception as e:
                    errors.append(e)
                    util.logexc(log, "Failed to set 'expire' for %s", u)
            if expired_users:
                log.debug("Expired passwords for: %s users", expired_users)

    change_pwauth = False
    pw_auth = None
    if 'ssh_pwauth' in cfg:
        if util.is_true(cfg['ssh_pwauth']):
            change_pwauth = True
            pw_auth = 'yes'
        elif util.is_false(cfg['ssh_pwauth']):
            change_pwauth = True
            pw_auth = 'no'
        elif str(cfg['ssh_pwauth']).lower() == 'unchanged':
            log.debug('Leaving auth line unchanged')
            change_pwauth = False
        elif not str(cfg['ssh_pwauth']).strip():
            log.debug('Leaving auth line unchanged')
            change_pwauth = False
        elif not cfg['ssh_pwauth']:
            log.debug('Leaving auth line unchanged')
            change_pwauth = False
        else:
            msg = 'Unrecognized value %s for ssh_pwauth' % cfg['ssh_pwauth']
            util.logexc(log, msg)

    if change_pwauth:
        replaced_auth = False

        # See: man sshd_config
        old_lines = ssh_util.parse_ssh_config(ssh_util.DEF_SSHD_CFG)
        new_lines = []
        i = 0
        for (i, line) in enumerate(old_lines):
            # Keywords are case-insensitive and arguments are case-sensitive
            if line.key == 'passwordauthentication':
                log.debug("Replacing auth line %s with %s", i + 1, pw_auth)
                replaced_auth = True
                line.value = pw_auth
            new_lines.append(line)

        if not replaced_auth:
            log.debug("Adding new auth line %s", i + 1)
            replaced_auth = True
            new_lines.append(ssh_util.SshdConfigLine('',
                                                     'PasswordAuthentication',
                                                     pw_auth))

        lines = [str(l) for l in new_lines]
        util.write_file(ssh_util.DEF_SSHD_CFG, "\n".join(lines))

        try:
            cmd = cloud.distro.init_cmd  # Default service
            cmd.append(cloud.distro.get_option('ssh_svcname', 'ssh'))
            cmd.append('restart')
            if 'systemctl' in cmd:  # Switch action ordering
                cmd[1], cmd[2] = cmd[2], cmd[1]
            cmd = filter(None, cmd)  # Remove empty arguments
            util.subp(cmd)
            log.debug("Restarted the ssh daemon")
        except Exception:
            util.logexc(log, "Restarting of the ssh daemon failed")

    if len(errors):
        log.debug("%s errors occured, re-raising the last one", len(errors))
        raise errors[-1]
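The ssh_pwauth branches above reduce to a three-way decision. A compact sketch of that mapping, with approximate truthy/falsy sets standing in for util.is_true and util.is_false, and a ValueError where the real code only logs:

def resolve_pw_auth(value):
    # Returns 'yes', 'no', or None (meaning: leave sshd_config unchanged).
    if value is None:
        return None
    text = str(value).strip().lower()
    if text in ('1', 'true', 'yes', 'on'):
        return 'yes'
    if text in ('0', 'false', 'no', 'off'):
        return 'no'
    if text in ('', 'unchanged'):
        return None
    raise ValueError("Unrecognized value %r for ssh_pwauth" % value)


assert resolve_pw_auth(True) == 'yes'
assert resolve_pw_auth('unchanged') is None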
Exemple #47
0
def _extract_addresses(config, entry, ifname, features=None):
    """This method parse a cloudinit.net.network_state dictionary (config) and
       maps netstate keys/values into a dictionary (entry) to represent
       netplan yaml.

    An example config dictionary might look like:

    {'mac_address': '52:54:00:12:34:00',
     'name': 'interface0',
     'subnets': [
        {'address': '192.168.1.2/24',
         'mtu': 1501,
         'type': 'static'},
        {'address': '2001:4800:78ff:1b:be76:4eff:fe06:1000',
         'mtu': 1480,
         'netmask': 64,
         'type': 'static'}],
      'type': 'physical',
      'accept-ra': 'true'
    }

    An entry dictionary looks like:

    {'set-name': 'interface0',
     'match': {'macaddress': '52:54:00:12:34:00'},
     'mtu': 1501}

    After modification returns

    {'set-name': 'interface0',
     'match': {'macaddress': '52:54:00:12:34:00'},
     'mtu': 1501,
     'addresses': ['192.168.1.2/24', '2001:4800:78ff:1b:be76:4eff:fe06:1000'],
     'ipv6-mtu': 1480}

    """
    def _listify(obj, token=" "):
        "Helper to convert strings to list of strings, handle single string"
        if not obj or type(obj) not in [str]:
            return obj
        if token in obj:
            return obj.split(token)
        else:
            return [
                obj,
            ]

    if features is None:
        features = []
    addresses = []
    routes = []
    nameservers = []
    searchdomains = []
    subnets = config.get("subnets", [])
    if subnets is None:
        subnets = []
    for subnet in subnets:
        sn_type = subnet.get("type")
        if sn_type.startswith("dhcp"):
            if sn_type == "dhcp":
                sn_type += "4"
            entry.update({sn_type: True})
        elif sn_type in IPV6_DYNAMIC_TYPES:
            entry.update({"dhcp6": True})
        elif sn_type in ["static", "static6"]:
            addr = "%s" % subnet.get("address")
            if "prefix" in subnet:
                addr += "/%d" % subnet.get("prefix")
            if "gateway" in subnet and subnet.get("gateway"):
                gateway = subnet.get("gateway")
                if ":" in gateway:
                    entry.update({"gateway6": gateway})
                else:
                    entry.update({"gateway4": gateway})
            if "dns_nameservers" in subnet:
                nameservers += _listify(subnet.get("dns_nameservers", []))
            if "dns_search" in subnet:
                searchdomains += _listify(subnet.get("dns_search", []))
            if "mtu" in subnet:
                mtukey = "mtu"
                if subnet_is_ipv6(subnet) and "ipv6-mtu" in features:
                    mtukey = "ipv6-mtu"
                entry.update({mtukey: subnet.get("mtu")})
            for route in subnet.get("routes", []):
                to_net = "%s/%s" % (route.get("network"), route.get("prefix"))
                new_route = {
                    "via": route.get("gateway"),
                    "to": to_net,
                }
                if "metric" in route:
                    new_route.update({"metric": route.get("metric", 100)})
                routes.append(new_route)

            addresses.append(addr)

    if "mtu" in config:
        entry_mtu = entry.get("mtu")
        if entry_mtu and config["mtu"] != entry_mtu:
            LOG.warning(
                "Network config: ignoring %s device-level mtu:%s because"
                " ipv4 subnet-level mtu:%s provided.",
                ifname,
                config["mtu"],
                entry_mtu,
            )
        else:
            entry["mtu"] = config["mtu"]
    if len(addresses) > 0:
        entry.update({"addresses": addresses})
    if len(routes) > 0:
        entry.update({"routes": routes})
    if len(nameservers) > 0:
        ns = {"addresses": nameservers}
        entry.update({"nameservers": ns})
    if len(searchdomains) > 0:
        ns = entry.get("nameservers", {})
        ns.update({"search": searchdomains})
        entry.update({"nameservers": ns})
    if "accept-ra" in config and config["accept-ra"] is not None:
        entry.update({"accept-ra": util.is_true(config.get("accept-ra"))})
Exemple #48
0
def _normalize_users(u_cfg, def_user_cfg=None):
    if isinstance(u_cfg, dict):
        ad_ucfg = []
        for (k, v) in u_cfg.items():
            if isinstance(v, (bool, int, float) + six.string_types):
                if util.is_true(v):
                    ad_ucfg.append(str(k))
            elif isinstance(v, dict):
                v['name'] = k
                ad_ucfg.append(v)
            else:
                raise TypeError(("Unmappable user value type %s"
                                 " for key %s") % (type_utils.obj_name(v), k))
        u_cfg = ad_ucfg
    elif isinstance(u_cfg, six.string_types):
        u_cfg = util.uniq_merge_sorted(u_cfg)

    users = {}
    for user_config in u_cfg:
        if isinstance(user_config, (list, ) + six.string_types):
            for u in util.uniq_merge(user_config):
                if u and u not in users:
                    users[u] = {}
        elif isinstance(user_config, dict):
            if 'name' in user_config:
                n = user_config.pop('name')
                prev_config = users.get(n) or {}
                users[n] = util.mergemanydict([prev_config, user_config])
            else:
                # Assume the default user then
                prev_config = users.get('default') or {}
                users['default'] = util.mergemanydict(
                    [prev_config, user_config])
        else:
            raise TypeError(("User config must be dictionary/list "
                             " or string types only and not %s") %
                            type_utils.obj_name(user_config))

    # Ensure user options are in the right python friendly format
    if users:
        c_users = {}
        for (uname, uconfig) in users.items():
            c_uconfig = {}
            for (k, v) in uconfig.items():
                k = k.replace('-', '_').strip()
                if k:
                    c_uconfig[k] = v
            c_users[uname] = c_uconfig
        users = c_users

    # Fixup the default user into the real
    # default user name and replace it...
    def_user = None
    if users and 'default' in users:
        def_config = users.pop('default')
        if def_user_cfg:
            # Pickup what the default 'real name' is
            # and any groups that are provided by the
            # default config
            def_user_cfg = def_user_cfg.copy()
            def_user = def_user_cfg.pop('name')
            def_groups = def_user_cfg.pop('groups', [])
            # Pickup any config + groups for that user name
            # that we may have previously extracted
            parsed_config = users.pop(def_user, {})
            parsed_groups = parsed_config.get('groups', [])
            # Now merge our extracted groups with
            # anything the default config provided
            users_groups = util.uniq_merge_sorted(parsed_groups, def_groups)
            parsed_config['groups'] = ",".join(users_groups)
            # The real config for the default user is the
            # combination of the default user config provided
            # by the distro, the default user config provided
            # by the above merging for the user 'default' and
            # then the parsed config from the user's 'real name'
            # which does not have to be 'default' (but could be)
            users[def_user] = util.mergemanydict(
                [def_user_cfg, def_config, parsed_config])

    # Ensure that only the default user that we
    # found (if any) is actually marked as being
    # the default user
    if users:
        for (uname, uconfig) in users.items():
            if def_user and uname == def_user:
                uconfig['default'] = True
            else:
                uconfig['default'] = False

    return users
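util.uniq_merge_sorted is not included in this snippet; a plain-Python approximation of the groups merge described in the comments above (dedupe, sort, and join into the comma-separated 'groups' string):

def merge_default_user_groups(parsed_groups, default_groups):
    # Simplified stand-in for util.uniq_merge_sorted: combine the groups
    # parsed from user-data with the distro default user's groups.
    def _split(val):
        if isinstance(val, str):
            return [g.strip() for g in val.split(',') if g.strip()]
        return list(val or [])

    merged = sorted(set(_split(parsed_groups)) | set(_split(default_groups)))
    return ",".join(merged)


assert merge_default_user_groups("adm,sudo", ["sudo", "docker"]) == \
    "adm,docker,sudo"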
Exemple #49
0
    def get_data(self):
        md = {}
        ud = ""

        if not device_exists(self.seed):
            LOG.debug("No serial device '%s' found for SmartOS datasource",
                      self.seed)
            return False

        uname_arch = os.uname()[4]
        if uname_arch.startswith("arm") or uname_arch == "aarch64":
            # Disabling because dmidecode in dmi_data() crashes kvm process
            LOG.debug("Disabling SmartOS datasource on arm (LP: #1243287)")
            return False

        dmi_info = dmi_data()
        if dmi_info is False:
            LOG.debug("No dmidata utility found")
            return False

        system_uuid, system_type = tuple(dmi_info)
        if 'smartdc' not in system_type.lower():
            LOG.debug("Host is not on SmartOS. system_type=%s", system_type)
            return False
        self.is_smartdc = True
        md['instance-id'] = system_uuid

        b64_keys = self.query('base64_keys', strip=True, b64=False)
        if b64_keys is not None:
            self.b64_keys = [k.strip() for k in str(b64_keys).split(',')]

        b64_all = self.query('base64_all', strip=True, b64=False)
        if b64_all is not None:
            self.b64_all = util.is_true(b64_all)

        for ci_noun, attribute in SMARTOS_ATTRIB_MAP.items():
            smartos_noun, strip = attribute
            md[ci_noun] = self.query(smartos_noun, strip=strip)

        # @datadictionary: This key may contain a program that is written
        # to a file in the filesystem of the guest on each boot and then
        # executed. It may be of any format that would be considered
        # executable in the guest instance.
        #
        # We write 'user-script' and 'operator-script' into the
        # instance/data directory. The default vendor-data then handles
        # executing them later.
        data_d = os.path.join(self.paths.get_cpath(), 'instances',
                              md['instance-id'], 'data')
        user_script = os.path.join(data_d, 'user-script')
        u_script_l = "%s/user-script" % LEGACY_USER_D
        write_boot_content(md.get('user-script'), content_f=user_script,
                           link=u_script_l, shebang=True, mode=0o700)

        operator_script = os.path.join(data_d, 'operator-script')
        write_boot_content(md.get('operator-script'),
                           content_f=operator_script, shebang=False,
                           mode=0o700)

        # @datadictionary:  This key has no defined format, but its value
        # is written to the file /var/db/mdata-user-data on each boot prior
        # to the phase that runs user-script. This file is not to be executed.
        # This allows a configuration file of some kind to be injected into
        # the machine to be consumed by the user-script when it runs.
        u_data = md.get('legacy-user-data')
        u_data_f = "%s/mdata-user-data" % LEGACY_USER_D
        write_boot_content(u_data, u_data_f)

        # Handle the cloud-init regular meta
        if not md['local-hostname']:
            md['local-hostname'] = system_uuid

        ud = None
        if md['user-data']:
            ud = md['user-data']

        if not md['vendor-data']:
            md['vendor-data'] = BUILTIN_VENDOR_DATA % {
                'user_script': user_script,
                'operator_script': operator_script,
                'per_boot_d': os.path.join(self.paths.get_cpath("scripts"),
                                           'per-boot'),
            }

        self.metadata = util.mergemanydict([md, self.metadata])
        self.userdata_raw = ud
        self.vendordata_raw = md['vendor-data']
        return True
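BUILTIN_VENDOR_DATA itself is not part of this snippet. The %-substitution above works as shown below; the template text and paths are invented purely for illustration:

# Assumed, illustrative template -- not the real BUILTIN_VENDOR_DATA.
EXAMPLE_VENDOR_DATA_TMPL = """#cloud-boothook
#!/bin/sh
[ -x %(user_script)s ] && %(user_script)s
[ -x %(operator_script)s ] && %(operator_script)s
for f in %(per_boot_d)s/*; do [ -x "$f" ] && "$f"; done
"""

print(EXAMPLE_VENDOR_DATA_TMPL % {
    'user_script': '/var/lib/cloud/instance/data/user-script',
    'operator_script': '/var/lib/cloud/instance/data/operator-script',
    'per_boot_d': '/var/lib/cloud/scripts/per-boot',
})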