def apply_hostname_bounce(hostname, policy, interface, command,
                          hostname_command="hostname"):
    # set the hostname to 'hostname' if it is not already set to that.
    # then, if policy is not off, bounce the interface using command
    prev_hostname = util.subp(hostname_command, capture=True)[0].strip()

    util.subp([hostname_command, hostname])

    msg = ("phostname=%s hostname=%s policy=%s interface=%s" %
           (prev_hostname, hostname, policy, interface))

    if util.is_false(policy):
        LOG.debug("pubhname: policy false, skipping [%s]", msg)
        return

    if prev_hostname == hostname and policy != "force":
        LOG.debug("pubhname: no change, policy != force. skipping. [%s]", msg)
        return

    env = os.environ.copy()
    env['interface'] = interface
    env['hostname'] = hostname
    env['old_hostname'] = prev_hostname

    if command == "builtin":
        command = BOUNCE_COMMAND

    LOG.debug("pubhname: publishing hostname [%s]", msg)
    shell = not isinstance(command, (list, tuple))
    # capture=False, see comments in bug 1202758 and bug 1206164.
    util.log_time(logfunc=LOG.debug, msg="publishing hostname",
        get_uptime=True, func=util.subp,
        kwargs={'args': command, 'shell': shell, 'capture': False,
                'env': env})
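All of the examples in this collection route a long-running call through cloud-init's util.log_time so that its duration lands in the log. A minimal sketch of such a helper, inferred from the call sites collected here and using only the standard library (the upstream implementation additionally reports a system-uptime delta when get_uptime=True):

import time


def log_time(logfunc, msg, func, args=None, kwargs=None, get_uptime=False):
    """Call func(*args, **kwargs) and log how long the call took via logfunc.

    Simplified stand-in for cloud-init's util.log_time; get_uptime is
    accepted for signature compatibility but ignored in this sketch.
    """
    args = args or []
    kwargs = kwargs or {}
    start = time.monotonic()
    try:
        return func(*args, **kwargs)
    finally:
        delta = time.monotonic() - start
        try:
            logfunc("%s took %0.3f seconds" % (msg, delta))
        except Exception:
            # Timing/logging must never break (or mask) the wrapped call.
            pass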
Example #2
    def _get_data(self):
        strict_mode, _sleep = read_strict_mode(
            util.get_cfg_by_path(self.sys_cfg, STRICT_ID_PATH,
                                 STRICT_ID_DEFAULT), ("warn", None))

        LOG.debug("strict_mode: %s, cloud_name=%s cloud_platform=%s",
                  strict_mode, self.cloud_name, self.platform)
        if strict_mode == "true" and self.cloud_name == CloudNames.UNKNOWN:
            return False
        elif self.cloud_name == CloudNames.NO_EC2_METADATA:
            return False

        if self.perform_dhcp_setup:  # Setup networking in init-local stage.
            if util.is_FreeBSD():
                LOG.debug("FreeBSD doesn't support running dhclient with -sf")
                return False
            try:
                with EphemeralDHCPv4(self.fallback_interface):
                    self._crawled_metadata = util.log_time(
                        logfunc=LOG.debug, msg='Crawl of metadata service',
                        func=self.crawl_metadata)
            except NoDHCPLeaseError:
                return False
        else:
            self._crawled_metadata = util.log_time(
                logfunc=LOG.debug, msg='Crawl of metadata service',
                func=self.crawl_metadata)
        if not self._crawled_metadata:
            return False
        self.metadata = self._crawled_metadata.get('meta-data', None)
        self.userdata_raw = self._crawled_metadata.get('user-data', None)
        self.identity = self._crawled_metadata.get(
            'dynamic', {}).get('instance-identity', {}).get('document', {})
        return True
Example #3
    def package_command(self, command, args=None, pkgs=None):
        if pkgs is None:
            pkgs = []

        e = os.environ.copy()
        # See: http://manpages.ubuntu.com/manpages/xenial/man7/debconf.7.html
        e['DEBIAN_FRONTEND'] = 'noninteractive'

        wcfg = self.get_option("apt_get_wrapper", APT_GET_WRAPPER)
        cmd = _get_wrapper_prefix(
            wcfg.get('command', APT_GET_WRAPPER['command']),
            wcfg.get('enabled', APT_GET_WRAPPER['enabled']))

        cmd.extend(list(self.get_option("apt_get_command", APT_GET_COMMAND)))

        if args and isinstance(args, str):
            cmd.append(args)
        elif args and isinstance(args, list):
            cmd.extend(args)

        subcmd = command
        if command == "upgrade":
            subcmd = self.get_option("apt_get_upgrade_subcommand",
                                     "dist-upgrade")

        cmd.append(subcmd)

        pkglist = util.expand_package_list('%s=%s', pkgs)
        cmd.extend(pkglist)

        # Allow the output of this to flow outwards (ie not be captured)
        util.log_time(logfunc=LOG.debug,
                      msg="apt-%s [%s]" % (command, ' '.join(cmd)),
                      func=subp.subp,
                      args=(cmd,), kwargs={'env': e, 'capture': False})
Example #4
    def network_config(self):
        """Return a network config dict for rendering ENI or netplan files."""
        if self._network_config != sources.UNSET:
            return self._network_config

        if self.metadata is None:
            # this would happen if get_data hadn't been called. leave as UNSET
            LOG.warning(
                "Unexpected call to network_config when metadata is None.")
            return None

        result = None
        no_network_metadata_on_aws = bool(
            'network' not in self.metadata and
            self.cloud_platform == Platforms.AWS)
        if no_network_metadata_on_aws:
            LOG.debug("Metadata 'network' not present:"
                      " Refreshing stale metadata from prior to upgrade.")
            util.log_time(
                logfunc=LOG.debug, msg='Re-crawl of metadata service',
                func=self._crawl_metadata)

        # Limit network configuration to only the primary/fallback nic
        iface = self.fallback_interface
        macs_to_nics = {net.get_interface_mac(iface): iface}
        net_md = self.metadata.get('network')
        if isinstance(net_md, dict):
            result = convert_ec2_metadata_network_config(
                net_md, macs_to_nics=macs_to_nics, fallback_nic=iface)
        else:
            LOG.warning("Metadata 'network' key not valid: %s.", net_md)
        self._network_config = result

        return self._network_config
Example #5
def perform_hostname_bounce(hostname, cfg, prev_hostname):
    # set the hostname to 'hostname' if it is not already set to that.
    # then, if policy is not off, bounce the interface using command
    # Returns True if the network was bounced, False otherwise.
    command = cfg['command']
    interface = cfg['interface']
    policy = cfg['policy']

    msg = ("hostname=%s policy=%s interface=%s" %
           (hostname, policy, interface))
    env = os.environ.copy()
    env['interface'] = interface
    env['hostname'] = hostname
    env['old_hostname'] = prev_hostname

    if command == "builtin":
        if util.is_FreeBSD():
            command = BOUNCE_COMMAND_FREEBSD
        elif util.which('ifup'):
            command = BOUNCE_COMMAND_IFUP
        else:
            LOG.debug(
                "Skipping network bounce: ifupdown utils aren't present.")
            # Don't bounce as networkd handles hostname DDNS updates
            return False
    LOG.debug("pubhname: publishing hostname [%s]", msg)
    shell = not isinstance(command, (list, tuple))
    # capture=False, see comments in bug 1202758 and bug 1206164.
    util.log_time(logfunc=LOG.debug, msg="publishing hostname",
                  get_uptime=True, func=util.subp,
                  kwargs={'args': command, 'shell': shell, 'capture': False,
                          'env': env})
    return True
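A hedged usage sketch for perform_hostname_bounce as defined above; the cfg keys mirror the ones the function reads, while the interface name and both hostnames are made-up placeholders:

# Hypothetical caller; 'eth0' and both hostnames are illustrative only.
bounce_cfg = {'command': 'builtin', 'interface': 'eth0', 'policy': True}
try:
    bounced = perform_hostname_bounce('new-host', bounce_cfg, 'old-host')
except Exception:
    util.logexc(LOG, "failed to bounce network while publishing hostname")
else:
    if not bounced:
        LOG.debug("hostname published without bouncing the network")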
Example #6
def get_metadata_from_imds(fallback_nic, retries):
    """Query Azure's network metadata service, returning a dictionary.

    If network is not up, setup ephemeral dhcp on fallback_nic to talk to the
    IMDS. For more info on IMDS:
        https://docs.microsoft.com/en-us/azure/virtual-machines/windows/instance-metadata-service

    @param fallback_nic: String. The name of the nic which requires active
        network in order to query IMDS.
    @param retries: The number of retries of the IMDS_URL.

    @return: A dict of instance metadata containing compute and network
        info.
    """
    kwargs = {
        'logfunc': LOG.debug,
        'msg': 'Crawl of Azure Instance Metadata Service (IMDS)',
        'func': _get_metadata_from_imds,
        'args': (retries, )
    }
    if net.is_up(fallback_nic):
        return util.log_time(**kwargs)
    else:
        with EphemeralDHCPv4(fallback_nic):
            return util.log_time(**kwargs)
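The if/else above is a recurring pattern in these datasources: query the metadata service directly when the NIC already has connectivity, otherwise bring up a temporary DHCP lease just for the call. A hedged generalization of that pattern, assuming the same net and EphemeralDHCPv4 names the example relies on:

def run_with_network(nic, func, *args, **kwargs):
    # Run func as-is if nic is already up; otherwise wrap the call in an
    # ephemeral DHCPv4 lease on that nic, as get_metadata_from_imds does.
    # 'net' and 'EphemeralDHCPv4' are assumed to be the example's imports.
    if net.is_up(nic):
        return func(*args, **kwargs)
    with EphemeralDHCPv4(nic):
        return func(*args, **kwargs)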
Example #7
def wait_for_physdevs(netcfg, strict=True):
    physdevs = extract_physdevs(netcfg)

    # set of expected iface names and mac addrs
    expected_ifaces = dict([(iface[0], iface[1]) for iface in physdevs])
    expected_macs = set(expected_ifaces.keys())

    # set of current macs
    present_macs = get_interfaces_by_mac().keys()

    # compare the set of expected mac address values to
    # the current macs present; we only check MAC as cloud-init
    # has not yet renamed interfaces and the netcfg may include
    # such renames.
    for _ in range(0, 5):
        if expected_macs.issubset(present_macs):
            LOG.debug('net: all expected physical devices present')
            return

        missing = expected_macs.difference(present_macs)
        LOG.debug('net: waiting for expected net devices: %s', missing)
        for mac in missing:
            # trigger a settle, unless this interface exists
            syspath = sys_dev_path(expected_ifaces[mac])
            settle = partial(util.udevadm_settle, exists=syspath)
            msg = 'Waiting for udev events to settle or %s exists' % syspath
            util.log_time(LOG.debug, msg, func=settle)

        # update present_macs after settles
        present_macs = get_interfaces_by_mac().keys()

    msg = 'Not all expected physical devices present: %s' % missing
    LOG.warning(msg)
    if strict:
        raise RuntimeError(msg)
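The functools.partial call above is what lets util.log_time time a function it knows nothing about: the arguments are frozen into the callable before it is handed over. A small usage illustration with a hypothetical sysfs path:

from functools import partial

# '/sys/class/net/eth0' is an illustrative path, not taken from the example.
syspath = '/sys/class/net/eth0'
settle = partial(util.udevadm_settle, exists=syspath)
util.log_time(LOG.debug,
              'Waiting for udev events to settle or %s exists' % syspath,
              func=settle)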
Example #8
    def package_command(self, command, args=None, pkgs=None):
        if pkgs is None:
            pkgs = []

        e = os.environ.copy()
        # See: http://tiny.cc/kg91fw
        # Or: http://tiny.cc/mh91fw
        e['DEBIAN_FRONTEND'] = 'noninteractive'

        wcfg = self.get_option("apt_get_wrapper", APT_GET_WRAPPER)
        cmd = _get_wrapper_prefix(
            wcfg.get('command', APT_GET_WRAPPER['command']),
            wcfg.get('enabled', APT_GET_WRAPPER['enabled']))

        cmd.extend(list(self.get_option("apt_get_command", APT_GET_COMMAND)))

        if args and isinstance(args, str):
            cmd.append(args)
        elif args and isinstance(args, list):
            cmd.extend(args)

        subcmd = command
        if command == "upgrade":
            subcmd = self.get_option("apt_get_upgrade_subcommand",
                                     "dist-upgrade")

        cmd.append(subcmd)

        pkglist = util.expand_package_list('%s=%s', pkgs)
        cmd.extend(pkglist)

        # Allow the output of this to flow outwards (ie not be captured)
        util.log_time(logfunc=LOG.debug,
            msg="apt-%s [%s]" % (command, ' '.join(cmd)), func=util.subp,
            args=(cmd,), kwargs={'env': e, 'capture': False})
Example #9
def setup_swapfile(fname, size=None, maxsize=None):
    """
    fname: full path string of filename to setup
    size: the size to create. set to "auto" for recommended
    maxsize: the maximum size
    """
    swap_dir = os.path.dirname(fname)
    if str(size).lower() == "auto":
        try:
            memsize = util.read_meminfo()["total"]
        except IOError:
            LOG.debug("Not creating swap: failed to read meminfo")
            return

        util.ensure_dir(swap_dir)
        size = suggested_swapsize(fsys=swap_dir,
                                  maxsize=maxsize,
                                  memsize=memsize)

    if not size:
        LOG.debug("Not creating swap: suggested size was 0")
        return

    mibsize = str(int(size / (2**20)))

    util.log_time(
        LOG.debug,
        msg="Setting up swap file",
        func=create_swapfile,
        args=[fname, mibsize],
    )

    return fname
Example #10
def perform_hostname_bounce(hostname, cfg, prev_hostname):
    # set the hostname to 'hostname' if it is not already set to that.
    # then, if policy is not off, bounce the interface using command
    command = cfg["command"]
    interface = cfg["interface"]
    policy = cfg["policy"]

    msg = "hostname=%s policy=%s interface=%s" % (hostname, policy, interface)
    env = os.environ.copy()
    env["interface"] = interface
    env["hostname"] = hostname
    env["old_hostname"] = prev_hostname

    if command == "builtin":
        command = BOUNCE_COMMAND

    LOG.debug("pubhname: publishing hostname [%s]", msg)
    shell = not isinstance(command, (list, tuple))
    # capture=False, see comments in bug 1202758 and bug 1206164.
    util.log_time(
        logfunc=LOG.debug,
        msg="publishing hostname",
        get_uptime=True,
        func=util.subp,
        kwargs={"args": command, "shell": shell, "capture": False, "env": env},
    )
Example #11
def perform_hostname_bounce(hostname, cfg, prev_hostname):
    # set the hostname to 'hostname' if it is not already set to that.
    # then, if policy is not off, bounce the interface using command
    command = cfg['command']
    interface = cfg['interface']
    policy = cfg['policy']

    msg = ("hostname=%s policy=%s interface=%s" %
           (hostname, policy, interface))
    env = os.environ.copy()
    env['interface'] = interface
    env['hostname'] = hostname
    env['old_hostname'] = prev_hostname

    if command == "builtin":
        command = BOUNCE_COMMAND

    LOG.debug("pubhname: publishing hostname [%s]", msg)
    shell = not isinstance(command, (list, tuple))
    # capture=False, see comments in bug 1202758 and bug 1206164.
    util.log_time(logfunc=LOG.debug,
                  msg="publishing hostname",
                  get_uptime=True,
                  func=util.subp,
                  kwargs={
                      'args': command,
                      'shell': shell,
                      'capture': False,
                      'env': env
                  })
Example #12
def apply_hostname_bounce(hostname, policy, interface, command,
                          hostname_command="hostname"):
    # set the hostname to 'hostname' if it is not already set to that.
    # then, if policy is not off, bounce the interface using command
    prev_hostname = util.subp(hostname_command, capture=True)[0].strip()

    util.subp([hostname_command, hostname])

    msg = ("phostname=%s hostname=%s policy=%s interface=%s" %
           (prev_hostname, hostname, policy, interface))

    if util.is_false(policy):
        LOG.debug("pubhname: policy false, skipping [%s]", msg)
        return

    if prev_hostname == hostname and policy != "force":
        LOG.debug("pubhname: no change, policy != force. skipping. [%s]", msg)
        return

    env = os.environ.copy()
    env['interface'] = interface
    env['hostname'] = hostname
    env['old_hostname'] = prev_hostname

    if command == "builtin":
        command = BOUNCE_COMMAND

    LOG.debug("pubhname: publishing hostname [%s]", msg)
    shell = not isinstance(command, (list, tuple))
    # capture=False, see comments in bug 1202758 and bug 1206164.
    util.log_time(logfunc=LOG.debug, msg="publishing hostname",
        get_uptime=True, func=util.subp,
        kwargs={'args': command, 'shell': shell, 'capture': False,
                'env': env})
Example #13
    def _get_data(self):
        strict_mode, _sleep = read_strict_mode(
            util.get_cfg_by_path(self.sys_cfg, STRICT_ID_PATH,
                                 STRICT_ID_DEFAULT), ("warn", None))

        LOG.debug("strict_mode: %s, cloud_name=%s cloud_platform=%s",
                  strict_mode, self.cloud_name, self.platform)
        if strict_mode == "true" and self.cloud_name == CloudNames.UNKNOWN:
            return False
        elif self.cloud_name == CloudNames.NO_EC2_METADATA:
            return False

        if self.perform_dhcp_setup:  # Setup networking in init-local stage.
            if util.is_FreeBSD():
                LOG.debug("FreeBSD doesn't support running dhclient with -sf")
                return False
            try:
                with EphemeralDHCPv4(self.fallback_interface):
                    self._crawled_metadata = util.log_time(
                        logfunc=LOG.debug, msg='Crawl of metadata service',
                        func=self.crawl_metadata)
            except NoDHCPLeaseError:
                return False
        else:
            self._crawled_metadata = util.log_time(
                logfunc=LOG.debug, msg='Crawl of metadata service',
                func=self.crawl_metadata)
        if not self._crawled_metadata:
            return False
        self.metadata = self._crawled_metadata.get('meta-data', None)
        self.userdata_raw = self._crawled_metadata.get('user-data', None)
        self.identity = self._crawled_metadata.get(
            'dynamic', {}).get('instance-identity', {}).get('document', {})
        return True
Example #14
    def network_config(self):
        """Return a network config dict for rendering ENI or netplan files."""
        if self._network_config != sources.UNSET:
            return self._network_config

        if self.metadata is None:
            # this would happen if get_data hadn't been called. leave as UNSET
            LOG.warning(
                "Unexpected call to network_config when metadata is None.")
            return None

        result = None
        no_network_metadata_on_aws = bool(
            "network" not in self.metadata
            and self.cloud_name == CloudNames.AWS)
        if no_network_metadata_on_aws:
            LOG.debug("Metadata 'network' not present:"
                      " Refreshing stale metadata from prior to upgrade.")
            util.log_time(
                logfunc=LOG.debug,
                msg="Re-crawl of metadata service",
                func=self.get_data,
            )

        iface = self.fallback_interface
        net_md = self.metadata.get("network")
        if isinstance(net_md, dict):
            # SRU_BLOCKER: xenial, bionic and eoan should default
            # apply_full_imds_network_config to False to retain original
            # behavior on those releases.
            result = convert_ec2_metadata_network_config(
                net_md,
                fallback_nic=iface,
                full_network_config=util.get_cfg_option_bool(
                    self.ds_cfg, "apply_full_imds_network_config", True),
            )

            # RELEASE_BLOCKER: xenial should drop the below if statement,
            # because the issue being addressed doesn't exist pre-netplan.
            # (This datasource doesn't implement check_instance_id() so the
            # datasource object is recreated every boot; this means we don't
            # need to modify update_events on cloud-init upgrade.)

            # Non-VPC (aka Classic) Ec2 instances need to rewrite the
            # network config file every boot due to MAC address change.
            if self.is_classic_instance():
                self.default_update_events = copy.deepcopy(
                    self.default_update_events)
                self.default_update_events[EventScope.NETWORK].add(
                    EventType.BOOT)
                self.default_update_events[EventScope.NETWORK].add(
                    EventType.BOOT_LEGACY)
        else:
            LOG.warning("Metadata 'network' key not valid: %s.", net_md)
        self._network_config = result

        return self._network_config
Example #15
    def wait_for_physdevs(self,
                          netcfg: NetworkConfig,
                          *,
                          strict: bool = True) -> None:
        """Wait for all the physical devices in `netcfg` to exist on the system

        Specifically, this will call `self.settle` 5 times, and check after
        each one if the physical devices are now present in the system.

        :param netcfg:
            The NetworkConfig from which to extract physical devices to wait
            for.
        :param strict:
            Raise a `RuntimeError` if any physical devices are not present
            after waiting.
        """
        physdevs = self.extract_physdevs(netcfg)

        # set of expected iface names and mac addrs
        expected_ifaces = dict([(iface[0], iface[1]) for iface in physdevs])
        expected_macs = set(expected_ifaces.keys())

        # set of current macs
        present_macs = self.get_interfaces_by_mac().keys()

        # compare the set of expected mac address values to
        # the current macs present; we only check MAC as cloud-init
        # has not yet renamed interfaces and the netcfg may include
        # such renames.
        for _ in range(0, 5):
            if expected_macs.issubset(present_macs):
                LOG.debug("net: all expected physical devices present")
                return

            missing = expected_macs.difference(present_macs)
            LOG.debug("net: waiting for expected net devices: %s", missing)
            for mac in missing:
                # trigger a settle, unless this interface exists
                devname = expected_ifaces[mac]
                msg = "Waiting for settle or {} exists".format(devname)
                util.log_time(
                    LOG.debug,
                    msg,
                    func=self.settle,
                    kwargs={"exists": devname},
                )

            # update present_macs after settles
            present_macs = self.get_interfaces_by_mac().keys()

        msg = "Not all expected physical devices present: %s" % missing
        LOG.warning(msg)
        if strict:
            raise RuntimeError(msg)
Example #16
    def _get_data(self):
        (is_upcloud, server_uuid) = self._get_sysinfo()

        # only proceed if we know we are on UpCloud
        if not is_upcloud:
            return False

        LOG.info("Running on UpCloud. server_uuid=%s", server_uuid)

        if self.perform_dhcp_setup:  # Setup networking in init-local stage.
            try:
                LOG.debug("Finding a fallback NIC")
                nic = cloudnet.find_fallback_nic()
                LOG.debug("Discovering metadata via DHCP interface %s", nic)
                with EphemeralDHCPv4(nic):
                    md = util.log_time(
                        logfunc=LOG.debug,
                        msg="Reading from metadata service",
                        func=self._read_metadata,
                    )
            except (NoDHCPLeaseError, sources.InvalidMetaDataException) as e:
                util.logexc(LOG, str(e))
                return False
        else:
            try:
                LOG.debug(
                    "Discovering metadata without DHCP-configured networking"
                )
                md = util.log_time(
                    logfunc=LOG.debug,
                    msg="Reading from metadata service",
                    func=self._read_metadata,
                )
            except sources.InvalidMetaDataException as e:
                util.logexc(LOG, str(e))
                LOG.info(
                    "No DHCP-enabled interfaces available, "
                    "unable to fetch metadata for %s",
                    server_uuid,
                )
                return False

        self.metadata_full = md
        self.metadata["instance-id"] = md.get("instance_id", server_uuid)
        self.metadata["local-hostname"] = md.get("hostname")
        self.metadata["network"] = md.get("network")
        self.metadata["public-keys"] = md.get("public_keys")
        self.metadata["availability_zone"] = md.get("region", "default")
        self.vendordata_raw = md.get("vendor_data", None)
        self.userdata_raw = md.get("user_data", None)

        return True
Example #17
def handle(_name, cfg, cloud, log, _args):
    """
    See doc/examples/cloud-config-disk-setup.txt for documentation on the
    format.
    """
    device_aliases = cfg.get("device_aliases", {})

    def alias_to_device(cand):
        name = device_aliases.get(cand)
        return cloud.device_name_to_device(name or cand) or name

    disk_setup = cfg.get("disk_setup")
    if isinstance(disk_setup, dict):
        update_disk_setup_devices(disk_setup, alias_to_device)
        log.debug("Partitioning disks: %s", str(disk_setup))
        for disk, definition in disk_setup.items():
            if not isinstance(definition, dict):
                log.warning("Invalid disk definition for %s" % disk)
                continue

            try:
                log.debug("Creating new partition table/disk")
                util.log_time(
                    logfunc=LOG.debug,
                    msg="Creating partition on %s" % disk,
                    func=mkpart,
                    args=(disk, definition),
                )
            except Exception as e:
                util.logexc(LOG, "Failed partitioning operation\n%s" % e)

    fs_setup = cfg.get("fs_setup")
    if isinstance(fs_setup, list):
        log.debug("setting up filesystems: %s", str(fs_setup))
        update_fs_setup_devices(fs_setup, alias_to_device)
        for definition in fs_setup:
            if not isinstance(definition, dict):
                log.warning("Invalid file system definition: %s" % definition)
                continue

            try:
                log.debug("Creating new filesystem.")
                device = definition.get("device")
                util.log_time(
                    logfunc=LOG.debug,
                    msg="Creating fs for %s" % device,
                    func=mkfs,
                    args=(definition,),
                )
            except Exception as e:
                util.logexc(LOG, "Failed during filesystem operation\n%s" % e)
Example #18
    def _get_data(self):
        if not on_scaleway():
            return False

        if self._fallback_interface is None:
            self._fallback_interface = net.find_fallback_nic()
        try:
            with EphemeralDHCPv4(self._fallback_interface):
                util.log_time(
                    logfunc=LOG.debug, msg='Crawl of metadata service',
                    func=self._crawl_metadata)
        except (NoDHCPLeaseError) as e:
            util.logexc(LOG, str(e))
            return False
        return True
Example #19
    def _get_data(self):
        if not on_scaleway():
            return False

        if self._fallback_interface is None:
            self._fallback_interface = net.find_fallback_nic()
        try:
            with EphemeralDHCPv4(self._fallback_interface):
                util.log_time(logfunc=LOG.debug,
                              msg='Crawl of metadata service',
                              func=self._crawl_metadata)
        except (NoDHCPLeaseError) as e:
            util.logexc(LOG, str(e))
            return False
        return True
Example #20
def setup_swapfile(fname, size=None, maxsize=None):
    """
    fname: full path string of filename to setup
    size: the size to create. set to "auto" for recommended
    maxsize: the maximum size
    """
    tdir = os.path.dirname(fname)
    if str(size).lower() == "auto":
        try:
            memsize = util.read_meminfo()['total']
        except IOError:
            LOG.debug("Not creating swap: failed to read meminfo")
            return

        util.ensure_dir(tdir)
        size = suggested_swapsize(fsys=tdir, maxsize=maxsize, memsize=memsize)

    if not size:
        LOG.debug("Not creating swap: suggested size was 0")
        return

    mbsize = str(int(size / (2**20)))
    msg = "creating swap file '%s' of %sMB" % (fname, mbsize)
    try:
        util.ensure_dir(tdir)
        # Check if filesystem is safe for fallocate
        fname_fs_type = util.get_mount_info(fname)[1]
        if fname_fs_type in ['xfs']:
            create_swapfile_command = 'dd if=/dev/zero "of=$1" bs=1M "count=$2"'
        else:
            create_swapfile_command = 'fallocate -l "${2}M" "$1"'

        util.log_time(LOG.debug,
                      msg,
                      func=util.subp,
                      args=[[
                          'sh', '-c',
                          ('rm -f "$1" && umask 0066 && '
                           '%s && '
                           'mkswap "$1" || { r=$?; rm -f "$1"; exit $r; }' %
                           create_swapfile_command), 'setup_swap', fname,
                          mbsize
                      ]])

    except Exception as e:
        raise IOError("Failed %s: %s" % (msg, e))

    return fname
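The filesystem check above exists because a swap file preallocated with fallocate on XFS can end up with unwritten extents that swapon then rejects, so dd is used there to write real blocks. A hedged sketch of that decision factored into a helper, reusing util.get_mount_info exactly as the example does:

def choose_swap_alloc_command(fname):
    # util.get_mount_info(path)[1] is the filesystem type the path lives
    # on, matching the example's fname_fs_type lookup.
    fs_type = util.get_mount_info(fname)[1]
    if fs_type in ('xfs',):
        return 'dd if=/dev/zero "of=$1" bs=1M "count=$2"'
    return 'fallocate -l "${2}M" "$1"'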
Example #21
    def _get_data(self):
        seed_ret = {}
        if util.read_optional_seed(seed_ret, base=(self.seed_dir + "/")):
            self.userdata_raw = seed_ret['user-data']
            self.metadata = seed_ret['meta-data']
            LOG.debug("Using seeded ec2 data from %s", self.seed_dir)
            self._cloud_platform = Platforms.SEEDED
            return True

        strict_mode, _sleep = read_strict_mode(
            util.get_cfg_by_path(self.sys_cfg, STRICT_ID_PATH,
                                 STRICT_ID_DEFAULT), ("warn", None))

        LOG.debug("strict_mode: %s, cloud_platform=%s",
                  strict_mode, self.cloud_platform)
        if strict_mode == "true" and self.cloud_platform == Platforms.UNKNOWN:
            return False
        elif self.cloud_platform == Platforms.NO_EC2_METADATA:
            return False

        if self.perform_dhcp_setup:  # Setup networking in init-local stage.
            if util.is_FreeBSD():
                LOG.debug("FreeBSD doesn't support running dhclient with -sf")
                return False
            try:
                with EphemeralDHCPv4(self.fallback_interface):
                    return util.log_time(
                        logfunc=LOG.debug, msg='Crawl of metadata service',
                        func=self._crawl_metadata)
            except NoDHCPLeaseError:
                return False
        else:
            return self._crawl_metadata()
Example #22
    def _get_data(self) -> bool:
        """Crawl LXD socket API instance data and return True on success"""
        if not self._is_platform_viable():
            LOG.debug("Not an LXD datasource: No LXD socket found.")
            return False

        self._crawled_metadata = util.log_time(
            logfunc=LOG.debug,
            msg="Crawl of metadata service",
            func=read_metadata,
        )
        self.metadata = _raw_instance_data_to_dict(
            "meta-data", self._crawled_metadata.get("meta-data"))
        config = self._crawled_metadata.get("config", {})
        user_metadata = config.get("user.meta-data", {})
        if user_metadata:
            user_metadata = _raw_instance_data_to_dict("user.meta-data",
                                                       user_metadata)
        if not isinstance(self.metadata, dict):
            self.metadata = util.mergemanydict(
                [util.load_yaml(self.metadata), user_metadata])
        if "user-data" in self._crawled_metadata:
            self.userdata_raw = self._crawled_metadata["user-data"]
        if "network-config" in self._crawled_metadata:
            self._network_config = _raw_instance_data_to_dict(
                "network-config", self._crawled_metadata["network-config"])
        if "vendor-data" in self._crawled_metadata:
            self.vendordata_raw = self._crawled_metadata["vendor-data"]
        return True
Example #23
    def _crawl_metadata(self):
        """Crawl metadata service when available.

        @returns: Dictionary with all metadata discovered for this datasource.
        @raise: InvalidMetaDataException on unreadable or broken
            metadata.
        """
        try:
            if not self.wait_for_metadata_service():
                raise sources.InvalidMetaDataException(
                    'No active metadata service found')
        except IOError as e:
            raise sources.InvalidMetaDataException(
                'IOError contacting metadata service: {error}'.format(
                    error=str(e)))

        url_params = self.get_url_params()

        try:
            result = util.log_time(
                LOG.debug, 'Crawl of openstack metadata service',
                read_metadata_service, args=[self.metadata_address],
                kwargs={'ssl_details': self.ssl_details,
                        'retries': url_params.num_retries,
                        'timeout': url_params.timeout_seconds})
        except openstack.NonReadable as e:
            raise sources.InvalidMetaDataException(str(e))
        except (openstack.BrokenMetadata, IOError):
            msg = 'Broken metadata address {addr}'.format(
                addr=self.metadata_address)
            raise sources.InvalidMetaDataException(msg)
        return result
Example #24
    def _get_data(self):
        url_params = self.get_url_params()
        network_context = noop()
        if self.perform_dhcp_setup:
            network_context = EphemeralDHCPv4(self.fallback_interface)
        with network_context:
            ret = util.log_time(
                LOG.debug,
                "Crawl of GCE metadata service",
                read_md,
                kwargs={
                    "address": self.metadata_address,
                    "url_params": url_params,
                },
            )

        if not ret["success"]:
            if ret["platform_reports_gce"]:
                LOG.warning(ret["reason"])
            else:
                LOG.debug(ret["reason"])
            return False
        self.metadata = ret["meta-data"]
        self.userdata_raw = ret["user-data"]
        return True
Example #25
    def _get_data(self):
        """Crawl and process datasource metadata caching metadata as attrs.

        @return: True on success, False on error, invalid or disabled
            datasource.
        """
        if not self._is_platform_viable():
            return False
        try:
            crawled_data = util.log_time(logfunc=LOG.debug,
                                         msg='Crawl of metadata service',
                                         func=self.crawl_metadata)
        except sources.InvalidMetaDataException as e:
            LOG.warning('Could not crawl Azure metadata: %s', e)
            return False
        if self.distro and self.distro.name == 'ubuntu':
            maybe_remove_ubuntu_network_config_scripts()

        # Process crawled data and augment with various config defaults
        self.cfg = util.mergemanydict(
            [crawled_data['cfg'], BUILTIN_CLOUD_CONFIG])
        self._metadata_imds = crawled_data['metadata']['imds']
        self.metadata = util.mergemanydict(
            [crawled_data['metadata'], DEFAULT_METADATA])
        self.userdata_raw = crawled_data['userdata_raw']

        user_ds_cfg = util.get_cfg_by_path(self.cfg, DS_CFG_PATH, {})
        self.ds_cfg = util.mergemanydict([user_ds_cfg, self.ds_cfg])

        # walinux agent writes files world readable, but expects
        # the directory to be protected.
        write_files(self.ds_cfg['data_dir'],
                    crawled_data['files'],
                    dirmode=0o700)
        return True
Example #26
    def _crawl_metadata(self):
        """Crawl metadata service when available.

        @returns: Dictionary with all metadata discovered for this datasource.
        @raise: InvalidMetaDataException on unreadable or broken
            metadata.
        """
        try:
            if not self.wait_for_metadata_service():
                raise sources.InvalidMetaDataException(
                    'No active metadata service found')
        except IOError as e:
            raise sources.InvalidMetaDataException(
                'IOError contacting metadata service: {error}'.format(
                    error=str(e)))

        url_params = self.get_url_params()

        try:
            result = util.log_time(LOG.debug,
                                   'Crawl of openstack metadata service',
                                   read_metadata_service,
                                   args=[self.metadata_address],
                                   kwargs={
                                       'ssl_details': self.ssl_details,
                                       'retries': url_params.num_retries,
                                       'timeout': url_params.timeout_seconds
                                   })
        except openstack.NonReadable as e:
            raise sources.InvalidMetaDataException(str(e))
        except (openstack.BrokenMetadata, IOError) as e:
            msg = 'Broken metadata address {addr}'.format(
                addr=self.metadata_address)
            raise sources.InvalidMetaDataException(msg) from e
        return result
Example #27
    def get_metadata_from_agent(self):
        temp_hostname = self.metadata.get('local-hostname')
        hostname_command = self.ds_cfg['hostname_bounce']['hostname_command']
        with temporary_hostname(temp_hostname, self.ds_cfg,
                                hostname_command=hostname_command) \
                as previous_hostname:
            if (previous_hostname is not None
                    and util.is_true(self.ds_cfg.get('set_hostname'))):
                cfg = self.ds_cfg['hostname_bounce']
                try:
                    perform_hostname_bounce(hostname=temp_hostname,
                                            cfg=cfg,
                                            prev_hostname=previous_hostname)
                except Exception as e:
                    LOG.warn("Failed publishing hostname: %s", e)
                    util.logexc(LOG, "handling set_hostname failed")

            try:
                invoke_agent(self.ds_cfg['agent_command'])
            except util.ProcessExecutionError:
                # claim the datasource even if the command failed
                util.logexc(LOG, "agent command '%s' failed.",
                            self.ds_cfg['agent_command'])

            ddir = self.ds_cfg['data_dir']
            shcfgxml = os.path.join(ddir, "SharedConfig.xml")
            wait_for = [shcfgxml]

            fp_files = []
            key_value = None
            for pk in self.cfg.get('_pubkeys', []):
                if pk.get('value', None):
                    key_value = pk['value']
                    LOG.debug("ssh authentication: using value from fabric")
                else:
                    bname = str(pk['fingerprint'] + ".crt")
                    fp_files += [os.path.join(ddir, bname)]
                    LOG.debug(
                        "ssh authentication: using fingerprint from fabric")

            missing = util.log_time(logfunc=LOG.debug,
                                    msg="waiting for files",
                                    func=wait_for_files,
                                    args=(wait_for + fp_files, ))
        if len(missing):
            LOG.warn("Did not find files, but going on: %s", missing)

        metadata = {}
        if shcfgxml in missing:
            LOG.warn("SharedConfig.xml missing, using static instance-id")
        else:
            try:
                metadata['instance-id'] = iid_from_shared_config(shcfgxml)
            except ValueError as e:
                LOG.warn("failed to get instance id in %s: %s", shcfgxml, e)

        metadata['public-keys'] = key_value or pubkeys_from_crt_files(fp_files)
        return metadata
Example #28
    def _get_data(self):
        """Crawl metadata, parse and persist that data for this instance.

        @return: True when metadata discovered indicates OpenStack datasource.
            False when unable to contact metadata service or when metadata
            format is invalid or disabled.
        """
        oracle_considered = "Oracle" in self.sys_cfg.get("datasource_list")
        if not detect_openstack(accept_oracle=not oracle_considered):
            return False

        if self.perform_dhcp_setup:  # Setup networking in init-local stage.
            try:
                with EphemeralDHCPv4(self.fallback_interface):
                    results = util.log_time(
                        logfunc=LOG.debug,
                        msg="Crawl of metadata service",
                        func=self._crawl_metadata,
                    )
            except (NoDHCPLeaseError, sources.InvalidMetaDataException) as e:
                util.logexc(LOG, str(e))
                return False
        else:
            try:
                results = self._crawl_metadata()
            except sources.InvalidMetaDataException as e:
                util.logexc(LOG, str(e))
                return False

        self.dsmode = self._determine_dsmode([results.get("dsmode")])
        if self.dsmode == sources.DSMODE_DISABLED:
            return False
        md = results.get("metadata", {})
        md = util.mergemanydict([md, DEFAULT_METADATA])
        self.metadata = md
        self.ec2_metadata = results.get("ec2-metadata")
        self.network_json = results.get("networkdata")
        self.userdata_raw = results.get("userdata")
        self.version = results["version"]
        self.files.update(results.get("files", {}))

        vd = results.get("vendordata")
        self.vendordata_pure = vd
        try:
            self.vendordata_raw = sources.convert_vendordata(vd)
        except ValueError as e:
            LOG.warning("Invalid content in vendor-data: %s", e)
            self.vendordata_raw = None

        vd2 = results.get("vendordata2")
        self.vendordata2_pure = vd2
        try:
            self.vendordata2_raw = sources.convert_vendordata(vd2)
        except ValueError as e:
            LOG.warning("Invalid content in vendor-data2: %s", e)
            self.vendordata2_raw = None

        return True
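vendordata and vendordata2 above go through identical convert-or-discard handling; a hedged helper capturing that shared step, assuming sources.convert_vendordata as used in the example:

def _convert_vendordata_or_none(vd, label):
    # Mirror the example's handling: keep converted vendor data, or drop
    # it (returning None) when the content is invalid.
    try:
        return sources.convert_vendordata(vd)
    except ValueError as e:
        LOG.warning("Invalid content in %s: %s", label, e)
        return None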
Example #29
    def network_config(self):
        """Return a network config dict for rendering ENI or netplan files."""
        if self._network_config != sources.UNSET:
            return self._network_config

        if self.metadata is None:
            # this would happen if get_data hadn't been called. leave as UNSET
            LOG.warning(
                "Unexpected call to network_config when metadata is None.")
            return None

        result = None
        no_network_metadata_on_aws = bool(
            'network' not in self.metadata and
            self.cloud_name == CloudNames.AWS)
        if no_network_metadata_on_aws:
            LOG.debug("Metadata 'network' not present:"
                      " Refreshing stale metadata from prior to upgrade.")
            util.log_time(
                logfunc=LOG.debug, msg='Re-crawl of metadata service',
                func=self.get_data)

        # Limit network configuration to only the primary/fallback nic
        iface = self.fallback_interface
        macs_to_nics = {net.get_interface_mac(iface): iface}
        net_md = self.metadata.get('network')
        if isinstance(net_md, dict):
            result = convert_ec2_metadata_network_config(
                net_md, macs_to_nics=macs_to_nics, fallback_nic=iface)

            # RELEASE_BLOCKER: xenial should drop the below if statement,
            # because the issue being addressed doesn't exist pre-netplan.
            # (This datasource doesn't implement check_instance_id() so the
            # datasource object is recreated every boot; this means we don't
            # need to modify update_events on cloud-init upgrade.)

            # Non-VPC (aka Classic) Ec2 instances need to rewrite the
            # network config file every boot due to MAC address change.
            if self.is_classic_instance():
                self.update_events['network'].add(EventType.BOOT)
        else:
            LOG.warning("Metadata 'network' key not valid: %s.", net_md)
        self._network_config = result

        return self._network_config
Example #30
    def network_config(self):
        """Return a network config dict for rendering ENI or netplan files."""
        if self._network_config != sources.UNSET:
            return self._network_config

        if self.metadata is None:
            # this would happen if get_data hadn't been called. leave as UNSET
            LOG.warning(
                "Unexpected call to network_config when metadata is None.")
            return None

        result = None
        no_network_metadata_on_aws = bool(
            'network' not in self.metadata
            and self.cloud_name == CloudNames.AWS)
        if no_network_metadata_on_aws:
            LOG.debug("Metadata 'network' not present:"
                      " Refreshing stale metadata from prior to upgrade.")
            util.log_time(logfunc=LOG.debug,
                          msg='Re-crawl of metadata service',
                          func=self.get_data)

        # Limit network configuration to only the primary/fallback nic
        iface = self.fallback_interface
        macs_to_nics = {net.get_interface_mac(iface): iface}
        net_md = self.metadata.get('network')
        if isinstance(net_md, dict):
            result = convert_ec2_metadata_network_config(
                net_md, macs_to_nics=macs_to_nics, fallback_nic=iface)
            # RELEASE_BLOCKER: Xenial debian/postinst needs to add
            # EventType.BOOT on upgrade path for classic.

            # Non-VPC (aka Classic) Ec2 instances need to rewrite the
            # network config file every boot due to MAC address change.
            if self.is_classic_instance():
                self.update_events['network'].add(EventType.BOOT)
        else:
            LOG.warning("Metadata 'network' key not valid: %s.", net_md)
        self._network_config = result

        return self._network_config
Example #31
def handle(_name, cfg, _cloud, log, _args):
    if "growpart" not in cfg:
        log.debug(
            "No 'growpart' entry in cfg.  Using default: %s" % DEFAULT_CONFIG
        )
        cfg["growpart"] = DEFAULT_CONFIG

    mycfg = cfg.get("growpart")
    if not isinstance(mycfg, dict):
        log.warning("'growpart' in config was not a dict")
        return

    mode = mycfg.get("mode", "auto")
    if util.is_false(mode):
        if mode != "off":
            log.warning(
                f"DEPRECATED: growpart mode '{mode}' is deprecated. "
                "Use 'off' instead."
            )
        log.debug("growpart disabled: mode=%s" % mode)
        return

    if util.is_false(mycfg.get("ignore_growroot_disabled", False)):
        if os.path.isfile("/etc/growroot-disabled"):
            log.debug("growpart disabled: /etc/growroot-disabled exists")
            log.debug("use ignore_growroot_disabled to ignore")
            return

    devices = util.get_cfg_option_list(mycfg, "devices", ["/"])
    if not len(devices):
        log.debug("growpart: empty device list")
        return

    try:
        resizer = resizer_factory(mode)
    except (ValueError, TypeError) as e:
        log.debug("growpart unable to find resizer for '%s': %s" % (mode, e))
        if mode != "auto":
            raise e
        return

    resized = util.log_time(
        logfunc=log.debug,
        msg="resize_devices",
        func=resize_devices,
        args=(resizer, devices),
    )
    for (entry, action, msg) in resized:
        if action == RESIZE.CHANGED:
            log.info("'%s' resized: %s" % (entry, msg))
        else:
            log.debug("'%s' %s: %s" % (entry, action, msg))
Example #32
def handle(_name, cfg, cloud, log, _args):
    """
    See doc/examples/cloud-config_disk-setup.txt for documentation on the
    format.
    """
    disk_setup = cfg.get("disk_setup")
    if isinstance(disk_setup, dict):
        update_disk_setup_devices(disk_setup, cloud.device_name_to_device)
        log.debug("Partitioning disks: %s", str(disk_setup))
        for disk, definition in disk_setup.items():
            if not isinstance(definition, dict):
                log.warn("Invalid disk definition for %s" % disk)
                continue

            try:
                log.debug("Creating new partition table/disk")
                util.log_time(logfunc=LOG.debug,
                              msg="Creating partition on %s" % disk,
                              func=mkpart, args=(disk, definition))
            except Exception as e:
                util.logexc(LOG, "Failed partitioning operation\n%s" % e)

    fs_setup = cfg.get("fs_setup")
    if isinstance(fs_setup, list):
        log.debug("setting up filesystems: %s", str(fs_setup))
        update_fs_setup_devices(fs_setup, cloud.device_name_to_device)
        for definition in fs_setup:
            if not isinstance(definition, dict):
                log.warn("Invalid file system definition: %s" % definition)
                continue

            try:
                log.debug("Creating new filesystem.")
                device = definition.get('device')
                util.log_time(logfunc=LOG.debug,
                              msg="Creating fs for %s" % device,
                              func=mkfs, args=(definition,))
            except Exception as e:
                util.logexc(LOG, "Failed during filesystem operation\n%s" % e)
Example #33
def setup_swapfile(fname, size=None, maxsize=None):
    """
    fname: full path string of filename to setup
    size: the size to create. set to "auto" for recommended
    maxsize: the maximum size
    """
    tdir = os.path.dirname(fname)
    if str(size).lower() == "auto":
        try:
            memsize = util.read_meminfo()['total']
        except IOError as e:
            LOG.debug("Not creating swap. failed to read meminfo")
            return

        util.ensure_dir(tdir)
        size = suggested_swapsize(fsys=tdir, maxsize=maxsize,
                                  memsize=memsize)

    if not size:
        LOG.debug("Not creating swap: suggested size was 0")
        return

    mbsize = str(int(size / (2 ** 20)))
    msg = "creating swap file '%s' of %sMB" % (fname, mbsize)
    try:
        util.ensure_dir(tdir)
        util.log_time(LOG.debug, msg, func=util.subp,
            args=[['sh', '-c',
                   ('rm -f "$1" && umask 0066 && '
                    '{ fallocate -l "${2}M" "$1" || '
                    '  dd if=/dev/zero "of=$1" bs=1M "count=$2"; } && '
                    'mkswap "$1" || { r=$?; rm -f "$1"; exit $r; }'),
                   'setup_swap', fname, mbsize]])

    except Exception as e:
        raise IOError("Failed %s: %s" % (msg, e))

    return fname
Example #34
    def get_metadata_from_agent(self):
        temp_hostname = self.metadata.get("local-hostname")
        hostname_command = self.ds_cfg["hostname_bounce"]["hostname_command"]
        with temporary_hostname(temp_hostname, self.ds_cfg, hostname_command=hostname_command) as previous_hostname:
            if previous_hostname is not None and util.is_true(self.ds_cfg.get("set_hostname")):
                cfg = self.ds_cfg["hostname_bounce"]
                try:
                    perform_hostname_bounce(hostname=temp_hostname, cfg=cfg, prev_hostname=previous_hostname)
                except Exception as e:
                    LOG.warn("Failed publishing hostname: %s", e)
                    util.logexc(LOG, "handling set_hostname failed")

            try:
                invoke_agent(self.ds_cfg["agent_command"])
            except util.ProcessExecutionError:
                # claim the datasource even if the command failed
                util.logexc(LOG, "agent command '%s' failed.", self.ds_cfg["agent_command"])

            ddir = self.ds_cfg["data_dir"]
            shcfgxml = os.path.join(ddir, "SharedConfig.xml")
            wait_for = [shcfgxml]

            fp_files = []
            key_value = None
            for pk in self.cfg.get("_pubkeys", []):
                if pk.get("value", None):
                    key_value = pk["value"]
                    LOG.debug("ssh authentication: using value from fabric")
                else:
                    bname = str(pk["fingerprint"] + ".crt")
                    fp_files += [os.path.join(ddir, bname)]
                    LOG.debug("ssh authentication: using fingerprint from fabric")

            missing = util.log_time(
                logfunc=LOG.debug, msg="waiting for files", func=wait_for_files, args=(wait_for + fp_files,)
            )
        if len(missing):
            LOG.warn("Did not find files, but going on: %s", missing)

        metadata = {}
        if shcfgxml in missing:
            LOG.warn("SharedConfig.xml missing, using static instance-id")
        else:
            try:
                metadata["instance-id"] = iid_from_shared_config(shcfgxml)
            except ValueError as e:
                LOG.warn("failed to get instance id in %s: %s", shcfgxml, e)

        metadata["public-keys"] = key_value or pubkeys_from_crt_files(fp_files)
        return metadata
Example #35
    def _get_data(self):
        ret = util.log_time(
            LOG.debug, 'Crawl of GCE metadata service',
            read_md, kwargs={'address': self.metadata_address})

        if not ret['success']:
            if ret['platform_reports_gce']:
                LOG.warning(ret['reason'])
            else:
                LOG.debug(ret['reason'])
            return False
        self.metadata = ret['meta-data']
        self.userdata_raw = ret['user-data']
        return True
Example #36
    def get_metadata_from_agent(self):
        temp_hostname = self.metadata.get('local-hostname')
        hostname_command = self.ds_cfg['hostname_bounce']['hostname_command']
        with temporary_hostname(temp_hostname, self.ds_cfg,
                                hostname_command=hostname_command) \
                as previous_hostname:
            if (previous_hostname is not None
                    and util.is_true(self.ds_cfg.get('set_hostname'))):
                cfg = self.ds_cfg['hostname_bounce']
                try:
                    perform_hostname_bounce(hostname=temp_hostname,
                                            cfg=cfg,
                                            prev_hostname=previous_hostname)
                except Exception as e:
                    LOG.warn("Failed publishing hostname: %s", e)
                    util.logexc(LOG, "handling set_hostname failed")

            try:
                invoke_agent(self.ds_cfg['agent_command'])
            except util.ProcessExecutionError:
                # claim the datasource even if the command failed
                util.logexc(LOG, "agent command '%s' failed.",
                            self.ds_cfg['agent_command'])

            ddir = self.ds_cfg['data_dir']
            shcfgxml = os.path.join(ddir, "SharedConfig.xml")
            wait_for = [shcfgxml]

            fp_files = []
            for pk in self.cfg.get('_pubkeys', []):
                bname = str(pk['fingerprint'] + ".crt")
                fp_files += [os.path.join(ddir, bname)]

            missing = util.log_time(logfunc=LOG.debug, msg="waiting for files",
                                    func=wait_for_files,
                                    args=(wait_for + fp_files,))
        if len(missing):
            LOG.warn("Did not find files, but going on: %s", missing)

        metadata = {}
        if shcfgxml in missing:
            LOG.warn("SharedConfig.xml missing, using static instance-id")
        else:
            try:
                metadata['instance-id'] = iid_from_shared_config(shcfgxml)
            except ValueError as e:
                LOG.warn("failed to get instance id in %s: %s", shcfgxml, e)
        metadata['public-keys'] = pubkeys_from_crt_files(fp_files)
        return metadata
Example #37
    def _get_data(self):
        """Crawl metadata, parse and persist that data for this instance.

        @return: True when metadata discovered indicates OpenStack datasource.
            False when unable to contact metadata service or when metadata
            format is invalid or disabled.
        """
        oracle_considered = 'Oracle' in self.sys_cfg.get('datasource_list')
        if not detect_openstack(accept_oracle=not oracle_considered):
            return False

        if self.perform_dhcp_setup:  # Setup networking in init-local stage.
            try:
                with EphemeralDHCPv4(self.fallback_interface):
                    results = util.log_time(
                        logfunc=LOG.debug, msg='Crawl of metadata service',
                        func=self._crawl_metadata)
            except (NoDHCPLeaseError, sources.InvalidMetaDataException) as e:
                util.logexc(LOG, str(e))
                return False
        else:
            try:
                results = self._crawl_metadata()
            except sources.InvalidMetaDataException as e:
                util.logexc(LOG, str(e))
                return False

        self.dsmode = self._determine_dsmode([results.get('dsmode')])
        if self.dsmode == sources.DSMODE_DISABLED:
            return False
        md = results.get('metadata', {})
        md = util.mergemanydict([md, DEFAULT_METADATA])
        self.metadata = md
        self.ec2_metadata = results.get('ec2-metadata')
        self.network_json = results.get('networkdata')
        self.userdata_raw = results.get('userdata')
        self.version = results['version']
        self.files.update(results.get('files', {}))

        vd = results.get('vendordata')
        self.vendordata_pure = vd
        try:
            self.vendordata_raw = sources.convert_vendordata(vd)
        except ValueError as e:
            LOG.warning("Invalid content in vendor-data: %s", e)
            self.vendordata_raw = None

        return True
Example #38
0
    def crawl_metadata(self):
        """
        Crawl the metadata service when available.

        @returns: Dictionary of crawled metadata content.
        """
        metadata_ready = util.log_time(logfunc=LOG.info,
                                       msg='waiting for the metadata service',
                                       func=self.wait_for_metadata_service)

        if not metadata_ready:
            return {}

        return read_metadata(self.metadata_url, self.api_version,
                             self.password_server_port, self.url_timeout,
                             self.url_retries)
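
A common thread in these examples is util.log_time, which times a callable and logs how long it took. As a rough mental model only (the signature and uptime handling here are assumptions, not the cloud-init implementation), such a wrapper could look like:

# Minimal sketch of a log_time-style timing wrapper (illustrative only).
import time


def _read_uptime():
    # Best-effort read of /proc/uptime; returns None when unavailable.
    try:
        with open("/proc/uptime") as fp:
            return float(fp.read().split()[0])
    except (OSError, IOError, ValueError):
        return None


def log_time(logfunc, msg, func, args=(), kwargs=None, get_uptime=False):
    kwargs = kwargs or {}
    uptime = _read_uptime() if get_uptime else None
    start = time.monotonic()
    try:
        return func(*args, **kwargs)
    finally:
        delta = time.monotonic() - start
        if uptime is not None:
            logfunc("%s took %.3f seconds (uptime at start: %.2f)",
                    msg, delta, uptime)
        else:
            logfunc("%s took %.3f seconds", msg, delta)
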
Example #39
0
    def get_metadata_from_agent(self):
        temp_hostname = self.metadata.get('local-hostname')
        hostname_command = self.ds_cfg['hostname_bounce']['hostname_command']
        with temporary_hostname(temp_hostname, self.ds_cfg,
                                hostname_command=hostname_command) \
                as previous_hostname:
            if (previous_hostname is not None and
               util.is_true(self.ds_cfg.get('set_hostname'))):
                cfg = self.ds_cfg['hostname_bounce']
                try:
                    perform_hostname_bounce(hostname=temp_hostname,
                                            cfg=cfg,
                                            prev_hostname=previous_hostname)
                except Exception as e:
                    LOG.warn("Failed publishing hostname: %s", e)
                    util.logexc(LOG, "handling set_hostname failed")

            try:
                invoke_agent(self.ds_cfg['agent_command'])
            except util.ProcessExecutionError:
                # claim the datasource even if the command failed
                util.logexc(LOG, "agent command '%s' failed.",
                            self.ds_cfg['agent_command'])

            ddir = self.ds_cfg['data_dir']

            fp_files = []
            key_value = None
            for pk in self.cfg.get('_pubkeys', []):
                if pk.get('value', None):
                    key_value = pk['value']
                    LOG.debug("ssh authentication: using value from fabric")
                else:
                    bname = str(pk['fingerprint'] + ".crt")
                    fp_files += [os.path.join(ddir, bname)]
                    LOG.debug("ssh authentication: "
                              "using fingerprint from fabirc")

            missing = util.log_time(logfunc=LOG.debug, msg="waiting for files",
                                    func=wait_for_files,
                                    args=(fp_files,))
        if len(missing):
            LOG.warn("Did not find files, but going on: %s", missing)

        metadata = {}
        metadata['public-keys'] = key_value or pubkeys_from_crt_files(fp_files)
        return metadata
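
The agent-based examples block on certificate files appearing on disk through wait_for_files. A hedged sketch of such a polling helper (names, defaults and the returned "still missing" set are assumptions) is:

import os
import time


def wait_for_files(flist, maxwait=60, naplen=0.5):
    # Poll until every path in flist exists or maxwait seconds elapse;
    # return the set of paths still missing (sketch only).
    need = set(flist)
    deadline = time.monotonic() + maxwait
    while need and time.monotonic() < deadline:
        need = set(path for path in need if not os.path.exists(path))
        if need:
            time.sleep(naplen)
    return need
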
    def get_data(self):
        try:
            if not self.wait_for_metadata_service():
                return False
        except IOError:
            return False

        try:
            results = util.log_time(LOG.debug,
                                    'Crawl of openstack metadata service',
                                    read_metadata_service,
                                    args=[self.metadata_address],
                                    kwargs={
                                        'ssl_details': self.ssl_details,
                                        'version': openstack.OS_HAVANA
                                    })
        except openstack.NonReadable:
            return False
        except (openstack.BrokenMetadata, IOError):
            util.logexc(LOG, "Broken metadata address %s",
                        self.metadata_address)
            return False

        user_dsmode = results.get('dsmode', None)
        if user_dsmode not in VALID_DSMODES + (None, ):
            LOG.warn("User specified invalid mode: %s", user_dsmode)
            user_dsmode = None
        if user_dsmode == 'disabled':
            return False

        md = results.get('metadata', {})
        md = util.mergemanydict([md, DEFAULT_METADATA])
        self.metadata = md
        self.ec2_metadata = results.get('ec2-metadata')
        self.userdata_raw = results.get('userdata')
        self.version = results['version']
        self.files.update(results.get('files', {}))

        vd = results.get('vendordata')
        self.vendordata_pure = vd
        try:
            self.vendordata_raw = openstack.convert_vendordata_json(vd)
        except ValueError as e:
            LOG.warn("Invalid content in vendor-data: %s", e)
            self.vendordata_raw = None

        return True
Example #41
0
    def get_data(self):
        try:
            if not self.wait_for_metadata_service():
                return False
        except IOError:
            return False

        try:
            results = util.log_time(LOG.debug,
                                    'Crawl of openstack metadata service',
                                    read_metadata_service,
                                    args=[self.metadata_address],
                                    kwargs={
                                        'ssl_details': self.ssl_details,
                                        'version': openstack.OS_LATEST
                                    })
        except openstack.NonReadable:
            return False
        except (openstack.BrokenMetadata, IOError):
            util.logexc(LOG, "Broken metadata address %s",
                        self.metadata_address)
            return False

        user_dsmode = results.get('dsmode', None)
        if user_dsmode not in VALID_DSMODES + (None, ):
            LOG.warn("User specified invalid mode: %s", user_dsmode)
            user_dsmode = None
        if user_dsmode == 'disabled':
            return False

        md = results.get('metadata', {})
        md = util.mergemanydict([md, DEFAULT_METADATA])
        self.metadata = md
        self.ec2_metadata = results.get('ec2-metadata')
        self.userdata_raw = results.get('userdata')
        self.version = results['version']
        self.files.update(results.get('files', {}))

        # if vendordata includes 'cloud-init', then read that explicitly
        # for cloud-init (for namespacing).
        vd = results.get('vendordata')
        if isinstance(vd, dict) and 'cloud-init' in vd:
            self.vendordata_raw = vd['cloud-init']
        else:
            self.vendordata_raw = vd

        return True
    def _get_data(self):
        try:
            if not self.wait_for_metadata_service():
                return False
        except IOError:
            return False

        (max_wait, timeout, retries) = self._get_url_settings()

        try:
            results = util.log_time(LOG.debug,
                                    'Crawl of openstack metadata service',
                                    read_metadata_service,
                                    args=[self.metadata_address],
                                    kwargs={
                                        'ssl_details': self.ssl_details,
                                        'retries': retries,
                                        'timeout': timeout
                                    })
        except openstack.NonReadable:
            return False
        except (openstack.BrokenMetadata, IOError):
            util.logexc(LOG, "Broken metadata address %s",
                        self.metadata_address)
            return False

        self.dsmode = self._determine_dsmode([results.get('dsmode')])
        if self.dsmode == sources.DSMODE_DISABLED:
            return False

        md = results.get('metadata', {})
        md = util.mergemanydict([md, DEFAULT_METADATA])
        self.metadata = md
        self.ec2_metadata = results.get('ec2-metadata')
        self.userdata_raw = results.get('userdata')
        self.version = results['version']
        self.files.update(results.get('files', {}))

        vd = results.get('vendordata')
        self.vendordata_pure = vd
        try:
            self.vendordata_raw = sources.convert_vendordata(vd)
        except ValueError as e:
            LOG.warning("Invalid content in vendor-data: %s", e)
            self.vendordata_raw = None

        return True
    def get_data(self):
        try:
            if not self.wait_for_metadata_service():
                return False
        except IOError:
            return False

        try:
            results = util.log_time(LOG.debug,
                                    'Crawl of openstack metadata service',
                                    read_metadata_service,
                                    args=[self.metadata_address],
                                    kwargs={'ssl_details': self.ssl_details,
                                            'version': openstack.OS_LATEST})
        except openstack.NonReadable:
            return False
        except (openstack.BrokenMetadata, IOError):
            util.logexc(LOG, "Broken metadata address %s",
                        self.metadata_address)
            return False

        user_dsmode = results.get('dsmode', None)
        if user_dsmode not in VALID_DSMODES + (None,):
            LOG.warn("User specified invalid mode: %s", user_dsmode)
            user_dsmode = None
        if user_dsmode == 'disabled':
            return False

        md = results.get('metadata', {})
        md = util.mergemanydict([md, DEFAULT_METADATA])
        self.metadata = md
        self.ec2_metadata = results.get('ec2-metadata')
        self.userdata_raw = results.get('userdata')
        self.version = results['version']
        self.files.update(results.get('files', {}))

        # if vendordata includes 'cloud-init', then read that explicitly
        # for cloud-init (for namespacing).
        vd = results.get('vendordata')
        if isinstance(vd, dict) and 'cloud-init' in vd:
            self.vendordata_raw = vd['cloud-init']
        else:
            self.vendordata_raw = vd

        return True
Example #44
0
def handle(_name, cfg, _cloud, log, _args):
    if _cloud.distro.name == "aix":
        return

    if 'growpart' not in cfg:
        log.debug("No 'growpart' entry in cfg.  Using default: %s" %
                  DEFAULT_CONFIG)
        cfg['growpart'] = DEFAULT_CONFIG

    mycfg = cfg.get('growpart')
    if not isinstance(mycfg, dict):
        log.warn("'growpart' in config was not a dict")
        return

    mode = mycfg.get('mode', "auto")
    if util.is_false(mode):
        log.debug("growpart disabled: mode=%s" % mode)
        return

    if util.is_false(mycfg.get('ignore_growroot_disabled', False)):
        if os.path.isfile("/etc/growroot-disabled"):
            log.debug("growpart disabled: /etc/growroot-disabled exists")
            log.debug("use ignore_growroot_disabled to ignore")
            return

    devices = util.get_cfg_option_list(cfg, "devices", ["/"])
    if not len(devices):
        log.debug("growpart: empty device list")
        return

    try:
        resizer = resizer_factory(mode)
    except (ValueError, TypeError) as e:
        log.debug("growpart unable to find resizer for '%s': %s" % (mode, e))
        if mode != "auto":
            raise e
        return

    resized = util.log_time(logfunc=log.debug, msg="resize_devices",
                            func=resize_devices, args=(resizer, devices))
    for (entry, action, msg) in resized:
        if action == RESIZE.CHANGED:
            log.info("'%s' resized: %s" % (entry, msg))
        else:
            log.debug("'%s' %s: %s" % (entry, action, msg))
    def get_data(self):
        try:
            if not self.wait_for_metadata_service():
                return False
        except IOError:
            return False

        try:
            results = util.log_time(LOG.debug,
                                    'Crawl of openstack metadata service',
                                    read_metadata_service,
                                    args=[self.metadata_address],
                                    kwargs={'ssl_details': self.ssl_details})
        except openstack.NonReadable:
            return False
        except (openstack.BrokenMetadata, IOError):
            util.logexc(LOG, "Broken metadata address %s",
                        self.metadata_address)
            return False

        user_dsmode = results.get('dsmode', None)
        if user_dsmode not in VALID_DSMODES + (None,):
            LOG.warn("User specified invalid mode: %s", user_dsmode)
            user_dsmode = None
        if user_dsmode == 'disabled':
            return False

        md = results.get('metadata', {})
        md = util.mergemanydict([md, DEFAULT_METADATA])
        self.metadata = md
        self.ec2_metadata = results.get('ec2-metadata')
        self.userdata_raw = results.get('userdata')
        self.version = results['version']
        self.files.update(results.get('files', {}))

        vd = results.get('vendordata')
        self.vendordata_pure = vd
        try:
            self.vendordata_raw = openstack.convert_vendordata_json(vd)
        except ValueError as e:
            LOG.warn("Invalid content in vendor-data: %s", e)
            self.vendordata_raw = None

        return True
    def get_data(self, retries=5, timeout=5):
        try:
            if not self.wait_for_metadata_service():
                return False
        except IOError:
            return False

        try:
            results = util.log_time(LOG.debug,
                                    'Crawl of openstack metadata service',
                                    read_metadata_service,
                                    args=[self.metadata_address],
                                    kwargs={'ssl_details': self.ssl_details,
                                            'retries': retries,
                                            'timeout': timeout})
        except openstack.NonReadable:
            return False
        except (openstack.BrokenMetadata, IOError):
            util.logexc(LOG, "Broken metadata address %s",
                        self.metadata_address)
            return False

        self.dsmode = self._determine_dsmode([results.get('dsmode')])
        if self.dsmode == sources.DSMODE_DISABLED:
            return False

        md = results.get('metadata', {})
        md = util.mergemanydict([md, DEFAULT_METADATA])
        self.metadata = md
        self.ec2_metadata = results.get('ec2-metadata')
        self.userdata_raw = results.get('userdata')
        self.version = results['version']
        self.files.update(results.get('files', {}))

        vd = results.get('vendordata')
        self.vendordata_pure = vd
        try:
            self.vendordata_raw = openstack.convert_vendordata_json(vd)
        except ValueError as e:
            LOG.warn("Invalid content in vendor-data: %s", e)
            self.vendordata_raw = None

        return True
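
Several of these _get_data variants normalize vendor-data through sources.convert_vendordata (or the older openstack.convert_vendordata_json) and treat ValueError as "invalid content". Only as a sketch of that expected behaviour, such a converter passes strings through and unwraps a dict namespaced under 'cloud-init':

def convert_vendordata(data, recurse=True):
    # Sketch: accept raw strings, unwrap data['cloud-init'], reject the rest.
    if not data:
        return None
    if isinstance(data, str):
        return data
    if isinstance(data, dict):
        if recurse:
            return convert_vendordata(data.get('cloud-init'), recurse=False)
        raise ValueError("vendordata['cloud-init'] cannot itself be a dict")
    raise ValueError("Unknown data type for vendordata: %s" % type(data))
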
Example #47
0
def main(sysv_args=None):
    if not sysv_args:
        sysv_args = sys.argv
    parser = argparse.ArgumentParser(prog=sysv_args[0])
    sysv_args = sysv_args[1:]

    # Top level args
    parser.add_argument('--version', '-v', action='version',
                        version='%(prog)s ' + (version.version_string()))
    parser.add_argument('--file', '-f', action='append',
                        dest='files',
                        help=('additional yaml configuration'
                              ' files to use'),
                        type=argparse.FileType('rb'))
    parser.add_argument('--debug', '-d', action='store_true',
                        help=('show additional pre-action'
                              ' logging (default: %(default)s)'),
                        default=False)
    parser.add_argument('--force', action='store_true',
                        help=('force running even if no datasource is'
                              ' found (use at your own risk)'),
                        dest='force',
                        default=False)

    parser.set_defaults(reporter=None)
    subparsers = parser.add_subparsers(title='Subcommands', dest='subcommand')
    subparsers.required = True

    # Each action and its sub-options (if any)
    parser_init = subparsers.add_parser('init',
                                        help=('initializes cloud-init and'
                                              ' performs initial modules'))
    parser_init.add_argument("--local", '-l', action='store_true',
                             help="start in local mode (default: %(default)s)",
                             default=False)
    # This is used so that we can know which action is selected +
    # the functor to use to run this subcommand
    parser_init.set_defaults(action=('init', main_init))

    # These settings are used for the 'config' and 'final' stages
    parser_mod = subparsers.add_parser('modules',
                                       help=('activates modules using '
                                             'a given configuration key'))
    parser_mod.add_argument("--mode", '-m', action='store',
                            help=("module configuration name "
                                  "to use (default: %(default)s)"),
                            default='config',
                            choices=('init', 'config', 'final'))
    parser_mod.set_defaults(action=('modules', main_modules))

    # This subcommand allows you to run a single module
    parser_single = subparsers.add_parser('single',
                                          help=('run a single module '))
    parser_single.add_argument("--name", '-n', action="store",
                               help="module name to run",
                               required=True)
    parser_single.add_argument("--frequency", action="store",
                               help=("frequency of the module"),
                               required=False,
                               choices=list(FREQ_SHORT_NAMES.keys()))
    parser_single.add_argument("--report", action="store_true",
                               help="enable reporting",
                               required=False)
    parser_single.add_argument("module_args", nargs="*",
                               metavar='argument',
                               help=('any additional arguments to'
                                     ' pass to this module'))
    parser_single.set_defaults(action=('single', main_single))

    parser_query = subparsers.add_parser(
        'query',
        help='Query standardized instance metadata from the command line.')

    parser_dhclient = subparsers.add_parser(
        dhclient_hook.NAME, help=dhclient_hook.__doc__)
    dhclient_hook.get_parser(parser_dhclient)

    parser_features = subparsers.add_parser('features',
                                            help=('list defined features'))
    parser_features.set_defaults(action=('features', main_features))

    parser_analyze = subparsers.add_parser(
        'analyze', help='Devel tool: Analyze cloud-init logs and data')

    parser_devel = subparsers.add_parser(
        'devel', help='Run development tools')

    parser_collect_logs = subparsers.add_parser(
        'collect-logs', help='Collect and tar all cloud-init debug info')

    parser_clean = subparsers.add_parser(
        'clean', help='Remove logs and artifacts so cloud-init can re-run.')

    parser_status = subparsers.add_parser(
        'status', help='Report cloud-init status or wait on completion.')

    if sysv_args:
        # Only load subparsers if subcommand is specified to avoid load cost
        if sysv_args[0] == 'analyze':
            from cloudinit.analyze.__main__ import get_parser as analyze_parser
            # Construct analyze subcommand parser
            analyze_parser(parser_analyze)
        elif sysv_args[0] == 'devel':
            from cloudinit.cmd.devel.parser import get_parser as devel_parser
            # Construct devel subcommand parser
            devel_parser(parser_devel)
        elif sysv_args[0] == 'collect-logs':
            from cloudinit.cmd.devel.logs import (
                get_parser as logs_parser, handle_collect_logs_args)
            logs_parser(parser_collect_logs)
            parser_collect_logs.set_defaults(
                action=('collect-logs', handle_collect_logs_args))
        elif sysv_args[0] == 'clean':
            from cloudinit.cmd.clean import (
                get_parser as clean_parser, handle_clean_args)
            clean_parser(parser_clean)
            parser_clean.set_defaults(
                action=('clean', handle_clean_args))
        elif sysv_args[0] == 'query':
            from cloudinit.cmd.query import (
                get_parser as query_parser, handle_args as handle_query_args)
            query_parser(parser_query)
            parser_query.set_defaults(
                action=('render', handle_query_args))
        elif sysv_args[0] == 'status':
            from cloudinit.cmd.status import (
                get_parser as status_parser, handle_status_args)
            status_parser(parser_status)
            parser_status.set_defaults(
                action=('status', handle_status_args))

    args = parser.parse_args(args=sysv_args)

    # Subparsers.required = True and each subparser sets action=(name, functor)
    (name, functor) = args.action

    # Setup basic logging to start (until reinitialized)
    # iff in debug mode.
    if args.debug:
        logging.setupBasicLogging()

    # Setup signal handlers before running
    signal_handler.attach_handlers()

    if name in ("modules", "init"):
        functor = status_wrapper

    rname = None
    report_on = True
    if name == "init":
        if args.local:
            rname, rdesc = ("init-local", "searching for local datasources")
        else:
            rname, rdesc = ("init-network",
                            "searching for network datasources")
    elif name == "modules":
        rname, rdesc = ("modules-%s" % args.mode,
                        "running modules for %s" % args.mode)
    elif name == "single":
        rname, rdesc = ("single/%s" % args.name,
                        "running single module %s" % args.name)
        report_on = args.report
    else:
        rname = name
        rdesc = "running 'cloud-init %s'" % name
        report_on = False

    args.reporter = events.ReportEventStack(
        rname, rdesc, reporting_enabled=report_on)

    with args.reporter:
        retval = util.log_time(
            logfunc=LOG.debug, msg="cloud-init mode '%s'" % name,
            get_uptime=True, func=functor, args=(name, args))
        reporting.flush_events()
        return retval
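
main() wraps the selected stage in events.ReportEventStack before timing it with util.log_time. A bare-bones stand-in for such a reporting context manager (purely illustrative; the real reporting layer emits structured events rather than prints) could be:

class ReportEventStack(object):
    # Illustrative context manager: announce a start event on entry and a
    # success/failure event on exit when reporting is enabled.
    def __init__(self, name, description, reporting_enabled=True):
        self.name = name
        self.description = description
        self.enabled = reporting_enabled

    def __enter__(self):
        if self.enabled:
            print("start: %s (%s)" % (self.name, self.description))
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        if self.enabled:
            result = "FAIL" if exc_type else "SUCCESS"
            print("finish: %s [%s]" % (self.name, result))
        return False
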
    def get_data(self):
        found = []
        md = {}
        ud = ""
        vmwarePlatformFound = False
        vmwareImcConfigFilePath = ''

        defaults = {
            "instance-id": "iid-dsovf",
        }

        (seedfile, contents) = get_ovf_env(self.paths.seed_dir)

        system_type = util.read_dmi_data("system-product-name")
        if system_type is None:
            LOG.debug("No system-product-name found")

        if seedfile:
            # Found a seed dir
            seed = os.path.join(self.paths.seed_dir, seedfile)
            (md, ud, cfg) = read_ovf_environment(contents)
            self.environment = contents
            found.append(seed)
        elif system_type and 'vmware' in system_type.lower():
            LOG.debug("VMware Virtualization Platform found")
            if not util.get_cfg_option_bool(
                    self.sys_cfg, "disable_vmware_customization", True):
                deployPkgPluginPath = search_file("/usr/lib/vmware-tools",
                                                  "libdeployPkgPlugin.so")
                if not deployPkgPluginPath:
                    deployPkgPluginPath = search_file("/usr/lib/open-vm-tools",
                                                      "libdeployPkgPlugin.so")
                if deployPkgPluginPath:
                    # When the VM is powered on, the "VMware Tools" daemon
                    # copies the customization specification file to
                    # /var/run/vmware-imc directory. cloud-init code needs
                    # to search for the file in that directory.
                    vmwareImcConfigFilePath = util.log_time(
                        logfunc=LOG.debug,
                        msg="waiting for configuration file",
                        func=wait_for_imc_cfg_file,
                        args=("/var/run/vmware-imc", "cust.cfg"))

                if vmwareImcConfigFilePath:
                    LOG.debug("Found VMware DeployPkg Config File at %s" %
                              vmwareImcConfigFilePath)
                else:
                    LOG.debug("Did not find VMware DeployPkg Config File Path")
            else:
                LOG.debug("Customization for VMware platform is disabled.")

        if vmwareImcConfigFilePath:
            nics = ""
            try:
                cf = ConfigFile(vmwareImcConfigFilePath)
                conf = Config(cf)
                (md, ud, cfg) = read_vmware_imc(conf)
                dirpath = os.path.dirname(vmwareImcConfigFilePath)
                nics = get_nics_to_enable(dirpath)
            except Exception as e:
                LOG.debug("Error parsing the customization Config File")
                LOG.exception(e)
                set_customization_status(
                    GuestCustStateEnum.GUESTCUST_STATE_RUNNING,
                    GuestCustEventEnum.GUESTCUST_EVENT_CUSTOMIZE_FAILED)
                enable_nics(nics)
                return False
            finally:
                util.del_dir(os.path.dirname(vmwareImcConfigFilePath))

            try:
                LOG.debug("Applying the Network customization")
                nicConfigurator = NicConfigurator(conf.nics)
                nicConfigurator.configure()
            except Exception as e:
                LOG.debug("Error applying the Network Configuration")
                LOG.exception(e)
                set_customization_status(
                    GuestCustStateEnum.GUESTCUST_STATE_RUNNING,
                    GuestCustEventEnum.GUESTCUST_EVENT_NETWORK_SETUP_FAILED)
                enable_nics(nics)
                return False

            vmwarePlatformFound = True
            set_customization_status(
                GuestCustStateEnum.GUESTCUST_STATE_DONE,
                GuestCustErrorEnum.GUESTCUST_ERROR_SUCCESS)
            enable_nics(nics)
        else:
            np = {'iso': transport_iso9660,
                  'vmware-guestd': transport_vmware_guestd, }
            name = None
            for (name, transfunc) in np.items():
                (contents, _dev, _fname) = transfunc()
                if contents:
                    break
            if contents:
                (md, ud, cfg) = read_ovf_environment(contents)
                self.environment = contents
                found.append(name)

        # No OVF transports were found
        if len(found) == 0 and not vmwarePlatformFound:
            return False

        if 'seedfrom' in md and md['seedfrom']:
            seedfrom = md['seedfrom']
            seedfound = False
            for proto in self.supported_seed_starts:
                if seedfrom.startswith(proto):
                    seedfound = proto
                    break
            if not seedfound:
                LOG.debug("Seed from %s not supported by %s",
                          seedfrom, self)
                return False

            (md_seed, ud) = util.read_seeded(seedfrom, timeout=None)
            LOG.debug("Using seeded cache data from %s", seedfrom)

            md = util.mergemanydict([md, md_seed])
            found.append(seedfrom)

        # Now that we have exhausted any other places merge in the defaults
        md = util.mergemanydict([md, defaults])

        self.seed = ",".join(found)
        self.metadata = md
        self.userdata_raw = ud
        self.cfg = cfg
        return True
Example #49
0
def main(sysv_args=None):
    if sysv_args is not None:
        parser = argparse.ArgumentParser(prog=sysv_args[0])
        sysv_args = sysv_args[1:]
    else:
        parser = argparse.ArgumentParser()

    # Top level args
    parser.add_argument('--version', '-v', action='version',
                        version='%(prog)s ' + (version.version_string()))
    parser.add_argument('--file', '-f', action='append',
                        dest='files',
                        help=('additional yaml configuration'
                              ' files to use'),
                        type=argparse.FileType('rb'))
    parser.add_argument('--debug', '-d', action='store_true',
                        help=('show additional pre-action'
                              ' logging (default: %(default)s)'),
                        default=False)
    parser.add_argument('--force', action='store_true',
                        help=('force running even if no datasource is'
                              ' found (use at your own risk)'),
                        dest='force',
                        default=False)

    parser.set_defaults(reporter=None)
    subparsers = parser.add_subparsers()

    # Each action and its sub-options (if any)
    parser_init = subparsers.add_parser('init',
                                        help=('initializes cloud-init and'
                                              ' performs initial modules'))
    parser_init.add_argument("--local", '-l', action='store_true',
                             help="start in local mode (default: %(default)s)",
                             default=False)
    # This is used so that we can know which action is selected +
    # the functor to use to run this subcommand
    parser_init.set_defaults(action=('init', main_init))

    # These settings are used for the 'config' and 'final' stages
    parser_mod = subparsers.add_parser('modules',
                                       help=('activates modules using '
                                             'a given configuration key'))
    parser_mod.add_argument("--mode", '-m', action='store',
                            help=("module configuration name "
                                  "to use (default: %(default)s)"),
                            default='config',
                            choices=('init', 'config', 'final'))
    parser_mod.set_defaults(action=('modules', main_modules))

    # These settings are used when you want to query information
    # stored in the cloud-init data objects/directories/files
    parser_query = subparsers.add_parser('query',
                                         help=('query information stored '
                                               'in cloud-init'))
    parser_query.add_argument("--name", '-n', action="store",
                              help="item name to query on",
                              required=True,
                              choices=QUERY_DATA_TYPES)
    parser_query.set_defaults(action=('query', main_query))

    # This subcommand allows you to run a single module
    parser_single = subparsers.add_parser('single',
                                          help=('run a single module '))
    parser_single.set_defaults(action=('single', main_single))
    parser_single.add_argument("--name", '-n', action="store",
                               help="module name to run",
                               required=True)
    parser_single.add_argument("--frequency", action="store",
                               help=("frequency of the module"),
                               required=False,
                               choices=list(FREQ_SHORT_NAMES.keys()))
    parser_single.add_argument("--report", action="store_true",
                               help="enable reporting",
                               required=False)
    parser_single.add_argument("module_args", nargs="*",
                               metavar='argument',
                               help=('any additional arguments to'
                                     ' pass to this module'))
    parser_single.set_defaults(action=('single', main_single))

    args = parser.parse_args(args=sysv_args)

    try:
        (name, functor) = args.action
    except AttributeError:
        parser.error('too few arguments')

    # Setup basic logging to start (until reinitialized)
    # iff in debug mode...
    if args.debug:
        logging.setupBasicLogging()

    # Setup signal handlers before running
    signal_handler.attach_handlers()

    if name in ("modules", "init"):
        functor = status_wrapper

    report_on = True
    if name == "init":
        if args.local:
            rname, rdesc = ("init-local", "searching for local datasources")
        else:
            rname, rdesc = ("init-network",
                            "searching for network datasources")
    elif name == "modules":
        rname, rdesc = ("modules-%s" % args.mode,
                        "running modules for %s" % args.mode)
    elif name == "single":
        rname, rdesc = ("single/%s" % args.name,
                        "running single module %s" % args.name)
        report_on = args.report

    args.reporter = events.ReportEventStack(
        rname, rdesc, reporting_enabled=report_on)
    with args.reporter:
        return util.log_time(
            logfunc=LOG.debug, msg="cloud-init mode '%s'" % name,
            get_uptime=True, func=functor, args=(name, args))
Example #50
0
    def _get_data(self):
        found = []
        md = {}
        ud = ""
        vmwareImcConfigFilePath = None
        nicspath = None

        defaults = {
            "instance-id": "iid-dsovf",
        }

        (seedfile, contents) = get_ovf_env(self.paths.seed_dir)

        system_type = util.read_dmi_data("system-product-name")
        if system_type is None:
            LOG.debug("No system-product-name found")

        if seedfile:
            # Found a seed dir
            seed = os.path.join(self.paths.seed_dir, seedfile)
            (md, ud, cfg) = read_ovf_environment(contents)
            self.environment = contents
            found.append(seed)
        elif system_type and 'vmware' in system_type.lower():
            LOG.debug("VMware Virtualization Platform found")
            if not self.vmware_customization_supported:
                LOG.debug("Skipping the check for "
                          "VMware Customization support")
            elif not util.get_cfg_option_bool(
                    self.sys_cfg, "disable_vmware_customization", True):

                search_paths = (
                    "/usr/lib/vmware-tools", "/usr/lib64/vmware-tools",
                    "/usr/lib/open-vm-tools", "/usr/lib64/open-vm-tools")

                plugin = "libdeployPkgPlugin.so"
                deployPkgPluginPath = None
                for path in search_paths:
                    deployPkgPluginPath = search_file(path, plugin)
                    if deployPkgPluginPath:
                        LOG.debug("Found the customization plugin at %s",
                                  deployPkgPluginPath)
                        break

                if deployPkgPluginPath:
                    # When the VM is powered on, the "VMware Tools" daemon
                    # copies the customization specification file to
                    # /var/run/vmware-imc directory. cloud-init code needs
                    # to search for the file in that directory.
                    max_wait = get_max_wait_from_cfg(self.ds_cfg)
                    vmwareImcConfigFilePath = util.log_time(
                        logfunc=LOG.debug,
                        msg="waiting for configuration file",
                        func=wait_for_imc_cfg_file,
                        args=("cust.cfg", max_wait))
                else:
                    LOG.debug("Did not find the customization plugin.")

                if vmwareImcConfigFilePath:
                    LOG.debug("Found VMware Customization Config File at %s",
                              vmwareImcConfigFilePath)
                    nicspath = wait_for_imc_cfg_file(
                        filename="nics.txt", maxwait=10, naplen=5)
                else:
                    LOG.debug("Did not find VMware Customization Config File")
            else:
                LOG.debug("Customization for VMware platform is disabled.")

        if vmwareImcConfigFilePath:
            self._vmware_nics_to_enable = ""
            try:
                cf = ConfigFile(vmwareImcConfigFilePath)
                self._vmware_cust_conf = Config(cf)
                (md, ud, cfg) = read_vmware_imc(self._vmware_cust_conf)
                self._vmware_nics_to_enable = get_nics_to_enable(nicspath)
                imcdirpath = os.path.dirname(vmwareImcConfigFilePath)
                product_marker = self._vmware_cust_conf.marker_id
                hasmarkerfile = check_marker_exists(
                    product_marker, os.path.join(self.paths.cloud_dir, 'data'))
                special_customization = product_marker and not hasmarkerfile
                customscript = self._vmware_cust_conf.custom_script_name
            except Exception as e:
                _raise_error_status(
                    "Error parsing the customization Config File",
                    e,
                    GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED,
                    vmwareImcConfigFilePath)

            if special_customization:
                if customscript:
                    try:
                        precust = PreCustomScript(customscript, imcdirpath)
                        precust.execute()
                    except Exception as e:
                        _raise_error_status(
                            "Error executing pre-customization script",
                            e,
                            GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED,
                            vmwareImcConfigFilePath)

            try:
                LOG.debug("Preparing the Network configuration")
                self._network_config = get_network_config_from_conf(
                    self._vmware_cust_conf,
                    True,
                    True,
                    self.distro.osfamily)
            except Exception as e:
                _raise_error_status(
                    "Error preparing Network Configuration",
                    e,
                    GuestCustEvent.GUESTCUST_EVENT_NETWORK_SETUP_FAILED,
                    vmwareImcConfigFilePath)

            if special_customization:
                LOG.debug("Applying password customization")
                pwdConfigurator = PasswordConfigurator()
                adminpwd = self._vmware_cust_conf.admin_password
                try:
                    resetpwd = self._vmware_cust_conf.reset_password
                    if adminpwd or resetpwd:
                        pwdConfigurator.configure(adminpwd, resetpwd,
                                                  self.distro)
                    else:
                        LOG.debug("Changing password is not needed")
                except Exception as e:
                    _raise_error_status(
                        "Error applying Password Configuration",
                        e,
                        GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED,
                        vmwareImcConfigFilePath)

                if customscript:
                    try:
                        postcust = PostCustomScript(customscript, imcdirpath)
                        postcust.execute()
                    except Exception as e:
                        _raise_error_status(
                            "Error executing post-customization script",
                            e,
                            GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED,
                            vmwareImcConfigFilePath)

            if product_marker:
                try:
                    setup_marker_files(
                        product_marker,
                        os.path.join(self.paths.cloud_dir, 'data'))
                except Exception as e:
                    _raise_error_status(
                        "Error creating marker files",
                        e,
                        GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED,
                        vmwareImcConfigFilePath)

            self._vmware_cust_found = True
            found.append('vmware-tools')

            # TODO: Need to set the status to DONE only when the
            # customization is done successfully.
            util.del_dir(os.path.dirname(vmwareImcConfigFilePath))
            enable_nics(self._vmware_nics_to_enable)
            set_customization_status(
                GuestCustStateEnum.GUESTCUST_STATE_DONE,
                GuestCustErrorEnum.GUESTCUST_ERROR_SUCCESS)

        else:
            np = [('com.vmware.guestInfo', transport_vmware_guestinfo),
                  ('iso', transport_iso9660)]
            name = None
            for name, transfunc in np:
                contents = transfunc()
                if contents:
                    break
            if contents:
                (md, ud, cfg) = read_ovf_environment(contents)
                self.environment = contents
                found.append(name)

        # No OVF transports were found
        if len(found) == 0:
            return False

        if 'seedfrom' in md and md['seedfrom']:
            seedfrom = md['seedfrom']
            seedfound = False
            for proto in self.supported_seed_starts:
                if seedfrom.startswith(proto):
                    seedfound = proto
                    break
            if not seedfound:
                LOG.debug("Seed from %s not supported by %s",
                          seedfrom, self)
                return False

            (md_seed, ud) = util.read_seeded(seedfrom, timeout=None)
            LOG.debug("Using seeded cache data from %s", seedfrom)

            md = util.mergemanydict([md, md_seed])
            found.append(seedfrom)

        # Now that we have exhausted any other places merge in the defaults
        md = util.mergemanydict([md, defaults])

        self.seed = ",".join(found)
        self.metadata = md
        self.userdata_raw = ud
        self.cfg = cfg
        return True
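
The VMware branch above gates "special customization" on a per-product marker file via check_marker_exists and setup_marker_files. As a hedged sketch (the marker file naming below is an assumption), those helpers amount to:

import os


def _marker_path(markerid, marker_dir):
    # Hypothetical naming scheme for the per-product marker file.
    return os.path.join(marker_dir, ".markerfile-%s.txt" % markerid)


def check_marker_exists(markerid, marker_dir):
    # True when customization already ran for this product marker id.
    return bool(markerid) and os.path.exists(_marker_path(markerid, marker_dir))


def setup_marker_files(markerid, marker_dir):
    # Record that customization completed so later boots skip it.
    open(_marker_path(markerid, marker_dir), 'w').close()
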
Example #51
0
def handle(name, cfg, _cloud, log, args):
    if len(args) != 0:
        resize_root = args[0]
    else:
        resize_root = util.get_cfg_option_str(cfg, "resize_rootfs", True)
    validate_cloudconfig_schema(cfg, schema)
    if not util.translate_bool(resize_root, addons=[NOBLOCK]):
        log.debug("Skipping module named %s, resizing disabled", name)
        return

    # TODO(harlowja): allow what is to be resized to be configurable??
    resize_what = "/"
    result = util.get_mount_info(resize_what, log)
    if not result:
        log.warn("Could not determine filesystem type of %s", resize_what)
        return

    (devpth, fs_type, mount_point) = result

    # if we have a zfs then our device path at this point
    # is the zfs label. For example: vmzroot/ROOT/freebsd
    # we will have to get the zpool name out of this
    # and set the resize_what variable to the zpool
    # so the _resize_zfs function gets the right attribute.
    if fs_type == 'zfs':
        zpool = devpth.split('/')[0]
        devpth = util.get_device_info_from_zpool(zpool)
        if not devpth:
            return  # could not find device from zpool
        resize_what = zpool

    info = "dev=%s mnt_point=%s path=%s" % (devpth, mount_point, resize_what)
    log.debug("resize_info: %s" % info)

    devpth = maybe_get_writable_device_path(devpth, info, log)
    if not devpth:
        return  # devpath was not a writable block device

    resizer = None
    if can_skip_resize(fs_type, resize_what, devpth):
        log.debug("Skip resize filesystem type %s for %s",
                  fs_type, resize_what)
        return

    fstype_lc = fs_type.lower()
    for (pfix, root_cmd) in RESIZE_FS_PREFIXES_CMDS:
        if fstype_lc.startswith(pfix):
            resizer = root_cmd
            break

    if not resizer:
        log.warn("Not resizing unknown filesystem type %s for %s",
                 fs_type, resize_what)
        return

    resize_cmd = resizer(resize_what, devpth)
    log.debug("Resizing %s (%s) using %s", resize_what, fs_type,
              ' '.join(resize_cmd))

    if resize_root == NOBLOCK:
        # Fork to a child that will run
        # the resize command
        util.fork_cb(
            util.log_time, logfunc=log.debug, msg="backgrounded Resizing",
            func=do_resize, args=(resize_cmd, log))
    else:
        util.log_time(logfunc=log.debug, msg="Resizing",
                      func=do_resize, args=(resize_cmd, log))

    action = 'Resized'
    if resize_root == NOBLOCK:
        action = 'Resizing (via forking)'
    log.debug("%s root filesystem (type=%s, val=%s)", action, fs_type,
              resize_root)
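
With resize_rootfs set to 'noblock', the resize command is handed to util.fork_cb so boot is not held up waiting for it. A minimal sketch of such a fork-and-call helper (Unix-only, illustrative) is:

import os


def fork_cb(child_cb, *args, **kwargs):
    # Run child_cb(*args, **kwargs) in a forked child and return
    # immediately in the parent (sketch; no double-fork or logging).
    pid = os.fork()
    if pid != 0:
        return
    try:
        child_cb(*args, **kwargs)
    finally:
        os._exit(0)
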
    def get_data(self):
        # azure removes/ejects the cdrom containing the ovf-env.xml
        # file on reboot.  So, in order to successfully reboot we
        # need to look in the datadir and consider that valid
        ddir = self.ds_cfg['data_dir']

        candidates = [self.seed_dir]
        candidates.extend(list_possible_azure_ds_devs())
        if ddir:
            candidates.append(ddir)

        found = None

        for cdev in candidates:
            try:
                if cdev.startswith("/dev/"):
                    ret = util.mount_cb(cdev, load_azure_ds_dir)
                else:
                    ret = load_azure_ds_dir(cdev)

            except NonAzureDataSource:
                continue
            except BrokenAzureDataSource as exc:
                raise exc
            except util.MountFailedError:
                LOG.warn("%s was not mountable" % cdev)
                continue

            (md, self.userdata_raw, cfg, files) = ret
            self.seed = cdev
            self.metadata = util.mergemanydict([md, DEFAULT_METADATA])
            self.cfg = util.mergemanydict([cfg, BUILTIN_CLOUD_CONFIG])
            found = cdev

            LOG.debug("found datasource in %s", cdev)
            break

        if not found:
            return False

        if found == ddir:
            LOG.debug("using files cached in %s", ddir)

        # azure / hyper-v provides random data here
        seed = util.load_file("/sys/firmware/acpi/tables/OEM0", quiet=True)
        if seed:
            self.metadata['random_seed'] = seed

        # now update ds_cfg to reflect contents passed in via config
        user_ds_cfg = util.get_cfg_by_path(self.cfg, DS_CFG_PATH, {})
        self.ds_cfg = util.mergemanydict([user_ds_cfg, self.ds_cfg])
        mycfg = self.ds_cfg

        # walinux agent writes files world readable, but expects
        # the directory to be protected.
        write_files(mycfg['data_dir'], files, dirmode=0o700)

        # handle the hostname 'publishing'
        try:
            handle_set_hostname(mycfg.get('set_hostname'),
                                self.metadata.get('local-hostname'),
                                mycfg['hostname_bounce'])
        except Exception as e:
            LOG.warn("Failed publishing hostname: %s" % e)
            util.logexc(LOG, "handling set_hostname failed")

        try:
            invoke_agent(mycfg['agent_command'])
        except util.ProcessExecutionError:
            # claim the datasource even if the command failed
            util.logexc(LOG, "agent command '%s' failed.",
                        mycfg['agent_command'])

        shcfgxml = os.path.join(mycfg['data_dir'], "SharedConfig.xml")
        wait_for = [shcfgxml]

        fp_files = []
        for pk in self.cfg.get('_pubkeys', []):
            bname = str(pk['fingerprint'] + ".crt")
            fp_files += [os.path.join(mycfg['data_dir'], bname)]

        missing = util.log_time(logfunc=LOG.debug, msg="waiting for files",
                                func=wait_for_files,
                                args=(wait_for + fp_files,))
        if len(missing):
            LOG.warn("Did not find files, but going on: %s", missing)

        if shcfgxml in missing:
            LOG.warn("SharedConfig.xml missing, using static instance-id")
        else:
            try:
                self.metadata['instance-id'] = iid_from_shared_config(shcfgxml)
            except ValueError as e:
                LOG.warn("failed to get instance id in %s: %s" % (shcfgxml, e))

        pubkeys = pubkeys_from_crt_files(fp_files)

        self.metadata['public-keys'] = pubkeys
        return True
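
The comment above notes that the walinux agent writes files world-readable and relies on the directory mode for protection. A sketch of a write_files helper honouring that (path handling and encoding are assumptions) might be:

import os


def write_files(datadir, files, dirmode=None):
    # Ensure datadir exists with the requested mode, then write each
    # {name: content} entry beneath it (illustrative helper only).
    if not files:
        return
    if not os.path.isdir(datadir):
        os.makedirs(datadir)
    if dirmode is not None:
        os.chmod(datadir, dirmode)
    for name, content in files.items():
        data = content if isinstance(content, bytes) else content.encode('utf-8')
        with open(os.path.join(datadir, name), 'wb') as fp:
            fp.write(data)
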
Example #53
0
def find_fallback_nic(blacklist_drivers=None):
    """Return the name of the 'fallback' network device."""
    if not blacklist_drivers:
        blacklist_drivers = []

    if 'net.ifnames=0' in util.get_cmdline():
        LOG.debug('Stable ifnames disabled by net.ifnames=0 in /proc/cmdline')
    else:
        unstable = [device for device in get_devicelist()
                    if device != 'lo' and not is_renamed(device)]
        if len(unstable):
            LOG.debug('Found unstable nic names: %s; calling udevadm settle',
                      unstable)
            msg = 'Waiting for udev events to settle'
            util.log_time(LOG.debug, msg, func=util.udevadm_settle)

    # get list of interfaces that could have connections
    invalid_interfaces = set(['lo'])
    potential_interfaces = set([device for device in get_devicelist()
                                if device_driver(device) not in
                                blacklist_drivers])
    potential_interfaces = potential_interfaces.difference(invalid_interfaces)
    # sort into interfaces with carrier, interfaces which could have carrier,
    # and ignore interfaces that are definitely disconnected
    connected = []
    possibly_connected = []
    for interface in potential_interfaces:
        if interface.startswith("veth"):
            continue
        if is_bridge(interface):
            # skip any bridges
            continue
        if is_bond(interface):
            # skip any bonds
            continue
        carrier = read_sys_net_int(interface, 'carrier')
        if carrier:
            connected.append(interface)
            continue
        # check if nic is dormant or down, as this may make a nic appear to
        # not have a carrier even though it could acquire one when brought
        # online by dhclient
        dormant = read_sys_net_int(interface, 'dormant')
        if dormant:
            possibly_connected.append(interface)
            continue
        operstate = read_sys_net_safe(interface, 'operstate')
        if operstate in ['dormant', 'down', 'lowerlayerdown', 'unknown']:
            possibly_connected.append(interface)
            continue

    # don't bother with interfaces that might not be connected if there are
    # some that definitely are
    if connected:
        potential_interfaces = connected
    else:
        potential_interfaces = possibly_connected

    # if eth0 exists use it above anything else, otherwise get the interface
    # that we can read 'first' (using the sorted definition of first).
    names = list(sorted(potential_interfaces, key=natural_sort_key))
    if DEFAULT_PRIMARY_INTERFACE in names:
        names.remove(DEFAULT_PRIMARY_INTERFACE)
        names.insert(0, DEFAULT_PRIMARY_INTERFACE)

    # pick the first that has a mac-address
    for name in names:
        if read_sys_net_safe(name, 'address'):
            return name
    return None
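
find_fallback_nic orders candidate interfaces with natural_sort_key so that, for example, 'eth2' sorts before 'eth10'. A typical natural-sort key (an assumption about its behaviour, not the exact helper) is:

import re


def natural_sort_key(name):
    # Split out digit runs so numeric components compare numerically.
    return [int(tok) if tok.isdigit() else tok
            for tok in re.split(r'(\d+)', name)]


sorted(['eth10', 'eth2', 'ens3'], key=natural_sort_key)
# -> ['ens3', 'eth2', 'eth10']
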
Example #54
0
def handle(name, cfg, _cloud, log, args):
    if len(args) != 0:
        resize_root = args[0]
    else:
        resize_root = util.get_cfg_option_str(cfg, "resize_rootfs", True)

    if not util.translate_bool(resize_root, addons=[NOBLOCK]):
        log.debug("Skipping module named %s, resizing disabled", name)
        return

    # TODO(harlowja) is the directory ok to be used??
    resize_root_d = util.get_cfg_option_str(cfg, "resize_rootfs_tmp", "/run")
    util.ensure_dir(resize_root_d)

    # TODO(harlowja): allow what is to be resized to be configurable??
    resize_what = "/"
    result = util.get_mount_info(resize_what, log)
    if not result:
        log.warn("Could not determine filesystem type of %s", resize_what)
        return

    (devpth, fs_type, mount_point) = result

    # Ensure the path is a block device.
    info = "dev=%s mnt_point=%s path=%s" % (devpth, mount_point, resize_what)
    log.debug("resize_info: %s" % info)

    container = util.is_container()

    if (devpth == "/dev/root" and not os.path.exists(devpth) and
        not container):
        devpth = rootdev_from_cmdline(util.get_cmdline())
        if devpth is None:
            log.warn("Unable to find device '/dev/root'")
            return
        log.debug("Converted /dev/root to '%s' per kernel cmdline", devpth)

    try:
        statret = os.stat(devpth)
    except OSError as exc:
        if container and exc.errno == errno.ENOENT:
            log.debug("Device '%s' did not exist in container. "
                      "cannot resize: %s" % (devpth, info))
        elif exc.errno == errno.ENOENT:
            log.warn("Device '%s' did not exist. cannot resize: %s" %
                     (devpth, info))
        else:
            raise exc
        return

    if not stat.S_ISBLK(statret.st_mode) and not stat.S_ISCHR(statret.st_mode):
        if container:
            log.debug("device '%s' not a block device in container."
                      " cannot resize: %s" % (devpth, info))
        else:
            log.warn("device '%s' not a block device. cannot resize: %s" %
                     (devpth, info))
        return

    resizer = None
    fstype_lc = fs_type.lower()
    for (pfix, root_cmd) in RESIZE_FS_PREFIXES_CMDS:
        if fstype_lc.startswith(pfix):
            resizer = root_cmd
            break

    if not resizer:
        log.warn("Not resizing unknown filesystem type %s for %s",
                 fs_type, resize_what)
        return

    resize_cmd = resizer(resize_what, devpth)
    log.debug("Resizing %s (%s) using %s", resize_what, fs_type,
              ' '.join(resize_cmd))

    if resize_root == NOBLOCK:
        # Fork to a child that will run
        # the resize command
        util.fork_cb(
            util.log_time, logfunc=log.debug, msg="backgrounded Resizing",
            func=do_resize, args=(resize_cmd, log))
    else:
        util.log_time(logfunc=log.debug, msg="Resizing",
            func=do_resize, args=(resize_cmd, log))

    action = 'Resized'
    if resize_root == NOBLOCK:
        action = 'Resizing (via forking)'
    log.debug("%s root filesystem (type=%s, val=%s)", action, fs_type,
              resize_root)