Example #1
    def test_already_mounted_calls_callback_with_data(self,
                                                      already_mounted_device):
        callback = mock.Mock()
        util.mount_cb(already_mounted_device,
                      callback,
                      data=mock.sentinel.data)

        assert [mock.call(mock.ANY,
                          mock.sentinel.data)] == callback.call_args_list
Example #2
    def test_already_mounted_calls_callback(
            self, trailing_slash_in_mounts,
            already_mounted_device_and_mountdict):
        device, mount_dict = already_mounted_device_and_mountdict
        mountpoint = mount_dict["mountpoint"]
        mount_dict["mountpoint"] += trailing_slash_in_mounts

        callback = mock.Mock()
        util.mount_cb(device, callback)

        # The mountpoint passed to callback should always have a trailing
        # slash, regardless of the input
        assert [mock.call(mountpoint + "/")] == callback.call_args_list
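
These two tests pin down the callback contract of util.mount_cb: the callback receives the mountpoint (normalized to carry a trailing slash) plus an optional data argument, and an already-mounted device is reused rather than mounted again. Below is a minimal sketch of the mount-and-callback pattern itself; the name mount_cb_sketch and its defaults are assumptions for illustration, not cloud-init's real implementation, and the already-mounted shortcut the tests rely on is omitted here.

# Hypothetical sketch of the mount-and-callback pattern the tests above
# exercise; names and defaults are assumptions, not the real util.mount_cb.
import subprocess
import tempfile


def mount_cb_sketch(device, callback, data=None, mtype=None):
    with tempfile.TemporaryDirectory() as tmpd:
        cmd = ["mount", "-o", "ro"]
        if mtype:
            cmd += ["-t", mtype]
        subprocess.run(cmd + [device, tmpd], check=True)
        try:
            mountpoint = tmpd + "/"  # callbacks always see a trailing slash
            if data is None:
                return callback(mountpoint)
            return callback(mountpoint, data)
        finally:
            subprocess.run(["umount", tmpd], check=True)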
Example #3
    def test_normalize_mtype_on_bsd(self, m_tmpdir, m_subp, m_is_BSD,
                                    m_is_Linux, mtype, expected):
        m_is_BSD.return_value = True
        m_is_Linux.return_value = False
        m_tmpdir.return_value.__enter__ = mock.Mock(autospec=True,
                                                    return_value="/tmp/fake")
        m_tmpdir.return_value.__exit__ = mock.Mock(autospec=True,
                                                   return_value=True)
        callback = mock.Mock(autospec=True)

        util.mount_cb('/dev/fake0', callback, mtype=mtype)
        assert mock.call(
            ["mount", "-o", "ro", "-t", expected, "/dev/fake0", "/tmp/fake"],
            update_env=None) in m_subp.call_args_list
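
This test asserts that on BSD the mtype handed to util.mount_cb is translated to the platform's filesystem-type name before mount is invoked. A rough, standalone sketch of such a translation step follows; the mapping entries are assumptions used for illustration, not the exact table cloud-init ships.

# Hypothetical Linux-to-BSD filesystem-type mapping; the entries below are
# assumptions used only to illustrate the normalization idea.
BSD_MTYPE_MAP = {
    "iso9660": "cd9660",
    "vfat": "msdosfs",
    "msdos": "msdosfs",
}


def normalize_mtype_sketch(mtype, is_bsd):
    """Return the filesystem-type name to pass to mount(8) on this platform."""
    if not is_bsd or mtype is None:
        return mtype
    return BSD_MTYPE_MAP.get(mtype, mtype)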
Example #4
def read_md():
    """Read data from IBM Cloud.

    @return: None if not running on IBM Cloud.
             dictionary with guaranteed fields: metadata, version
             and optional fields: userdata, vendordata, networkdata.
             Also includes the system uuid from /sys/hypervisor/uuid."""
    platform, path = get_ibm_platform()
    if platform is None:
        LOG.debug("This is not an IBMCloud platform.")
        return None
    elif platform in PROVISIONING:
        LOG.debug("Cloud-init is disabled during provisioning: %s.", platform)
        return None

    ret = {
        "platform": platform,
        "source": path,
        "system-uuid": _read_system_uuid(),
    }

    try:
        if os.path.isdir(path):
            results = metadata_from_dir(path)
        else:
            results = util.mount_cb(path, metadata_from_dir)
    except sources.BrokenMetadata as e:
        raise RuntimeError(
            "Failed reading IBM config disk (platform=%s path=%s): %s"
            % (platform, path, e)
        ) from e

    ret.update(results)
    return ret
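
A short illustration of how a caller might consume the return contract documented above: None when not on IBM Cloud (or while provisioning), otherwise a dict with the guaranteed keys. Only read_md comes from this example; the rest is a hypothetical consumer.

# Illustrative consumer of read_md(); everything except read_md is made up.
md = read_md()
if md is None:
    print("not an IBM Cloud platform, or still provisioning")
else:
    print("platform=%s source=%s" % (md["platform"], md["source"]))
    metadata = md["metadata"]      # guaranteed field
    userdata = md.get("userdata")  # optional field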
Example #5
    def get_data(self):
        """
        Description:
            User Data is passed to the launching instance which
            is used to perform instance configuration.
        """

        dev_list = util.find_devs_with("LABEL=CLOUDMD")
        rbx_data = None
        for device in dev_list:
            try:
                rbx_data = util.mount_cb(device, read_user_data_callback,
                                         self.distro.name,
                                         mtype=['vfat', 'fat'])
                if rbx_data:
                    break
            except OSError as err:
                if err.errno != errno.ENOENT:
                    raise
            except util.MountFailedError:
                util.logexc(LOG, "Failed to mount %s when looking for user "
                                 "data", device)
        if not rbx_data:
            util.logexc(LOG, "Failed to load metadata and userdata")
            return False

        self.userdata_raw = rbx_data['userdata']
        self.metadata = rbx_data['metadata']
        self.cfg = rbx_data['cfg']
        return True
Example #6
    def _get_data(self):
        defaults = {"instance-id": DEFAULT_IID}
        results = None
        seed = None

        # decide parseuser for context.sh shell reader
        parseuser = DEFAULT_PARSEUSER
        if "parseuser" in self.ds_cfg:
            parseuser = self.ds_cfg.get("parseuser")

        candidates = [self.seed_dir]
        candidates.extend(find_candidate_devs())
        for cdev in candidates:
            try:
                if os.path.isdir(self.seed_dir):
                    results = read_context_disk_dir(cdev,
                                                    self.distro,
                                                    asuser=parseuser)
                elif cdev.startswith("/dev"):
                    # util.mount_cb only handles passing a single argument
                    # through to the wrapped function, so we have to partially
                    # apply the function to pass in `distro`.  See LP: #1884979
                    partially_applied_func = functools.partial(
                        read_context_disk_dir,
                        asuser=parseuser,
                        distro=self.distro,
                    )
                    results = util.mount_cb(cdev, partially_applied_func)
            except NonContextDiskDir:
                continue
            except BrokenContextDiskDir as exc:
                raise exc
            except util.MountFailedError:
                LOG.warning("%s was not mountable", cdev)

            if results:
                seed = cdev
                LOG.debug("found datasource in %s", cdev)
                break

        if not seed:
            return False

        # merge fetched metadata with datasource defaults
        md = results["metadata"]
        md = util.mergemanydict([md, defaults])

        # check for valid user specified dsmode
        self.dsmode = self._determine_dsmode(
            [results.get("DSMODE"),
             self.ds_cfg.get("dsmode")])

        if self.dsmode == sources.DSMODE_DISABLED:
            return False

        self.seed = seed
        self.network = results.get("network-interfaces")
        self.metadata = md
        self.userdata_raw = results.get("userdata")
        return True
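
The comment in this example about util.mount_cb passing only a single argument through captures a reusable technique: bind any extra callback arguments with functools.partial before handing the callback to mount_cb. A small self-contained illustration follows; read_disk_sketch and its parameters are hypothetical placeholders.

# Demonstrates the functools.partial technique referenced above;
# read_disk_sketch and its parameters are made-up placeholders.
import functools
import os


def read_disk_sketch(mountpoint, asuser=None, distro=None):
    # A mount_cb-style callback only receives the mountpoint, so any extra
    # parameters have to be bound in ahead of time.
    return {"files": os.listdir(mountpoint), "asuser": asuser}


bound_cb = functools.partial(read_disk_sketch, asuser="root", distro=None)
# util.mount_cb("/dev/sr0", bound_cb) would then invoke
# read_disk_sketch(<mountpoint>, asuser="root", distro=None)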
Example #7
    def user_data_vsphere(self):
        '''
        vSphere specific userdata read

        If on vSphere the user data will be contained on the
        cdrom device in file <user_data_file>
        To access it:
           Leverage util.mount_cb to:
               mkdir <tmp mount dir>
               mount /dev/fd0 <tmp mount dir>
               The call back passed to util.mount_cb will do:
                   read <tmp mount dir>/<user_data_file>
        '''

        return_str = None
        cdrom_list = util.find_devs_with('LABEL=CDROM')
        for cdrom_dev in cdrom_list:
            try:
                return_str = util.mount_cb(cdrom_dev, read_user_data_callback)
                if return_str:
                    break
            except OSError as err:
                if err.errno != errno.ENOENT:
                    raise
            except util.MountFailedError:
                util.logexc(LOG, "Failed to mount %s when looking for user "
                            "data", cdrom_dev)

        self.userdata_raw = return_str
        self.metadata = META_DATA_NOT_SUPPORTED

        if return_str:
            return True
        else:
            return False
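
The docstring above describes the callback's whole job: read <user_data_file> from the temporary mountpoint and hand its contents back. A minimal sketch of what such a callback might look like; the filename and the None-on-missing behaviour are assumptions, not the datasource's actual helper.

# Hypothetical stand-in for read_user_data_callback; the file name is an
# assumption used only for illustration.
import os


def read_user_data_callback_sketch(mount_dir, user_data_file="user-data.txt"):
    path = os.path.join(mount_dir, user_data_file)
    if not os.path.isfile(path):
        return None
    with open(path, "rb") as fh:
        return fh.read()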
Example #8
def transport_iso9660(require_iso=True):

    # Go through mounts to see if it was already mounted
    mounts = util.mounts()
    for (dev, info) in mounts.items():
        fstype = info['fstype']
        if fstype != "iso9660" and require_iso:
            continue
        if not maybe_cdrom_device(dev):
            continue
        mp = info['mountpoint']
        (_fname, contents) = get_ovf_env(mp)
        if contents is not False:
            return contents

    if require_iso:
        mtype = "iso9660"
    else:
        mtype = None

    # generate a list of devices with mtype filesystem, filter by regex
    devs = [dev for dev in
            util.find_devs_with("TYPE=%s" % mtype if mtype else None)
            if maybe_cdrom_device(dev)]
    for dev in devs:
        try:
            (_fname, contents) = util.mount_cb(dev, get_ovf_env, mtype=mtype)
        except util.MountFailedError:
            LOG.debug("%s not mountable as iso9660", dev)
            continue

        if contents is not False:
            return contents

    return None
Example #9
def transport_iso9660(require_iso=True):

    # Go through mounts to see if it was already mounted
    mounts = util.mounts()
    for (dev, info) in mounts.items():
        fstype = info['fstype']
        if fstype != "iso9660" and require_iso:
            continue
        if not maybe_cdrom_device(dev):
            continue
        mp = info['mountpoint']
        (_fname, contents) = get_ovf_env(mp)
        if contents is not False:
            return contents

    if require_iso:
        mtype = "iso9660"
    else:
        mtype = None

    # generate a list of devices with mtype filesystem, filter by regex
    devs = [dev for dev in
            util.find_devs_with("TYPE=%s" % mtype if mtype else None)
            if maybe_cdrom_device(dev)]
    for dev in devs:
        try:
            (_fname, contents) = util.mount_cb(dev, get_ovf_env, mtype=mtype)
        except util.MountFailedError:
            LOG.debug("%s not mountable as iso9660", dev)
            continue

        if contents is not False:
            return contents

    return None
Example #10
def get_md():
    rbx_data = None
    devices = [
        dev
        for dev, bdata in util.blkid().items()
        if bdata.get('LABEL', '').upper() == 'CLOUDMD'
    ]
    for device in devices:
        try:
            rbx_data = util.mount_cb(
                device=device,
                callback=read_user_data_callback,
                mtype=['vfat', 'fat']
            )
            if rbx_data:
                break
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
        except util.MountFailedError:
            util.logexc(LOG, "Failed to mount %s when looking for user "
                             "data", device)
    if not rbx_data:
        util.logexc(LOG, "Failed to load metadata and userdata")
        return False
    return rbx_data
Example #11
def get_md():
    """Returns False (not found or error) or a dictionary with metadata."""
    devices = set(
        util.find_devs_with("LABEL=CLOUDMD") +
        util.find_devs_with("LABEL=cloudmd"))
    if not devices:
        return False
    for device in devices:
        try:
            rbx_data = util.mount_cb(
                device=device,
                callback=read_user_data_callback,
                mtype=["vfat", "fat", "msdosfs"],
            )
            if rbx_data:
                return rbx_data
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
        except util.MountFailedError:
            util.logexc(LOG, "Failed to mount %s when looking for user data",
                        device)

    LOG.debug("Did not find RbxCloud data, searched devices: %s",
              ",".join(devices))
    return False
Example #12
    def user_data_vsphere(self):
        '''
        vSphere specific userdata read

        If on vSphere the user data will be contained on the
        cdrom device in file <user_data_file>
        To access it:
           Leverage util.mount_cb to:
               mkdir <tmp mount dir>
               mount /dev/fd0 <tmp mount dir>
               The call back passed to util.mount_cb will do:
                   read <tmp mount dir>/<user_data_file>
        '''

        return_str = None
        cdrom_list = util.find_devs_with('LABEL=CDROM')
        for cdrom_dev in cdrom_list:
            try:
                return_str = util.mount_cb(cdrom_dev, read_user_data_callback)
                if return_str:
                    break
            except OSError as err:
                if err.errno != errno.ENOENT:
                    raise
            except util.MountFailedError:
                util.logexc(LOG, "Failed to mount %s when looking for user "
                            "data", cdrom_dev)

        self.userdata_raw = return_str
        self.metadata = META_DATA_NOT_SUPPORTED

        if return_str:
            return True
        else:
            return False
Example #13
    def user_data_rhevm(self):
        '''
        RHEVM specific userdata read

         If on RHEV-M the user data will be contained on the
         floppy device in file <user_data_file>
         To access it:
           modprobe floppy

           Leverage util.mount_cb to:
               mkdir <tmp mount dir>
               mount /dev/fd0 <tmp mount dir>
               The call back passed to util.mount_cb will do:
                   read <tmp mount dir>/<user_data_file>
        '''

        return_str = None

        # modprobe floppy
        try:
            cmd = CMD_PROBE_FLOPPY
            (cmd_out, _err) = util.subp(cmd)
            LOG.debug('Command: %s\nOutput: %s', ' '.join(cmd), cmd_out)
        except ProcessExecutionError as _err:
            util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), _err)
            return False
        except OSError as _err:
            util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), _err)
            return False

        floppy_dev = '/dev/fd0'

        # udevadm settle for floppy device
        try:
            (cmd_out, _err) = util.udevadm_settle(exists=floppy_dev, timeout=5)
            LOG.debug('udevadm settle output: %s', cmd_out)
        except ProcessExecutionError as _err:
            util.logexc(LOG, 'Failed udevadm settle for %s: %s',
                        floppy_dev, _err)
            return False
        except OSError as _err:
            util.logexc(LOG, 'Failed udevadm settle for %s: %s',
                        floppy_dev, _err)
            return False

        try:
            return_str = util.mount_cb(floppy_dev, read_user_data_callback)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
        except util.MountFailedError:
            util.logexc(LOG, "Failed to mount %s when looking for user data",
                        floppy_dev)

        self.userdata_raw = return_str
        self.metadata = META_DATA_NOT_SUPPORTED

        if return_str:
            return True
        else:
            return False
Example #14
def transport_iso9660(require_iso=True):

    # default_regex matches values in
    # /lib/udev/rules.d/60-cdrom_id.rules
    # KERNEL!="sr[0-9]*|hd[a-z]|xvd*", GOTO="cdrom_end"
    envname = "CLOUD_INIT_CDROM_DEV_REGEX"
    default_regex = "^(sr[0-9]+|hd[a-z]|xvd.*)"

    devname_regex = os.environ.get(envname, default_regex)
    cdmatch = re.compile(devname_regex)

    # Go through mounts to see if it was already mounted
    mounts = util.mounts()
    for (dev, info) in mounts.items():
        fstype = info['fstype']
        if fstype != "iso9660" and require_iso:
            continue
        if cdmatch.match(dev[5:]) is None:  # take off '/dev/'
            continue
        mp = info['mountpoint']
        (fname, contents) = get_ovf_env(mp)
        if contents is not False:
            return (contents, dev, fname)

    if require_iso:
        mtype = "iso9660"
    else:
        mtype = None

    devs = os.listdir("/dev/")
    devs.sort()
    for dev in devs:
        fullp = os.path.join("/dev/", dev)

        if (fullp in mounts or
                not cdmatch.match(dev) or os.path.isdir(fullp)):
            continue

        try:
            # See if we can read anything at all...??
            util.peek_file(fullp, 512)
        except IOError:
            continue

        try:
            (fname, contents) = util.mount_cb(fullp, get_ovf_env, mtype=mtype)
        except util.MountFailedError:
            LOG.debug("%s not mountable as iso9660", fullp)
            continue

        if contents is not False:
            return (contents, fullp, fname)

    return (False, None, None)
Example #15
def can_dev_be_reformatted(devpath):
    # determine if the ephemeral block device path devpath
    # is newly formatted after a resize.
    if not os.path.exists(devpath):
        return False, 'device %s does not exist' % devpath

    realpath = os.path.realpath(devpath)
    LOG.debug('Resolving realpath of %s -> %s', devpath, realpath)

    # it is possible that the block device might exist, but the kernel
    # have not yet read the partition table and sent events.  we udevadm settle
    # to hope to resolve that.  Better here would probably be to test and see,
    # and then settle if we didn't find anything and try again.
    if util.which("udevadm"):
        util.subp(["udevadm", "settle"])

    # devpath of /dev/sd[a-z] or /dev/disk/cloud/azure_resource
    # where partitions are "<devpath>1" or "<devpath>-part1" or "<devpath>p1"
    part1path = None
    for suff in ("-part", "p", ""):
        cand = devpath + suff + "1"
        if os.path.exists(cand):
            if os.path.exists(devpath + suff + "2"):
                msg = ('device %s had more than 1 partition: %s, %s' %
                       (devpath, cand, devpath + suff + "2"))
                return False, msg
            part1path = cand
            break

    if part1path is None:
        return False, 'device %s was not partitioned' % devpath

    real_part1path = os.path.realpath(part1path)
    ntfs_devices = util.find_devs_with("TYPE=ntfs", no_cache=True)
    LOG.debug('ntfs_devices found = %s', ntfs_devices)
    if real_part1path not in ntfs_devices:
        msg = ('partition 1 (%s -> %s) on device %s was not ntfs formatted' %
               (part1path, real_part1path, devpath))
        return False, msg

    def count_files(mp):
        ignored = {'dataloss_warning_readme.txt'}
        return len([f for f in os.listdir(mp) if f.lower() not in ignored])

    bmsg = ('partition 1 (%s -> %s) on device %s was ntfs formatted' %
            (part1path, real_part1path, devpath))
    try:
        file_count = util.mount_cb(part1path, count_files)
    except util.MountFailedError as e:
        return False, bmsg + ' but mount of %s failed: %s' % (part1path, e)

    if file_count != 0:
        return False, bmsg + ' but had %d files on it.' % file_count

    return True, bmsg + ' and had no important files. Safe for reformatting.'
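
Because can_dev_be_reformatted returns a (bool, message) pair instead of raising, callers can log the reasoning either way. A short hypothetical consumer; only can_dev_be_reformatted comes from the example above, and the device path is a placeholder.

# Illustrative caller of can_dev_be_reformatted(); the device path is a
# placeholder.
ok, reason = can_dev_be_reformatted("/dev/disk/cloud/azure_resource")
if ok:
    print("will reformat ephemeral disk: %s" % reason)
else:
    print("leaving ephemeral disk alone: %s" % reason)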
Example #16
    def get_data(self):
        '''
        Description:
            User Data is passed to the launching instance which
            is used to perform instance configuration.
        '''

        dev_list = util.find_devs_with("LABEL=CLOUDMD")
        rbx_data = None
        for device in dev_list:
            try:
                rbx_data = util.mount_cb(device, read_user_data_callback,
                                         self.distro.name)
                if rbx_data:
                    break
            except OSError as err:
                if err.errno != errno.ENOENT:
                    raise
            except util.MountFailedError:
                util.logexc(LOG, "Failed to mount %s when looking for user "
                            "data", device)
        if not rbx_data:
            util.logexc(LOG, "Failed to load metadata and userdata")
            return False

        self.userdata_raw = rbx_data['userdata']
        self.metadata = rbx_data['metadata']
        self.cfg = rbx_data['cfg']

        LOG.debug('RBX: metadata')
        LOG.debug(self.metadata)
        if self.metadata['network-interfaces']:
            LOG.debug("Updating network interfaces from %s", self)
            netdevices = netdev_info()

            for nic, data in netdevices.items():

                ifdown_cmd = ['ifdown', nic]
                ip_down_cmd = ['ip', 'link', 'set', 'dev', nic, 'down']
                ip_flush_cmd = ['ip', 'addr', 'flush', 'dev', nic]

                try:
                    util.subp(ifdown_cmd)
                    LOG.debug("Brought '%s' down.", nic)

                    util.subp(ip_down_cmd)
                    LOG.debug("Brought '%s' down.", nic)

                    util.subp(ip_flush_cmd)
                    LOG.debug("Cleared config of '%s'.", nic)
                except Exception:
                    LOG.debug("Clearing config of '%s' failed.", nic)

            self.distro.apply_network(self.metadata['network-interfaces'])

        return True
Example #17
def transport_iso9660(require_iso=True):

    # default_regex matches values in
    # /lib/udev/rules.d/60-cdrom_id.rules
    # KERNEL!="sr[0-9]*|hd[a-z]|xvd*", GOTO="cdrom_end"
    envname = "CLOUD_INIT_CDROM_DEV_REGEX"
    default_regex = "^(sr[0-9]+|hd[a-z]|xvd.*)"

    devname_regex = os.environ.get(envname, default_regex)
    cdmatch = re.compile(devname_regex)

    # Go through mounts to see if it was already mounted
    mounts = util.mounts()
    for (dev, info) in mounts.items():
        fstype = info['fstype']
        if fstype != "iso9660" and require_iso:
            continue
        if cdmatch.match(dev[5:]) is None:  # take off '/dev/'
            continue
        mp = info['mountpoint']
        (fname, contents) = get_ovf_env(mp)
        if contents is not False:
            return (contents, dev, fname)

    if require_iso:
        mtype = "iso9660"
    else:
        mtype = None

    devs = os.listdir("/dev/")
    devs.sort()
    for dev in devs:
        fullp = os.path.join("/dev/", dev)

        if (fullp in mounts or
                not cdmatch.match(dev) or os.path.isdir(fullp)):
            continue

        try:
            # See if we can read anything at all...??
            util.peek_file(fullp, 512)
        except IOError:
            continue

        try:
            (fname, contents) = util.mount_cb(fullp, get_ovf_env, mtype=mtype)
        except util.MountFailedError:
            LOG.debug("%s not mountable as iso9660", fullp)
            continue

        if contents is not False:
            return (contents, fullp, fname)

    return (False, None, None)
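
The CLOUD_INIT_CDROM_DEV_REGEX environment variable read at the top of this function means the candidate-device pattern can be widened without code changes. A hypothetical use of that override before calling transport_iso9660; the extra vd[a-z] alternative is an assumption about the target system, not part of the shipped default.

# Sketch: override the documented device regex via the environment before
# scanning for an OVF ISO; only transport_iso9660 comes from the example,
# and the vd[a-z] addition is an assumption.
import os

os.environ["CLOUD_INIT_CDROM_DEV_REGEX"] = "^(sr[0-9]+|hd[a-z]|xvd.*|vd[a-z])"
contents, dev, fname = transport_iso9660(require_iso=True)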
Example #18
def support_new_ephemeral(cfg):
    """
    Windows Azure makes ephemeral devices ephemeral to boot; an ephemeral device
    may be presented as a fresh device, or not.

    Since the knowledge of when a disk is supposed to be plowed under is
    specific to Windows Azure, the logic resides here in the datasource. When a
    new ephemeral device is detected, cloud-init overrides the default
    frequency for both disk-setup and mounts for the current boot only.
    """
    device = find_fabric_formatted_ephemeral_part()
    if not device:
        LOG.debug("no default fabric formatted ephemeral0.1 found")
        return None
    LOG.debug("fabric formatted ephemeral0.1 device at %s", device)

    file_count = 0
    try:
        file_count = util.mount_cb(device, count_files)
    except Exception:
        return None
    LOG.debug("fabric prepared ephemeral0.1 has %s files on it", file_count)

    if file_count >= 1:
        LOG.debug("fabric prepared ephemeral0.1 will be preserved")
        return None
    else:
        # if device was already mounted, then we need to unmount it
        # race conditions could allow for a check-then-unmount
        # to have a false positive. so just unmount and then check.
        try:
            util.subp(['umount', device])
        except util.ProcessExecutionError as e:
            if device in util.mounts():
                LOG.warning("Failed to unmount %s, will not reformat.", device)
                LOG.debug("Failed umount: %s", e)
                return None

    LOG.debug("cloud-init will format ephemeral0.1 this boot.")
    LOG.debug("setting disk_setup and mounts modules 'always' for this boot")

    cc_modules = cfg.get('cloud_config_modules')
    if not cc_modules:
        return None

    mod_list = []
    for mod in cc_modules:
        if mod in ("disk_setup", "mounts"):
            mod_list.append([mod, PER_ALWAYS])
            LOG.debug("set module '%s' to 'always' for this boot", mod)
        else:
            mod_list.append(mod)
    return mod_list
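
support_new_ephemeral returns either None (leave the configuration alone) or a replacement module list with disk_setup and mounts pinned to run every boot. A brief sketch of how a caller might apply that result; the cfg contents below are assumed example data, only the 'cloud_config_modules' key mirrors the code above.

# Illustrative application of support_new_ephemeral()'s return value; the
# module names in cfg are assumed example data.
cfg = {"cloud_config_modules": ["disk_setup", "mounts", "runcmd"]}
mod_list = support_new_ephemeral(cfg)
if mod_list is not None:
    cfg["cloud_config_modules"] = mod_list  # disk_setup/mounts now PER_ALWAYS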
Example #19
    def user_data_rhevm(self):
        """
        RHEVM specific userdata read

         If on RHEV-M the user data will be contained on the
         floppy device in file <user_data_file>
         To access it:
           modprobe floppy

           Leverage util.mount_cb to:
               mkdir <tmp mount dir>
               mount /dev/fd0 <tmp mount dir>
               The call back passed to util.mount_cb will do:
                   read <tmp mount dir>/<user_data_file>
        """

        return_str = None

        # modprobe floppy
        try:
            modprobe_floppy()
        except subp.ProcessExecutionError as e:
            util.logexc(LOG, "Failed modprobe: %s", e)
            return False

        floppy_dev = "/dev/fd0"

        # udevadm settle for floppy device
        try:
            util.udevadm_settle(exists=floppy_dev, timeout=5)
        except (subp.ProcessExecutionError, OSError) as e:
            util.logexc(LOG, "Failed udevadm_settle: %s\n", e)
            return False

        try:
            return_str = util.mount_cb(floppy_dev, read_user_data_callback)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
        except util.MountFailedError:
            util.logexc(
                LOG,
                "Failed to mount %s when looking for user data",
                floppy_dev,
            )

        self.userdata_raw = return_str
        self.metadata = META_DATA_NOT_SUPPORTED

        if return_str:
            return True
        else:
            return False
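
The function above relies on a modprobe_floppy() helper that is not shown in this listing. A plausible minimal sketch, assuming the subp module and LOG object already imported by this datasource; the real helper may differ.

# Hypothetical sketch of the modprobe_floppy() helper used above; it assumes
# the surrounding module already provides subp and LOG.
def modprobe_floppy_sketch():
    out, _err = subp.subp(["modprobe", "floppy"])
    LOG.debug("Command: modprobe floppy\nOutput: %s", out)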
Example #20
def support_new_ephemeral(cfg):
    """
    Windows Azure makes ephemeral devices ephemeral to boot; an ephemeral device
    may be presented as a fresh device, or not.

    Since the knowledge of when a disk is supposed to be plowed under is
    specific to Windows Azure, the logic resides here in the datasource. When a
    new ephemeral device is detected, cloud-init overrides the default
    frequency for both disk-setup and mounts for the current boot only.
    """
    device = find_ephemeral_part()
    if not device:
        LOG.debug("no default fabric formatted ephemeral0.1 found")
        return None
    LOG.debug("fabric formatted ephemeral0.1 device at %s", device)

    file_count = 0
    try:
        file_count = util.mount_cb(device, count_files)
    except Exception:
        return None
    LOG.debug("fabric prepared ephemeral0.1 has %s files on it", file_count)

    if file_count >= 1:
        LOG.debug("fabric prepared ephemeral0.1 will be preserved")
        return None
    else:
        # if device was already mounted, then we need to unmount it
        # race conditions could allow for a check-then-unmount
        # to have a false positive. so just unmount and then check.
        try:
            util.subp(['umount', device])
        except util.ProcessExecutionError as e:
            if device in util.mounts():
                LOG.warning("Failed to unmount %s, will not reformat.", device)
                LOG.debug("Failed umount: %s", e)
                return None

    LOG.debug("cloud-init will format ephemeral0.1 this boot.")
    LOG.debug("setting disk_setup and mounts modules 'always' for this boot")

    cc_modules = cfg.get('cloud_config_modules')
    if not cc_modules:
        return None

    mod_list = []
    for mod in cc_modules:
        if mod in ("disk_setup", "mounts"):
            mod_list.append([mod, PER_ALWAYS])
            LOG.debug("set module '%s' to 'always' for this boot", mod)
        else:
            mod_list.append(mod)
    return mod_list
Example #21
    def _get_data(self):
        defaults = {"instance-id": DEFAULT_IID}
        results = None
        seed = None

        # decide parseuser for context.sh shell reader
        parseuser = DEFAULT_PARSEUSER
        if 'parseuser' in self.ds_cfg:
            parseuser = self.ds_cfg.get('parseuser')

        candidates = [self.seed_dir]
        candidates.extend(find_candidate_devs())
        for cdev in candidates:
            try:
                if os.path.isdir(self.seed_dir):
                    results = read_context_disk_dir(cdev, asuser=parseuser)
                elif cdev.startswith("/dev"):
                    results = util.mount_cb(cdev,
                                            read_context_disk_dir,
                                            data=parseuser)
            except NonContextDiskDir:
                continue
            except BrokenContextDiskDir as exc:
                raise exc
            except util.MountFailedError:
                LOG.warning("%s was not mountable", cdev)

            if results:
                seed = cdev
                LOG.debug("found datasource in %s", cdev)
                break

        if not seed:
            return False

        # merge fetched metadata with datasource defaults
        md = results['metadata']
        md = util.mergemanydict([md, defaults])

        # check for valid user specified dsmode
        self.dsmode = self._determine_dsmode(
            [results.get('DSMODE'),
             self.ds_cfg.get('dsmode')])

        if self.dsmode == sources.DSMODE_DISABLED:
            return False

        self.seed = seed
        self.network_eni = results.get('network-interfaces')
        self.metadata = md
        self.userdata_raw = results.get('userdata')
        return True
Example #22
def can_dev_be_reformatted(devpath):
    # determine if the ephemeral block device path devpath
    # is newly formatted after a resize.
    if not os.path.exists(devpath):
        return False, "device %s does not exist" % devpath

    realpath = os.path.realpath(devpath)
    LOG.debug("Resolving realpath of %s -> %s", devpath, realpath)

    # it is possible that the block device might exist, but the kernel
    # have not yet read the partition table and sent events.  we udevadm settle
    # to hope to resolve that.  Better here would probably be to test and see,
    # and then settle if we didn't find anything and try again.
    if util.which("udevadm"):
        util.subp(["udevadm", "settle"])

    # devpath of /dev/sd[a-z] or /dev/disk/cloud/azure_resource
    # where partitions are "<devpath>1" or "<devpath>-part1" or "<devpath>p1"
    part1path = None
    for suff in ("-part", "p", ""):
        cand = devpath + suff + "1"
        if os.path.exists(cand):
            if os.path.exists(devpath + suff + "2"):
                msg = ("device %s had more than 1 partition: %s, %s"
                       % (devpath, cand, devpath + suff + "2"))
                return False, msg
            part1path = cand
            break

    if part1path is None:
        return False, "device %s was not partitioned" % devpath

    real_part1path = os.path.realpath(part1path)
    ntfs_devices = util.find_devs_with("TYPE=ntfs", no_cache=True)
    LOG.debug("ntfs_devices found = %s", ntfs_devices)
    if real_part1path not in ntfs_devices:
        msg = "partition 1 (%s -> %s) on device %s was not ntfs formatted" % (part1path, real_part1path, devpath)
        return False, msg

    def count_files(mp):
        ignored = set(["dataloss_warning_readme.txt"])
        return len([f for f in os.listdir(mp) if f.lower() not in ignored])

    bmsg = "partition 1 (%s -> %s) on device %s was ntfs formatted" % (part1path, real_part1path, devpath)
    try:
        file_count = util.mount_cb(part1path, count_files)
    except util.MountFailedError as e:
        return False, bmsg + " but mount of %s failed: %s" % (part1path, e)

    if file_count != 0:
        return False, bmsg + " but had %d files on it." % file_count

    return True, bmsg + " and had no important files. Safe for reformatting."
Example #23
def can_dev_be_reformatted(devpath):
    """Determine if block device devpath is newly formatted ephemeral.

    A newly formatted disk will:
      a.) have a partition table (dos or gpt)
      b.) have 1 partition that is ntfs formatted, or
          have 2 partitions with the second partition ntfs formatted.
          (larger instances with >2TB ephemeral disk have gpt, and will
           have a microsoft reserved partition as part 1.  LP: #1686514)
      c.) the ntfs partition will have no files other than possibly
          'dataloss_warning_readme.txt'"""
    if not os.path.exists(devpath):
        return False, 'device %s does not exist' % devpath

    LOG.debug('Resolving realpath of %s -> %s', devpath,
              os.path.realpath(devpath))

    # devpath of /dev/sd[a-z] or /dev/disk/cloud/azure_resource
    # where partitions are "<devpath>1" or "<devpath>-part1" or "<devpath>p1"
    partitions = _partitions_on_device(devpath)
    if len(partitions) == 0:
        return False, 'device %s was not partitioned' % devpath
    elif len(partitions) > 2:
        msg = ('device %s had 3 or more partitions: %s' %
               (devpath, ' '.join([p[1] for p in partitions])))
        return False, msg
    elif len(partitions) == 2:
        cand_part, cand_path = partitions[1]
    else:
        cand_part, cand_path = partitions[0]

    if not _has_ntfs_filesystem(cand_path):
        msg = ('partition %s (%s) on device %s was not ntfs formatted' %
               (cand_part, cand_path, devpath))
        return False, msg

    def count_files(mp):
        ignored = set(['dataloss_warning_readme.txt'])
        return len([f for f in os.listdir(mp) if f.lower() not in ignored])

    bmsg = ('partition %s (%s) on device %s was ntfs formatted' %
            (cand_part, cand_path, devpath))
    try:
        file_count = util.mount_cb(cand_path, count_files)
    except util.MountFailedError as e:
        return False, bmsg + ' but mount of %s failed: %s' % (cand_part, e)

    if file_count != 0:
        return False, bmsg + ' but had %d files on it.' % file_count

    return True, bmsg + ' and had no important files. Safe for reformatting.'
Example #24
    def get_data(self):
        defaults = {"instance-id": DEFAULT_IID}
        results = None
        seed = None

        # decide parseuser for context.sh shell reader
        parseuser = DEFAULT_PARSEUSER
        if 'parseuser' in self.ds_cfg:
            parseuser = self.ds_cfg.get('parseuser')

        candidates = [self.seed_dir]
        candidates.extend(find_candidate_devs())
        for cdev in candidates:
            try:
                if os.path.isdir(self.seed_dir):
                    results = read_context_disk_dir(cdev, asuser=parseuser)
                elif cdev.startswith("/dev"):
                    results = util.mount_cb(cdev, read_context_disk_dir,
                                            data=parseuser)
            except NonContextDiskDir:
                continue
            except BrokenContextDiskDir as exc:
                raise exc
            except util.MountFailedError:
                LOG.warning("%s was not mountable", cdev)

            if results:
                seed = cdev
                LOG.debug("found datasource in %s", cdev)
                break

        if not seed:
            return False

        # merge fetched metadata with datasource defaults
        md = results['metadata']
        md = util.mergemanydict([md, defaults])

        # check for valid user specified dsmode
        self.dsmode = self._determine_dsmode(
            [results.get('DSMODE'), self.ds_cfg.get('dsmode')])

        if self.dsmode == sources.DSMODE_DISABLED:
            return False

        self.seed = seed
        self.network_eni = results.get("network_config")
        self.metadata = md
        self.userdata_raw = results.get('userdata')
        return True
Example #25
    def _get_data(self):
        defaults = {
            "instance-id": "nocloud",
            "dsmode": self.dsmode,
        }

        found = []
        mydata = {'meta-data': {}, 'user-data': "", 'vendor-data': "",
                  'network-config': None}

        try:
            # Parse the system serial label from dmi. If not empty, try parsing
            # like the commandline
            md = {}
            serial = util.read_dmi_data('system-serial-number')
            if serial and load_cmdline_data(md, serial):
                found.append("dmi")
                mydata = _merge_new_seed(mydata, {'meta-data': md})
        except Exception:
            util.logexc(LOG, "Unable to parse dmi data")
            return False

        try:
            # Parse the kernel command line, getting data passed in
            md = {}
            if load_cmdline_data(md):
                found.append("cmdline")
                mydata = _merge_new_seed(mydata, {'meta-data': md})
        except Exception:
            util.logexc(LOG, "Unable to parse command line data")
            return False

        # Check to see if the seed dir has data.
        pp2d_kwargs = {'required': ['user-data', 'meta-data'],
                       'optional': ['vendor-data', 'network-config']}

        for path in self.seed_dirs:
            try:
                seeded = util.pathprefix2dict(path, **pp2d_kwargs)
                found.append(path)
                LOG.debug("Using seeded data from %s", path)
                mydata = _merge_new_seed(mydata, seeded)
                break
            except ValueError:
                pass

        # If the datasource config had a 'seedfrom' entry, then that takes
        # precedence over a 'seedfrom' that was found in a filesystem
        # but not over external media
        if self.ds_cfg.get('seedfrom'):
            found.append("ds_config_seedfrom")
            mydata['meta-data']["seedfrom"] = self.ds_cfg['seedfrom']

        # fields appropriately named can also just come from the datasource
        # config (ie, 'user-data', 'meta-data', 'vendor-data' there)
        if 'user-data' in self.ds_cfg and 'meta-data' in self.ds_cfg:
            mydata = _merge_new_seed(mydata, self.ds_cfg)
            found.append("ds_config")

        def _pp2d_callback(mp, data):
            return util.pathprefix2dict(mp, **data)

        label = self.ds_cfg.get('fs_label', "cidata")
        if label is not None:
            # Query optical drive to get it in blkid cache for 2.6 kernels
            util.find_devs_with(path="/dev/sr0")
            util.find_devs_with(path="/dev/sr1")

            fslist = util.find_devs_with("TYPE=vfat")
            fslist.extend(util.find_devs_with("TYPE=iso9660"))

            label_list = util.find_devs_with("LABEL=%s" % label.upper())
            label_list.extend(util.find_devs_with("LABEL=%s" % label.lower()))

            devlist = list(set(fslist) & set(label_list))
            devlist.sort(reverse=True)

            for dev in devlist:
                try:
                    LOG.debug("Attempting to use data from %s", dev)

                    try:
                        seeded = util.mount_cb(dev, _pp2d_callback,
                                               pp2d_kwargs)
                    except ValueError:
                        if dev in label_list:
                            LOG.warning("device %s with label=%s not a "
                                        "valid seed.", dev, label)
                        continue

                    mydata = _merge_new_seed(mydata, seeded)

                    LOG.debug("Using data from %s", dev)
                    found.append(dev)
                    break
                except OSError as e:
                    if e.errno != errno.ENOENT:
                        raise
                except util.MountFailedError:
                    util.logexc(LOG, "Failed to mount %s when looking for "
                                "data", dev)

        # There was no indication on kernel cmdline or data
        # in the seeddir suggesting this handler should be used.
        if len(found) == 0:
            return False

        # The special argument "seedfrom" indicates we should
        # attempt to seed the userdata / metadata from its value
        # its primary value is in allowing the user to type less
        # on the command line, ie: ds=nocloud;s=http://bit.ly/abcdefg
        if "seedfrom" in mydata['meta-data']:
            seedfrom = mydata['meta-data']["seedfrom"]
            seedfound = False
            for proto in self.supported_seed_starts:
                if seedfrom.startswith(proto):
                    seedfound = proto
                    break
            if not seedfound:
                LOG.debug("Seed from %s not supported by %s", seedfrom, self)
                return False

            # This could throw errors, but the user told us to do it
            # so if errors are raised, let them raise
            (md_seed, ud) = util.read_seeded(seedfrom, timeout=None)
            LOG.debug("Using seeded cache data from %s", seedfrom)

            # Values in the command line override those from the seed
            mydata['meta-data'] = util.mergemanydict([mydata['meta-data'],
                                                      md_seed])
            mydata['user-data'] = ud
            found.append(seedfrom)

        # Now that we have exhausted any other places merge in the defaults
        mydata['meta-data'] = util.mergemanydict([mydata['meta-data'],
                                                  defaults])

        self.dsmode = self._determine_dsmode(
            [mydata['meta-data'].get('dsmode')])

        if self.dsmode == sources.DSMODE_DISABLED:
            LOG.debug("%s: not claiming datasource, dsmode=%s", self,
                      self.dsmode)
            return False

        self.seed = ",".join(found)
        self.metadata = mydata['meta-data']
        self.userdata_raw = mydata['user-data']
        self.vendordata_raw = mydata['vendor-data']
        self._network_config = mydata['network-config']
        self._network_eni = mydata['meta-data'].get('network-interfaces')
        return True
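
As the comment inside this method notes, seedfrom usually arrives via the kernel command line (ds=nocloud;s=<url>) and is only honoured when its scheme matches one of supported_seed_starts. A tiny standalone sketch of that protocol check; the whitelist below is an assumption, not NoCloud's exact tuple.

# Hypothetical protocol check mirroring the seedfrom loop above; the
# whitelist is an assumption.
supported_seed_starts = ("http://", "https://", "file://")


def seedfrom_supported_sketch(seedfrom):
    return any(seedfrom.startswith(proto) for proto in supported_seed_starts)


print(seedfrom_supported_sketch("http://bit.ly/abcdefg"))  # True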
Example #26
    def _get_data(self):
        defaults = {
            "instance-id": "nocloud",
            "dsmode": self.dsmode,
        }

        found = []
        mydata = {'meta-data': {}, 'user-data': "", 'vendor-data': "",
                  'network-config': None}

        try:
            # Parse the system serial label from dmi. If not empty, try parsing
            # like the commandline
            md = {}
            serial = util.read_dmi_data('system-serial-number')
            if serial and load_cmdline_data(md, serial):
                found.append("dmi")
                mydata = _merge_new_seed(mydata, {'meta-data': md})
        except Exception:
            util.logexc(LOG, "Unable to parse dmi data")
            return False

        try:
            # Parse the kernel command line, getting data passed in
            md = {}
            if load_cmdline_data(md):
                found.append("cmdline")
                mydata = _merge_new_seed(mydata, {'meta-data': md})
        except Exception:
            util.logexc(LOG, "Unable to parse command line data")
            return False

        # Check to see if the seed dir has data.
        pp2d_kwargs = {'required': ['user-data', 'meta-data'],
                       'optional': ['vendor-data', 'network-config']}

        for path in self.seed_dirs:
            try:
                seeded = util.pathprefix2dict(path, **pp2d_kwargs)
                found.append(path)
                LOG.debug("Using seeded data from %s", path)
                mydata = _merge_new_seed(mydata, seeded)
                break
            except ValueError:
                pass

        # If the datasource config had a 'seedfrom' entry, then that takes
        # precedence over a 'seedfrom' that was found in a filesystem
        # but not over external media
        if self.ds_cfg.get('seedfrom'):
            found.append("ds_config_seedfrom")
            mydata['meta-data']["seedfrom"] = self.ds_cfg['seedfrom']

        # fields appropriately named can also just come from the datasource
        # config (ie, 'user-data', 'meta-data', 'vendor-data' there)
        if 'user-data' in self.ds_cfg and 'meta-data' in self.ds_cfg:
            mydata = _merge_new_seed(mydata, self.ds_cfg)
            found.append("ds_config")

        def _pp2d_callback(mp, data):
            return util.pathprefix2dict(mp, **data)

        label = self.ds_cfg.get('fs_label', "cidata")
        if label is not None:
            for dev in self._get_devices(label):
                try:
                    LOG.debug("Attempting to use data from %s", dev)

                    try:
                        seeded = util.mount_cb(dev, _pp2d_callback,
                                               pp2d_kwargs)
                    except ValueError:
                        LOG.warning("device %s with label=%s not a "
                                    "valid seed.", dev, label)
                        continue

                    mydata = _merge_new_seed(mydata, seeded)

                    LOG.debug("Using data from %s", dev)
                    found.append(dev)
                    break
                except OSError as e:
                    if e.errno != errno.ENOENT:
                        raise
                except util.MountFailedError:
                    util.logexc(LOG, "Failed to mount %s when looking for "
                                "data", dev)

        # There was no indication on kernel cmdline or data
        # in the seeddir suggesting this handler should be used.
        if len(found) == 0:
            return False

        # The special argument "seedfrom" indicates we should
        # attempt to seed the userdata / metadata from its value
        # its primary value is in allowing the user to type less
        # on the command line, ie: ds=nocloud;s=http://bit.ly/abcdefg
        if "seedfrom" in mydata['meta-data']:
            seedfrom = mydata['meta-data']["seedfrom"]
            seedfound = False
            for proto in self.supported_seed_starts:
                if seedfrom.startswith(proto):
                    seedfound = proto
                    break
            if not seedfound:
                LOG.debug("Seed from %s not supported by %s", seedfrom, self)
                return False

            # This could throw errors, but the user told us to do it
            # so if errors are raised, let them raise
            (md_seed, ud) = util.read_seeded(seedfrom, timeout=None)
            LOG.debug("Using seeded cache data from %s", seedfrom)

            # Values in the command line override those from the seed
            mydata['meta-data'] = util.mergemanydict([mydata['meta-data'],
                                                      md_seed])
            mydata['user-data'] = ud
            found.append(seedfrom)

        # Now that we have exhausted any other places merge in the defaults
        mydata['meta-data'] = util.mergemanydict([mydata['meta-data'],
                                                  defaults])

        self.dsmode = self._determine_dsmode(
            [mydata['meta-data'].get('dsmode')])

        if self.dsmode == sources.DSMODE_DISABLED:
            LOG.debug("%s: not claiming datasource, dsmode=%s", self,
                      self.dsmode)
            return False

        self.seed = ",".join(found)
        self.metadata = mydata['meta-data']
        self.userdata_raw = mydata['user-data']
        self.vendordata_raw = mydata['vendor-data']
        self._network_config = mydata['network-config']
        self._network_eni = mydata['meta-data'].get('network-interfaces')
        return True
Example #27
    def get_data(self):
        found = None
        md = {}
        results = {}
        if os.path.isdir(self.seed_dir):
            try:
                results = read_config_drive(self.seed_dir)
                found = self.seed_dir
            except openstack.NonReadable:
                util.logexc(LOG, "Failed reading config drive from %s",
                            self.seed_dir)
        if not found:
            for dev in find_candidate_devs():
                try:
                    # Set mtype if freebsd and turn off sync
                    if dev.startswith("/dev/cd"):
                        mtype = "cd9660"
                        sync = False
                    else:
                        mtype = None
                        sync = True
                    results = util.mount_cb(dev, read_config_drive,
                                            mtype=mtype, sync=sync)
                    found = dev
                except openstack.NonReadable:
                    pass
                except util.MountFailedError:
                    pass
                except openstack.BrokenMetadata:
                    util.logexc(LOG, "Broken config drive: %s", dev)
                if found:
                    break
        if not found:
            return False

        md = results.get('metadata', {})
        md = util.mergemanydict([md, DEFAULT_METADATA])

        self.dsmode = self._determine_dsmode(
            [results.get('dsmode'), self.ds_cfg.get('dsmode'),
             sources.DSMODE_PASS if results['version'] == 1 else None])

        if self.dsmode == sources.DSMODE_DISABLED:
            return False

        prev_iid = get_previous_iid(self.paths)
        cur_iid = md['instance-id']
        if prev_iid != cur_iid:
            # better would be to handle this centrally, allowing
            # the datasource to do something on new instance id
            # note, networking is only rendered here if dsmode is DSMODE_PASS
            # which means "DISABLED, but render files and networking"
            on_first_boot(results, distro=self.distro,
                          network=self.dsmode == sources.DSMODE_PASS)

        # This is legacy and sneaky.  If dsmode is 'pass' then do not claim
        # the datasource was used, even though we did run on_first_boot above.
        if self.dsmode == sources.DSMODE_PASS:
            LOG.debug("%s: not claiming datasource, dsmode=%s", self,
                      self.dsmode)
            return False

        self.source = found
        self.metadata = md
        self.ec2_metadata = results.get('ec2-metadata')
        self.userdata_raw = results.get('userdata')
        self.version = results['version']
        self.files.update(results.get('files', {}))

        vd = results.get('vendordata')
        self.vendordata_pure = vd
        try:
            self.vendordata_raw = sources.convert_vendordata(vd)
        except ValueError as e:
            LOG.warning("Invalid content in vendor-data: %s", e)
            self.vendordata_raw = None

        # network_config is an /etc/network/interfaces formatted file and is
        # obsolete compared to networkdata (from network_data.json) but both
        # might be present.
        self.network_eni = results.get("network_config")
        self.network_json = results.get('networkdata')
        return True
Example #28
def can_dev_be_reformatted(devpath, preserve_ntfs):
    """Determine if the ephemeral drive at devpath should be reformatted.

    A fresh ephemeral disk is formatted by Azure and will:
      a.) have a partition table (dos or gpt)
      b.) have 1 partition that is ntfs formatted, or
          have 2 partitions with the second partition ntfs formatted.
          (larger instances with >2TB ephemeral disk have gpt, and will
           have a microsoft reserved partition as part 1.  LP: #1686514)
      c.) the ntfs partition will have no files other than possibly
          'dataloss_warning_readme.txt'

    User can indicate that NTFS should never be destroyed by setting
    DS_CFG_KEY_PRESERVE_NTFS in dscfg.
    If data is found on NTFS, user is warned to set DS_CFG_KEY_PRESERVE_NTFS
    to make sure cloud-init does not accidentally wipe their data.
    If cloud-init cannot mount the disk to check for data, destruction
    will be allowed, unless the dscfg key is set."""
    if preserve_ntfs:
        msg = ('config says to never destroy NTFS (%s.%s), skipping checks' %
               (".".join(DS_CFG_PATH), DS_CFG_KEY_PRESERVE_NTFS))
        return False, msg

    if not os.path.exists(devpath):
        return False, 'device %s does not exist' % devpath

    LOG.debug('Resolving realpath of %s -> %s', devpath,
              os.path.realpath(devpath))

    # devpath of /dev/sd[a-z] or /dev/disk/cloud/azure_resource
    # where partitions are "<devpath>1" or "<devpath>-part1" or "<devpath>p1"
    partitions = _partitions_on_device(devpath)
    if len(partitions) == 0:
        return False, 'device %s was not partitioned' % devpath
    elif len(partitions) > 2:
        msg = ('device %s had 3 or more partitions: %s' %
               (devpath, ' '.join([p[1] for p in partitions])))
        return False, msg
    elif len(partitions) == 2:
        cand_part, cand_path = partitions[1]
    else:
        cand_part, cand_path = partitions[0]

    if not _has_ntfs_filesystem(cand_path):
        msg = ('partition %s (%s) on device %s was not ntfs formatted' %
               (cand_part, cand_path, devpath))
        return False, msg

    def count_files(mp):
        ignored = set(['dataloss_warning_readme.txt'])
        return len([f for f in os.listdir(mp) if f.lower() not in ignored])

    bmsg = ('partition %s (%s) on device %s was ntfs formatted' %
            (cand_part, cand_path, devpath))
    try:
        file_count = util.mount_cb(cand_path,
                                   count_files,
                                   mtype="ntfs",
                                   update_env_for_mount={'LANG': 'C'})
    except util.MountFailedError as e:
        if "unknown filesystem type 'ntfs'" in str(e):
            return True, (bmsg + ' but this system cannot mount NTFS,'
                          ' assuming there are no important files.'
                          ' Formatting allowed.')
        return False, bmsg + ' but mount of %s failed: %s' % (cand_part, e)

    if file_count != 0:
        LOG.warning(
            "it looks like you're using NTFS on the ephemeral disk, "
            'to ensure that filesystem does not get wiped, set '
            '%s.%s in config', '.'.join(DS_CFG_PATH), DS_CFG_KEY_PRESERVE_NTFS)
        return False, bmsg + ' but had %d files on it.' % file_count

    return True, bmsg + ' and had no important files. Safe for reformatting.'
Example #29
    def crawl_metadata(self):
        """Walk all instance metadata sources returning a dict on success.

        @return: A dictionary of any metadata content for this instance.
        @raise: InvalidMetaDataException when the expected metadata service is
            unavailable, broken or disabled.
        """
        crawled_data = {}
        # azure removes/ejects the cdrom containing the ovf-env.xml
        # file on reboot.  So, in order to successfully reboot we
        # need to look in the datadir and consider that valid
        ddir = self.ds_cfg['data_dir']

        candidates = [self.seed_dir]
        if os.path.isfile(REPROVISION_MARKER_FILE):
            candidates.insert(0, "IMDS")
        candidates.extend(list_possible_azure_ds_devs())
        if ddir:
            candidates.append(ddir)

        found = None
        reprovision = False
        for cdev in candidates:
            try:
                if cdev == "IMDS":
                    ret = None
                    reprovision = True
                elif cdev.startswith("/dev/"):
                    if util.is_FreeBSD():
                        ret = util.mount_cb(cdev,
                                            load_azure_ds_dir,
                                            mtype="udf",
                                            sync=False)
                    else:
                        ret = util.mount_cb(cdev, load_azure_ds_dir)
                else:
                    ret = load_azure_ds_dir(cdev)

            except NonAzureDataSource:
                continue
            except BrokenAzureDataSource as exc:
                msg = 'BrokenAzureDataSource: %s' % exc
                raise sources.InvalidMetaDataException(msg)
            except util.MountFailedError:
                LOG.warning("%s was not mountable", cdev)
                continue

            perform_reprovision = reprovision or self._should_reprovision(ret)
            if perform_reprovision:
                if util.is_FreeBSD():
                    msg = "FreeBSD is not supported for PPS VMs"
                    LOG.error(msg)
                    raise sources.InvalidMetaDataException(msg)
                ret = self._reprovision()
            imds_md = get_metadata_from_imds(self.fallback_interface,
                                             retries=10)
            (md, userdata_raw, cfg, files) = ret
            self.seed = cdev
            crawled_data.update({
                'cfg': cfg,
                'files': files,
                'metadata': util.mergemanydict([md, {'imds': imds_md}]),
                'userdata_raw': userdata_raw,
            })
            found = cdev

            LOG.debug("found datasource in %s", cdev)
            break

        if not found:
            raise sources.InvalidMetaDataException('No Azure metadata found')

        if found == ddir:
            LOG.debug("using files cached in %s", ddir)

        seed = _get_random_seed()
        if seed:
            crawled_data['metadata']['random_seed'] = seed
        crawled_data['metadata']['instance-id'] = util.read_dmi_data(
            'system-uuid')

        if perform_reprovision:
            LOG.info("Reporting ready to Azure after getting ReprovisionData")
            use_cached_ephemeral = (net.is_up(self.fallback_interface) and
                                    getattr(self, '_ephemeral_dhcp_ctx', None))
            if use_cached_ephemeral:
                self._report_ready(lease=self._ephemeral_dhcp_ctx.lease)
                self._ephemeral_dhcp_ctx.clean_network()  # Teardown ephemeral
            else:
                with EphemeralDHCPv4() as lease:
                    self._report_ready(lease=lease)

        return crawled_data
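
crawl_metadata() above folds the IMDS response into the OVF metadata with util.mergemanydict([md, {'imds': imds_md}]). The sketch below approximates that merge under the assumption that earlier dicts win on conflicting keys, which matches how these examples layer fetched metadata over DEFAULT_METADATA; merge_many_dicts is an illustrative stand-in, not cloud-init's implementation.

def merge_many_dicts(dicts):
    """Recursively merge dicts; earlier entries take precedence on conflict.
    Illustrative stand-in for the util.mergemanydict calls in this listing."""
    merged = {}
    for d in reversed(dicts):  # apply the lowest-priority dicts first
        for key, value in d.items():
            if isinstance(value, dict) and isinstance(merged.get(key), dict):
                merged[key] = merge_many_dicts([value, merged[key]])
            else:
                merged[key] = value
    return merged


md = {"local-hostname": "vm0"}
imds_md = {"network": {"interface": []}}
print(merge_many_dicts([md, {"imds": imds_md}]))
# {'local-hostname': 'vm0', 'imds': {'network': {'interface': []}}}
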
Ejemplo n.º 30
0
    def get_data(self):
        defaults = {
            "instance-id": "nocloud",
            "dsmode": self.dsmode,
        }

        found = []
        mydata = {'meta-data': {}, 'user-data': "", 'vendor-data': ""}

        try:
            # Parse the kernel command line, getting data passed in
            md = {}
            if parse_cmdline_data(self.cmdline_id, md):
                found.append("cmdline")
            mydata['meta-data'].update(md)
        except:
            util.logexc(LOG, "Unable to parse command line data")
            return False

        # Check to see if the seed dir has data.
        pp2d_kwargs = {
            'required': ['user-data', 'meta-data'],
            'optional': ['vendor-data']
        }

        try:
            seeded = util.pathprefix2dict(self.seed_dir, **pp2d_kwargs)
            found.append(self.seed_dir)
            LOG.debug("Using seeded data from %s", self.seed_dir)
        except ValueError as e:
            pass

        if self.seed_dir in found:
            mydata = _merge_new_seed(mydata, seeded)

        # If the datasource config had a 'seedfrom' entry, then that takes
        # precedence over a 'seedfrom' that was found in a filesystem
        # but not over external media
        if self.ds_cfg.get('seedfrom'):
            found.append("ds_config_seedfrom")
            mydata['meta-data']["seedfrom"] = self.ds_cfg['seedfrom']

        # fields appropriately named can also just come from the datasource
        # config (ie, 'user-data', 'meta-data', 'vendor-data' there)
        if 'user-data' in self.ds_cfg and 'meta-data' in self.ds_cfg:
            mydata = _merge_new_seed(mydata, self.ds_cfg)
            found.append("ds_config")

        def _pp2d_callback(mp, data):
            return util.pathprefix2dict(mp, **data)

        label = self.ds_cfg.get('fs_label', "cidata")
        if label is not None:
            # Query optical drive to get it in blkid cache for 2.6 kernels
            util.find_devs_with(path="/dev/sr0")
            util.find_devs_with(path="/dev/sr1")

            fslist = util.find_devs_with("TYPE=vfat")
            fslist.extend(util.find_devs_with("TYPE=iso9660"))

            label_list = util.find_devs_with("LABEL=%s" % label)
            devlist = list(set(fslist) & set(label_list))
            devlist.sort(reverse=True)

            for dev in devlist:
                try:
                    LOG.debug("Attempting to use data from %s", dev)

                    try:
                        seeded = util.mount_cb(dev, _pp2d_callback,
                                               pp2d_kwargs)
                    except ValueError as e:
                        if dev in label_list:
                            LOG.warn(
                                "device %s with label=%s not a "
                                "valid seed.", dev, label)
                        continue

                    mydata = _merge_new_seed(mydata, seeded)

                    # For seed from a device, the default mode is 'net'.
                    # that is more likely to be what is desired.  If they want
                    # dsmode of local, then they must specify that.
                    if 'dsmode' not in mydata['meta-data']:
                        mydata['meta-data']['dsmode'] = "net"

                    LOG.debug("Using data from %s", dev)
                    found.append(dev)
                    break
                except OSError as e:
                    if e.errno != errno.ENOENT:
                        raise
                except util.MountFailedError:
                    util.logexc(LOG, "Failed to mount %s when looking for "
                                "data", dev)

        # There was no indication on kernel cmdline or data
        # in the seeddir suggesting this handler should be used.
        if len(found) == 0:
            return False

        seeded_interfaces = None

        # The special argument "seedfrom" indicates we should
        # attempt to seed the userdata / metadata from its value
        # its primary value is in allowing the user to type less
        # on the command line, ie: ds=nocloud;s=http://bit.ly/abcdefg
        if "seedfrom" in mydata['meta-data']:
            seedfrom = mydata['meta-data']["seedfrom"]
            seedfound = False
            for proto in self.supported_seed_starts:
                if seedfrom.startswith(proto):
                    seedfound = proto
                    break
            if not seedfound:
                LOG.debug("Seed from %s not supported by %s", seedfrom, self)
                return False

            if 'network-interfaces' in mydata['meta-data']:
                seeded_interfaces = self.dsmode

            # This could throw errors, but the user told us to do it
            # so if errors are raised, let them raise
            (md_seed, ud) = util.read_seeded(seedfrom, timeout=None)
            LOG.debug("Using seeded cache data from %s", seedfrom)

            # Values in the command line override those from the seed
            mydata['meta-data'] = util.mergemanydict(
                [mydata['meta-data'], md_seed])
            mydata['user-data'] = ud
            found.append(seedfrom)

        # Now that we have exhausted any other places merge in the defaults
        mydata['meta-data'] = util.mergemanydict(
            [mydata['meta-data'], defaults])

        # Update the network-interfaces if metadata had 'network-interfaces'
        # entry and this is the local datasource, or 'seedfrom' was used
        # and the source of the seed was self.dsmode
        # ('local' for NoCloud, 'net' for NoCloudNet')
        if ('network-interfaces' in mydata['meta-data']
                and (self.dsmode in ("local", seeded_interfaces))):
            LOG.debug("Updating network interfaces from %s", self)
            self.distro.apply_network(
                mydata['meta-data']['network-interfaces'])

        if mydata['meta-data']['dsmode'] == self.dsmode:
            self.seed = ",".join(found)
            self.metadata = mydata['meta-data']
            self.userdata_raw = mydata['user-data']
            self.vendordata_raw = mydata['vendor-data']
            return True

        LOG.debug("%s: not claiming datasource, dsmode=%s", self, md['dsmode'])
        return False
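
In the NoCloud example above, _pp2d_callback hands the mountpoint to util.pathprefix2dict with required ['user-data', 'meta-data'] and optional ['vendor-data'], and a ValueError from that call is what marks a labelled device as not a valid seed. A minimal sketch of that behaviour, assuming the helper simply reads the named files from the directory and raises ValueError when a required one is missing (path_prefix_to_dict is an illustrative name):

import os


def path_prefix_to_dict(base, required=None, optional=None):
    """Read the files named in 'required' and 'optional' from directory
    'base' and return {name: contents}; raise ValueError when a required
    file is missing. Sketch of the contract exercised above."""
    required = required or []
    optional = optional or []
    found = {}
    missing = []
    for name in required + optional:
        path = os.path.join(base, name)
        if os.path.isfile(path):
            with open(path, "rb") as fh:
                found[name] = fh.read()
        elif name in required:
            missing.append(name)
    if missing:
        raise ValueError("Missing required files: %s" % ", ".join(missing))
    return found
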
Ejemplo n.º 31
0
    def get_data(self):
        # azure removes/ejects the cdrom containing the ovf-env.xml
        # file on reboot.  So, in order to successfully reboot we
        # need to look in the datadir and consider that valid
        ddir = self.ds_cfg['data_dir']

        candidates = [self.seed_dir]
        candidates.extend(list_possible_azure_ds_devs())
        if ddir:
            candidates.append(ddir)

        found = None

        for cdev in candidates:
            try:
                if cdev.startswith("/dev/"):
                    ret = util.mount_cb(cdev, load_azure_ds_dir)
                else:
                    ret = load_azure_ds_dir(cdev)

            except NonAzureDataSource:
                continue
            except BrokenAzureDataSource as exc:
                raise exc
            except util.MountFailedError:
                LOG.warn("%s was not mountable", cdev)
                continue

            (md, self.userdata_raw, cfg, files) = ret
            self.seed = cdev
            self.metadata = util.mergemanydict([md, DEFAULT_METADATA])
            self.cfg = util.mergemanydict([cfg, BUILTIN_CLOUD_CONFIG])
            found = cdev

            LOG.debug("found datasource in %s", cdev)
            break

        if not found:
            return False

        if found == ddir:
            LOG.debug("using files cached in %s", ddir)

        # azure / hyper-v provides random data here
        seed = util.load_file("/sys/firmware/acpi/tables/OEM0",
                              quiet=True, decode=False)
        if seed:
            self.metadata['random_seed'] = seed

        # now update ds_cfg to reflect contents passed in config
        user_ds_cfg = util.get_cfg_by_path(self.cfg, DS_CFG_PATH, {})
        self.ds_cfg = util.mergemanydict([user_ds_cfg, self.ds_cfg])

        # walinux agent writes files world readable, but expects
        # the directory to be protected.
        write_files(ddir, files, dirmode=0o700)

        if self.ds_cfg['agent_command'] == '__builtin__':
            metadata_func = get_metadata_from_fabric
        else:
            metadata_func = self.get_metadata_from_agent
        try:
            fabric_data = metadata_func()
        except Exception as exc:
            LOG.info("Error communicating with Azure fabric; assume we aren't"
                     " on Azure.", exc_info=True)
            return False

        self.metadata['instance-id'] = util.read_dmi_data('system-uuid')
        self.metadata.update(fabric_data)

        found_ephemeral = find_fabric_formatted_ephemeral_disk()
        if found_ephemeral:
            self.ds_cfg['disk_aliases']['ephemeral0'] = found_ephemeral
            LOG.debug("using detected ephemeral0 of %s", found_ephemeral)

        cc_modules_override = support_new_ephemeral(self.sys_cfg)
        if cc_modules_override:
            self.cfg['cloud_config_modules'] = cc_modules_override

        return True
Ejemplo n.º 32
0
    def get_data(self):
        # azure removes/ejects the cdrom containing the ovf-env.xml
        # file on reboot.  So, in order to successfully reboot we
        # need to look in the datadir and consider that valid
        ddir = self.ds_cfg["data_dir"]

        candidates = [self.seed_dir]
        candidates.extend(list_possible_azure_ds_devs())
        if ddir:
            candidates.append(ddir)

        found = None

        for cdev in candidates:
            try:
                if cdev.startswith("/dev/"):
                    ret = util.mount_cb(cdev, load_azure_ds_dir)
                else:
                    ret = load_azure_ds_dir(cdev)

            except NonAzureDataSource:
                continue
            except BrokenAzureDataSource as exc:
                raise exc
            except util.MountFailedError:
                LOG.warn("%s was not mountable", cdev)
                continue

            (md, self.userdata_raw, cfg, files) = ret
            self.seed = cdev
            self.metadata = util.mergemanydict([md, DEFAULT_METADATA])
            self.cfg = util.mergemanydict([cfg, BUILTIN_CLOUD_CONFIG])
            found = cdev

            LOG.debug("found datasource in %s", cdev)
            break

        if not found:
            return False

        if found == ddir:
            LOG.debug("using files cached in %s", ddir)

        # azure / hyper-v provides random data here
        seed = util.load_file("/sys/firmware/acpi/tables/OEM0", quiet=True, decode=False)
        if seed:
            self.metadata["random_seed"] = seed

        # now update ds_cfg to reflect contents passed in config
        user_ds_cfg = util.get_cfg_by_path(self.cfg, DS_CFG_PATH, {})
        self.ds_cfg = util.mergemanydict([user_ds_cfg, self.ds_cfg])

        if found != ddir:
            cached_ovfenv = util.load_file(os.path.join(ddir, "ovf-env.xml"), quiet=True, decode=False)
            if cached_ovfenv != files["ovf-env.xml"]:
                # source was not walinux-agent's datadir, so we have to clean
                # up so 'wait_for_files' doesn't return early due to stale data
                cleaned = []
                for f in [os.path.join(ddir, f) for f in DATA_DIR_CLEAN_LIST]:
                    if os.path.exists(f):
                        util.del_file(f)
                        cleaned.append(f)
                if cleaned:
                    LOG.info("removed stale file(s) in '%s': %s", ddir, str(cleaned))

        # walinux agent writes files world readable, but expects
        # the directory to be protected.
        write_files(ddir, files, dirmode=0o700)

        if self.ds_cfg["agent_command"] == "__builtin__":
            metadata_func = get_metadata_from_fabric
        else:
            metadata_func = self.get_metadata_from_agent
        try:
            fabric_data = metadata_func()
        except Exception as exc:
            LOG.info("Error communicating with Azure fabric; assume we aren't" " on Azure.", exc_info=True)
            return False

        self.metadata.update(fabric_data)

        found_ephemeral = find_fabric_formatted_ephemeral_disk()
        if found_ephemeral:
            self.ds_cfg["disk_aliases"]["ephemeral0"] = found_ephemeral
            LOG.debug("using detected ephemeral0 of %s", found_ephemeral)

        cc_modules_override = support_new_ephemeral(self.sys_cfg)
        if cc_modules_override:
            self.cfg["cloud_config_modules"] = cc_modules_override

        return True
Ejemplo n.º 33
0
    def get_data(self):
        found = None
        md = {}

        results = {}
        if os.path.isdir(self.seed_dir):
            try:
                results = read_config_drive_dir(self.seed_dir)
                found = self.seed_dir
            except NonConfigDriveDir:
                util.logexc(LOG, "Failed reading config drive from %s",
                            self.seed_dir)
        if not found:
            devlist = find_candidate_devs()
            for dev in devlist:
                try:
                    results = util.mount_cb(dev, read_config_drive_dir)
                    found = dev
                    break
                except (NonConfigDriveDir, util.MountFailedError):
                    pass
                except BrokenConfigDriveDir:
                    util.logexc(LOG, "broken config drive: %s", dev)

        if not found:
            return False

        md = results['metadata']
        md = util.mergemanydict([md, DEFAULT_METADATA])

        # Perform some metadata 'fixups'
        #
        # OpenStack uses the 'hostname' key
        # while most of cloud-init uses the metadata
        # 'local-hostname' key instead so if it doesn't
        # exist we need to make sure its copied over.
        for (tgt, src) in [('local-hostname', 'hostname')]:
            if tgt not in md and src in md:
                md[tgt] = md[src]

        user_dsmode = results.get('dsmode', None)
        if user_dsmode not in VALID_DSMODES + (None,):
            LOG.warn("user specified invalid mode: %s" % user_dsmode)
            user_dsmode = None

        dsmode = get_ds_mode(cfgdrv_ver=results['cfgdrive_ver'],
                             ds_cfg=self.ds_cfg.get('dsmode'),
                             user=user_dsmode)

        if dsmode == "disabled":
            # most likely user specified
            return False

        # TODO(smoser): fix this, its dirty.
        # we want to do some things (writing files and network config)
        # only on first boot, and even then, we want to do so in the
        # local datasource (so they happen earlier) even if the configured
        # dsmode is 'net' or 'pass'. To do this, we check the previous
        # instance-id
        prev_iid = get_previous_iid(self.paths)
        cur_iid = md['instance-id']
        if prev_iid != cur_iid and self.dsmode == "local":
            self.helper.on_first_boot(results)

        # dsmode != self.dsmode here if:
        #  * dsmode = "pass",  pass means it should only copy files and then
        #    pass to another datasource
        #  * dsmode = "net" and self.dsmode = "local"
        #    so that user boothooks would be applied with network, the
        #    local datasource just gets out of the way, and lets the net claim
        if dsmode != self.dsmode:
            LOG.debug("%s: not claiming datasource, dsmode=%s", self, dsmode)
            return False

        self.source = found
        self.metadata = md
        self.ec2_metadata = results.get('ec2-metadata')
        self.userdata_raw = results.get('userdata')
        self.version = results['cfgdrive_ver']

        return True
Ejemplo n.º 34
0
    def get_data(self):
        defaults = {"instance-id": DEFAULT_IID}
        results = None
        seed = None

        # decide parseuser for context.sh shell reader
        parseuser = DEFAULT_PARSEUSER
        if 'parseuser' in self.ds_cfg:
            parseuser = self.ds_cfg.get('parseuser')

        candidates = [self.seed_dir]
        candidates.extend(find_candidate_devs())
        for cdev in candidates:
            try:
                if os.path.isdir(self.seed_dir):
                    results = read_context_disk_dir(cdev, asuser=parseuser)
                elif cdev.startswith("/dev"):
                    results = util.mount_cb(cdev, read_context_disk_dir,
                                            data=parseuser)
            except NonContextDiskDir:
                continue
            except BrokenContextDiskDir as exc:
                raise exc
            except util.MountFailedError:
                LOG.warn("%s was not mountable" % cdev)

            if results:
                seed = cdev
                LOG.debug("found datasource in %s", cdev)
                break

        if not seed:
            return False

        # merge fetched metadata with datasource defaults
        md = results['metadata']
        md = util.mergemanydict([md, defaults])

        # check for valid user specified dsmode
        user_dsmode = results['metadata'].get('DSMODE', None)
        if user_dsmode not in VALID_DSMODES + (None,):
            LOG.warn("user specified invalid mode: %s", user_dsmode)
            user_dsmode = None

        # decide dsmode
        if user_dsmode:
            dsmode = user_dsmode
        elif self.ds_cfg.get('dsmode'):
            dsmode = self.ds_cfg.get('dsmode')
        else:
            dsmode = DEFAULT_MODE

        if dsmode == "disabled":
            # most likely user specified
            return False

        # apply static network configuration only in 'local' dsmode
        if ('network-interfaces' in results and self.dsmode == "local"):
            LOG.debug("Updating network interfaces from %s", self)
            self.distro.apply_network(results['network-interfaces'])

        if dsmode != self.dsmode:
            LOG.debug("%s: not claiming datasource, dsmode=%s", self, dsmode)
            return False

        self.seed = seed
        self.metadata = md
        self.userdata_raw = results.get('userdata')
        return True
Ejemplo n.º 35
0
        try:
            cmd = CMD_UDEVADM_SETTLE
            cmd.append('--exit-if-exists=' + floppy_dev)
            (cmd_out, _err) = util.subp(cmd)
            LOG.debug(('Command: %s\nOutput%s') % (' '.join(cmd), cmd_out))
        except ProcessExecutionError as _err:
            util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd),
                        _err.message)
            return False
        except OSError as _err:
            util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd),
                        _err.message)
            return False

        return_str = None
        try:
            return_str = util.mount_cb(floppy_dev, read_user_data_callback)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
        except util.MountFailedError:
            util.logexc(LOG, "Failed to mount %s when looking for user data",
                        floppy_dev)

        self.userdata_raw = return_str
        self.metadata = META_DATA_NOT_SUPPORTED

        if return_str:
            return True
        else:
            return False
Ejemplo n.º 36
0
 def test_typeerror_raised_for_invalid_mtype(self, invalid_mtype):
     with pytest.raises(TypeError):
         util.mount_cb(mock.Mock(), mock.Mock(), mtype=invalid_mtype)
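
The test above pins down one edge of the mount_cb contract: mtype must be a string, a list, or None, and anything else raises TypeError. A hedged sketch of that validation step follows; normalize_mtypes is an invented name, and the real util.mount_cb also substitutes platform-specific defaults when mtype is None.

def normalize_mtypes(mtype):
    """Return mtype as a list of candidate filesystem types, accepting only
    str, list, or None -- the behaviour the TypeError test above exercises."""
    if isinstance(mtype, str):
        return [mtype]
    if isinstance(mtype, list):
        return mtype
    if mtype is None:
        return []
    raise TypeError(
        "Unsupported type provided for mtype parameter: %r" % (mtype,))
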
Ejemplo n.º 37
0
    def get_data(self):
        # azure removes/ejects the cdrom containing the ovf-env.xml
        # file on reboot.  So, in order to successfully reboot we
        # need to look in the datadir and consider that valid
        ddir = self.ds_cfg['data_dir']

        candidates = [self.seed_dir]
        candidates.extend(list_possible_azure_ds_devs())
        if ddir:
            candidates.append(ddir)

        found = None

        for cdev in candidates:
            try:
                if cdev.startswith("/dev/"):
                    ret = util.mount_cb(cdev, load_azure_ds_dir)
                else:
                    ret = load_azure_ds_dir(cdev)

            except NonAzureDataSource:
                continue
            except BrokenAzureDataSource as exc:
                raise exc
            except util.MountFailedError:
                LOG.warn("%s was not mountable" % cdev)
                continue

            (md, self.userdata_raw, cfg, files) = ret
            self.seed = cdev
            self.metadata = util.mergemanydict([md, DEFAULT_METADATA])
            self.cfg = util.mergemanydict([cfg, BUILTIN_CLOUD_CONFIG])
            found = cdev

            LOG.debug("found datasource in %s", cdev)
            break

        if not found:
            return False

        if found == ddir:
            LOG.debug("using files cached in %s", ddir)

        # azure / hyper-v provides random data here
        seed = util.load_file("/sys/firmware/acpi/tables/OEM0", quiet=True)
        if seed:
            self.metadata['random_seed'] = seed

        # now update ds_cfg to reflect contents passed in config
        user_ds_cfg = util.get_cfg_by_path(self.cfg, DS_CFG_PATH, {})
        self.ds_cfg = util.mergemanydict([user_ds_cfg, self.ds_cfg])
        mycfg = self.ds_cfg

        # walinux agent writes files world readable, but expects
        # the directory to be protected.
        write_files(mycfg['data_dir'], files, dirmode=0o700)

        # handle the hostname 'publishing'
        try:
            handle_set_hostname(mycfg.get('set_hostname'),
                                self.metadata.get('local-hostname'),
                                mycfg['hostname_bounce'])
        except Exception as e:
            LOG.warn("Failed publishing hostname: %s" % e)
            util.logexc(LOG, "handling set_hostname failed")

        try:
            invoke_agent(mycfg['agent_command'])
        except util.ProcessExecutionError:
            # claim the datasource even if the command failed
            util.logexc(LOG, "agent command '%s' failed.",
                        mycfg['agent_command'])

        shcfgxml = os.path.join(mycfg['data_dir'], "SharedConfig.xml")
        wait_for = [shcfgxml]

        fp_files = []
        for pk in self.cfg.get('_pubkeys', []):
            bname = str(pk['fingerprint'] + ".crt")
            fp_files += [os.path.join(mycfg['data_dir'], bname)]

        missing = util.log_time(logfunc=LOG.debug, msg="waiting for files",
                                func=wait_for_files,
                                args=(wait_for + fp_files,))
        if len(missing):
            LOG.warn("Did not find files, but going on: %s", missing)

        if shcfgxml in missing:
            LOG.warn("SharedConfig.xml missing, using static instance-id")
        else:
            try:
                self.metadata['instance-id'] = iid_from_shared_config(shcfgxml)
            except ValueError as e:
                LOG.warn("failed to get instance id in %s: %s" % (shcfgxml, e))

        pubkeys = pubkeys_from_crt_files(fp_files)

        self.metadata['public-keys'] = pubkeys
        return True
Ejemplo n.º 38
0
    def test_already_mounted_does_not_mount_or_umount_anything(
            self, m_subp, already_mounted_device):
        util.mount_cb(already_mounted_device, mock.Mock())

        assert 0 == m_subp.call_count
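
This test asserts that mount_cb runs no mount/umount subprocesses when the device is already mounted and simply reuses the existing mountpoint. A rough sketch of how such a check can be made from /proc/mounts (illustrative only, not cloud-init's parser):

def find_existing_mountpoint(device):
    """Return the current mountpoint of 'device' from /proc/mounts, or None.
    Rough sketch of the 'already mounted' short-circuit tested above."""
    with open("/proc/mounts") as fh:
        for line in fh:
            fields = line.split()
            if len(fields) >= 2 and fields[0] == device:
                # /proc/mounts octal-escapes spaces in paths as \040
                return fields[1].replace("\\040", " ")
    return None
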
Ejemplo n.º 39
0
    def get_data(self):
        # azure removes/ejects the cdrom containing the ovf-env.xml
        # file on reboot.  So, in order to successfully reboot we
        # need to look in the datadir and consider that valid
        ddir = self.ds_cfg['data_dir']

        candidates = [self.seed_dir]
        candidates.extend(list_possible_azure_ds_devs())
        if ddir:
            candidates.append(ddir)

        found = None

        for cdev in candidates:
            try:
                if cdev.startswith("/dev/"):
                    ret = util.mount_cb(cdev, load_azure_ds_dir)
                else:
                    ret = load_azure_ds_dir(cdev)

            except NonAzureDataSource:
                continue
            except BrokenAzureDataSource as exc:
                raise exc
            except util.MountFailedError:
                LOG.warn("%s was not mountable", cdev)
                continue

            (md, self.userdata_raw, cfg, files) = ret
            self.seed = cdev
            self.metadata = util.mergemanydict([md, DEFAULT_METADATA])
            self.cfg = util.mergemanydict([cfg, BUILTIN_CLOUD_CONFIG])
            found = cdev

            LOG.debug("found datasource in %s", cdev)
            break

        if not found:
            return False

        if found == ddir:
            LOG.debug("using files cached in %s", ddir)

        # azure / hyper-v provides random data here
        seed = util.load_file("/sys/firmware/acpi/tables/OEM0",
                              quiet=True,
                              decode=False)
        if seed:
            self.metadata['random_seed'] = seed

        # now update ds_cfg to reflect contents passed in config
        user_ds_cfg = util.get_cfg_by_path(self.cfg, DS_CFG_PATH, {})
        self.ds_cfg = util.mergemanydict([user_ds_cfg, self.ds_cfg])

        # walinux agent writes files world readable, but expects
        # the directory to be protected.
        write_files(ddir, files, dirmode=0o700)

        if self.ds_cfg['agent_command'] == AGENT_START_BUILTIN:
            metadata_func = partial(
                get_metadata_from_fabric,
                fallback_lease_file=self.dhclient_lease_file)
        else:
            metadata_func = self.get_metadata_from_agent

        try:
            fabric_data = metadata_func()
        except Exception as exc:
            LOG.info(
                "Error communicating with Azure fabric; assume we aren't"
                " on Azure.",
                exc_info=True)
            return False
        self.metadata['instance-id'] = util.read_dmi_data('system-uuid')
        self.metadata.update(fabric_data)

        return True
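
The example above builds metadata_func with functools.partial so that fallback_lease_file is bound up front and the later call site stays a bare metadata_func(). A small self-contained illustration of that pattern; fetch_metadata and the lease path below are placeholders, not cloud-init names.

from functools import partial


def fetch_metadata(fallback_lease_file=None):
    """Placeholder standing in for get_metadata_from_fabric."""
    return {"lease": fallback_lease_file}


metadata_func = partial(fetch_metadata,
                        fallback_lease_file="/var/lib/dhcp/dhclient.eth0.leases")
# Later the datasource calls it with no arguments, as in the example above:
assert metadata_func() == {"lease": "/var/lib/dhcp/dhclient.eth0.leases"}
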
Ejemplo n.º 40
0
    def get_data(self):
        defaults = {
            "instance-id": "nocloud",
            "dsmode": self.dsmode,
        }

        found = []
        md = {}
        ud = ""

        try:
            # Parse the kernel command line, getting data passed in
            if parse_cmdline_data(self.cmdline_id, md):
                found.append("cmdline")
        except:
            util.logexc(LOG, "Unable to parse command line data")
            return False

        # Check to see if the seed dir has data.
        seedret = {}
        if util.read_optional_seed(seedret, base=self.seed_dir + "/"):
            md = util.mergemanydict([md, seedret['meta-data']])
            ud = seedret['user-data']
            found.append(self.seed_dir)
            LOG.debug("Using seeded cache data from %s", self.seed_dir)

        # If the datasource config had a 'seedfrom' entry, then that takes
        # precedence over a 'seedfrom' that was found in a filesystem
        # but not over external media
        if 'seedfrom' in self.ds_cfg and self.ds_cfg['seedfrom']:
            found.append("ds_config")
            md["seedfrom"] = self.ds_cfg['seedfrom']

        # if ds_cfg has 'user-data' and 'meta-data'
        if 'user-data' in self.ds_cfg and 'meta-data' in self.ds_cfg:
            if self.ds_cfg['user-data']:
                ud = self.ds_cfg['user-data']
            if self.ds_cfg['meta-data'] is not False:
                md = util.mergemanydict([md, self.ds_cfg['meta-data']])
            if 'ds_config' not in found:
                found.append("ds_config")

        label = self.ds_cfg.get('fs_label', "cidata")
        if label is not None:
            # Query optical drive to get it in blkid cache for 2.6 kernels
            util.find_devs_with(path="/dev/sr0")
            util.find_devs_with(path="/dev/sr1")

            fslist = util.find_devs_with("TYPE=vfat")
            fslist.extend(util.find_devs_with("TYPE=iso9660"))

            label_list = util.find_devs_with("LABEL=%s" % label)
            devlist = list(set(fslist) & set(label_list))
            devlist.sort(reverse=True)

            for dev in devlist:
                try:
                    LOG.debug("Attempting to use data from %s", dev)

                    (newmd, newud) = util.mount_cb(dev, util.read_seeded)
                    md = util.mergemanydict([newmd, md])
                    ud = newud

                    # For seed from a device, the default mode is 'net'.
                    # that is more likely to be what is desired.  If they want
                    # dsmode of local, then they must specify that.
                    if 'dsmode' not in md:
                        md['dsmode'] = "net"

                    LOG.debug("Using data from %s", dev)
                    found.append(dev)
                    break
                except OSError as e:
                    if e.errno != errno.ENOENT:
                        raise
                except util.MountFailedError:
                    util.logexc(LOG, "Failed to mount %s when looking for "
                                "data", dev)

        # There was no indication on kernel cmdline or data
        # in the seeddir suggesting this handler should be used.
        if len(found) == 0:
            return False

        seeded_interfaces = None

        # The special argument "seedfrom" indicates we should
        # attempt to seed the userdata / metadata from its value
        # its primary value is in allowing the user to type less
        # on the command line, ie: ds=nocloud;s=http://bit.ly/abcdefg
        if "seedfrom" in md:
            seedfrom = md["seedfrom"]
            seedfound = False
            for proto in self.supported_seed_starts:
                if seedfrom.startswith(proto):
                    seedfound = proto
                    break
            if not seedfound:
                LOG.debug("Seed from %s not supported by %s", seedfrom, self)
                return False

            if 'network-interfaces' in md:
                seeded_interfaces = self.dsmode

            # This could throw errors, but the user told us to do it
            # so if errors are raised, let them raise
            (md_seed, ud) = util.read_seeded(seedfrom, timeout=None)
            LOG.debug("Using seeded cache data from %s", seedfrom)

            # Values in the command line override those from the seed
            md = util.mergemanydict([md, md_seed])
            found.append(seedfrom)

        # Now that we have exhausted any other places merge in the defaults
        md = util.mergemanydict([md, defaults])

        # Update the network-interfaces if metadata had 'network-interfaces'
        # entry and this is the local datasource, or 'seedfrom' was used
        # and the source of the seed was self.dsmode
        # ('local' for NoCloud, 'net' for NoCloudNet')
        if ('network-interfaces' in md and
            (self.dsmode in ("local", seeded_interfaces))):
            LOG.debug("Updating network interfaces from %s", self)
            self.distro.apply_network(md['network-interfaces'])

        if md['dsmode'] == self.dsmode:
            self.seed = ",".join(found)
            self.metadata = md
            self.userdata_raw = ud
            return True

        LOG.debug("%s: not claiming datasource, dsmode=%s", self, md['dsmode'])
        return False
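
Several of the NoCloud examples in this listing build their candidate device list by intersecting find_devs_with("TYPE=vfat") / find_devs_with("TYPE=iso9660") results with find_devs_with("LABEL=cidata"). At the shell level that lookup is essentially a blkid tag query; the helper below sketches that one piece (the real find_devs_with supports more lookup forms than shown here).

import subprocess


def find_devs_with_blkid(criteria):
    """Return block devices matching a blkid tag such as 'TYPE=iso9660'
    or 'LABEL=cidata'. Sketch only; assumes blkid is installed."""
    try:
        out = subprocess.check_output(
            ["blkid", "-t", criteria, "-o", "device"], text=True)
    except subprocess.CalledProcessError:
        return []  # blkid exits non-zero when nothing matches
    return [line.strip() for line in out.splitlines() if line.strip()]


# Mirroring the intersection done in the examples:
# devlist = sorted(set(find_devs_with_blkid("TYPE=iso9660")) &
#                  set(find_devs_with_blkid("LABEL=cidata")), reverse=True)
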
Ejemplo n.º 41
0
    def get_data(self):
        # azure removes/ejects the cdrom containing the ovf-env.xml
        # file on reboot.  So, in order to successfully reboot we
        # need to look in the datadir and consider that valid
        ddir = self.ds_cfg['data_dir']

        candidates = [self.seed_dir]
        candidates.extend(list_possible_azure_ds_devs())
        if ddir:
            candidates.append(ddir)

        found = None

        for cdev in candidates:
            try:
                if cdev.startswith("/dev/"):
                    ret = util.mount_cb(cdev, load_azure_ds_dir)
                else:
                    ret = load_azure_ds_dir(cdev)

            except NonAzureDataSource:
                continue
            except BrokenAzureDataSource as exc:
                raise exc
            except util.MountFailedError:
                LOG.warn("%s was not mountable", cdev)
                continue

            (md, self.userdata_raw, cfg, files) = ret
            self.seed = cdev
            self.metadata = util.mergemanydict([md, IB_DEFAULT_METADATA])
            self.cfg = util.mergemanydict([cfg, IB_BUILTIN_CLOUD_CONFIG])
            found = cdev

            LOG.debug("found datasource in %s", cdev)
            break

        if not found:
            return False

        if found == ddir:
            LOG.debug("using files cached in %s", ddir)

        # azure / hyper-v provides random data here
        seed = util.load_file("/sys/firmware/acpi/tables/OEM0", quiet=True)
        if seed:
            self.metadata['random_seed'] = seed

        # now update ds_cfg to reflect contents passed in config
        user_ds_cfg = util.get_cfg_by_path(self.cfg, IB_DS_CFG_PATH, {})
        self.ds_cfg = util.mergemanydict([user_ds_cfg, self.ds_cfg])
        mycfg = self.ds_cfg
        ddir = mycfg['data_dir']

        if found != ddir:
            cached_ovfenv = util.load_file(os.path.join(ddir, 'ovf-env.xml'),
                                           quiet=True)
            if cached_ovfenv != files['ovf-env.xml']:
                # source was not walinux-agent's datadir, so we have to clean
                # up so 'wait_for_files' doesn't return early due to stale data
                cleaned = []
                for f in [
                        os.path.join(ddir, f) for f in IB_DATA_DIR_CLEAN_LIST
                ]:
                    if os.path.exists(f):
                        util.del_file(f)
                        cleaned.append(f)
                if cleaned:
                    LOG.info("removed stale file(s) in '%s': %s", ddir,
                             str(cleaned))

        # walinux agent writes files world readable, but expects
        # the directory to be protected.
        write_files(ddir, files, dirmode=0o700)

        # handle the hostname 'publishing'
        try:
            handle_set_hostname(mycfg.get('set_hostname'),
                                self.metadata.get('local-hostname'),
                                mycfg['hostname_bounce'])
        except Exception as e:
            LOG.warn("Failed publishing hostname: %s", e)
            util.logexc(LOG, "handling set_hostname failed")

        try:
            invoke_agent(mycfg['agent_command'])
        except util.ProcessExecutionError:
            # claim the datasource even if the command failed
            util.logexc(LOG, "agent command '%s' failed.",
                        mycfg['agent_command'])

        # code is commented out because we have no WAAgent and
        #   we do not use SSH keys. Thus, no files to wait for.

        # shcfgxml = os.path.join(ddir, "SharedConfig.xml")
        # wait_for = [shcfgxml]

        # fp_files = []
        # for pk in self.cfg.get('_pubkeys', []):
        #     bname = str(pk['fingerprint'] + ".crt")
        #     fp_files += [os.path.join(ddir, bname)]

        # missing = util.log_time(logfunc=LOG.debug, msg="waiting for files",
        #                         func=wait_for_files,
        #                         args=(wait_for + fp_files,))
        # if len(missing):
        #     LOG.warn("Did not find files, but going on: %s", missing)

        # if shcfgxml in missing:
        #     LOG.warn("SharedConfig.xml missing, using static instance-id")
        # else:
        #     try:
        #         self.metadata['instance-id'] = \
        #             iid_from_shared_config(shcfgxml)
        #     except ValueError as e:
        #         LOG.warn("failed to get instance id in %s: %s", shcfgxml, e)

        # pubkeys = pubkeys_from_crt_files(fp_files)
        # self.metadata['public-keys'] = pubkeys

        found_ephemeral = find_ephemeral_disk()
        if found_ephemeral:
            self.ds_cfg['disk_aliases']['ephemeral0'] = found_ephemeral
            LOG.debug("using detected ephemeral0 of %s", found_ephemeral)

        cc_modules_override = support_new_ephemeral(self.sys_cfg)
        if cc_modules_override:
            self.cfg['cloud_config_modules'] = cc_modules_override

        return True
Ejemplo n.º 42
0
    def get_data(self):
        # azure removes/ejects the cdrom containing the ovf-env.xml
        # file on reboot.  So, in order to successfully reboot we
        # need to look in the datadir and consider that valid
        ddir = self.ds_cfg['data_dir']

        candidates = [self.seed_dir]
        candidates.extend(list_possible_azure_ds_devs())
        if ddir:
            candidates.append(ddir)

        found = None

        for cdev in candidates:
            try:
                if cdev.startswith("/dev/"):
                    ret = util.mount_cb(cdev, load_azure_ds_dir)
                else:
                    ret = load_azure_ds_dir(cdev)

            except NonAzureDataSource:
                continue
            except BrokenAzureDataSource as exc:
                raise exc
            except util.MountFailedError:
                LOG.warn("%s was not mountable" % cdev)
                continue

            (md, self.userdata_raw, cfg, files) = ret
            self.seed = cdev
            self.metadata = util.mergemanydict([md, DEFAULT_METADATA])
            self.cfg = cfg
            found = cdev

            LOG.debug("found datasource in %s", cdev)
            break

        if not found:
            return False

        if found == ddir:
            LOG.debug("using files cached in %s", ddir)

        # azure / hyper-v provides random data here
        seed = util.load_file("/sys/firmware/acpi/tables/OEM0", quiet=True)
        if seed:
            self.metadata['random_seed'] = seed

        # now update ds_cfg to reflect contents passed in config
        usercfg = util.get_cfg_by_path(self.cfg, DS_CFG_PATH, {})
        self.ds_cfg = util.mergemanydict([usercfg, self.ds_cfg])
        mycfg = self.ds_cfg

        # walinux agent writes files world readable, but expects
        # the directory to be protected.
        write_files(mycfg['data_dir'], files, dirmode=0o700)

        # handle the hostname 'publishing'
        try:
            handle_set_hostname(mycfg.get('set_hostname'),
                                self.metadata.get('local-hostname'),
                                mycfg['hostname_bounce'])
        except Exception as e:
            LOG.warn("Failed publishing hostname: %s" % e)
            util.logexc(LOG, "handling set_hostname failed")

        try:
            invoke_agent(mycfg['agent_command'])
        except util.ProcessExecutionError:
            # claim the datasource even if the command failed
            util.logexc(LOG, "agent command '%s' failed.",
                        mycfg['agent_command'])

        shcfgxml = os.path.join(mycfg['data_dir'], "SharedConfig.xml")
        wait_for = [shcfgxml]

        fp_files = []
        for pk in self.cfg.get('_pubkeys', []):
            bname = pk['fingerprint'] + ".crt"
            fp_files += [os.path.join(mycfg['data_dir'], bname)]

        missing = util.log_time(logfunc=LOG.debug,
                                msg="waiting for files",
                                func=wait_for_files,
                                args=(wait_for + fp_files, ))
        if len(missing):
            LOG.warn("Did not find files, but going on: %s", missing)

        if shcfgxml in missing:
            LOG.warn("SharedConfig.xml missing, using static instance-id")
        else:
            try:
                self.metadata['instance-id'] = iid_from_shared_config(shcfgxml)
            except ValueError as e:
                LOG.warn("failed to get instance id in %s: %s" % (shcfgxml, e))

        pubkeys = pubkeys_from_crt_files(fp_files)

        self.metadata['public-keys'] = pubkeys

        return True
Ejemplo n.º 43
0
    def user_data_rhevm(self):
        '''
        RHEVM specific userdata read

         If on RHEV-M the user data will be contained on the
         floppy device in file <user_data_file>
         To access it:
           modprobe floppy

           Leverage util.mount_cb to:
               mkdir <tmp mount dir>
               mount /dev/fd0 <tmp mount dir>
               The call back passed to util.mount_cb will do:
                   read <tmp mount dir>/<user_data_file>
        '''

        return_str = None

        # modprobe floppy
        try:
            cmd = CMD_PROBE_FLOPPY
            (cmd_out, _err) = util.subp(cmd)
            LOG.debug(('Command: %s\nOutput%s') % (' '.join(cmd), cmd_out))
        except ProcessExecutionError as _err:
            util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd),
                        _err.message)
            return False
        except OSError as _err:
            util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), _err)
            return False

        floppy_dev = '/dev/fd0'

        # udevadm settle for floppy device
        try:
            cmd = CMD_UDEVADM_SETTLE
            cmd.append('--exit-if-exists=' + floppy_dev)
            (cmd_out, _err) = util.subp(cmd)
            LOG.debug(('Command: %s\nOutput%s') % (' '.join(cmd), cmd_out))
        except ProcessExecutionError as _err:
            util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd),
                        _err.message)
            return False
        except OSError as _err:
            util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd),
                        _err.message)
            return False

        try:
            return_str = util.mount_cb(floppy_dev, read_user_data_callback)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
        except util.MountFailedError:
            util.logexc(LOG, "Failed to mount %s when looking for user data",
                        floppy_dev)

        self.userdata_raw = return_str
        self.metadata = META_DATA_NOT_SUPPORTED

        if return_str:
            return True
        else:
            return False
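
The docstring above spells out the shape of the callback handed to util.mount_cb: it receives the temporary mountpoint of /dev/fd0 and returns the user data read from a file inside it. A minimal sketch of such a callback follows; the filename 'user-data.txt' is an assumed example, not necessarily the <user_data_file> the datasource actually reads.

import os


def example_user_data_callback(mount_dir):
    """Return the contents of an assumed user data file under the floppy
    mountpoint, or None when it is absent. Illustrative callback only."""
    path = os.path.join(mount_dir, "user-data.txt")  # assumed filename
    if not os.path.isfile(path):
        return None
    with open(path) as fh:
        return fh.read().strip()
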
Ejemplo n.º 44
0
    def get_data(self):
        defaults = {
            "instance-id": "nocloud",
            "dsmode": self.dsmode,
        }

        found = []
        mydata = {'meta-data': {}, 'user-data': "", 'vendor-data': ""}

        try:
            # Parse the kernel command line, getting data passed in
            md = {}
            if parse_cmdline_data(self.cmdline_id, md):
                found.append("cmdline")
            mydata['meta-data'].update(md)
        except:
            util.logexc(LOG, "Unable to parse command line data")
            return False

        # Check to see if the seed dir has data.
        pp2d_kwargs = {'required': ['user-data', 'meta-data'],
                       'optional': ['vendor-data']}

        try:
            seeded = util.pathprefix2dict(self.seed_dir, **pp2d_kwargs)
            found.append(self.seed_dir)
            LOG.debug("Using seeded data from %s", self.seed_dir)
        except ValueError as e:
            pass

        if self.seed_dir in found:
            mydata = _merge_new_seed(mydata, seeded)

        # If the datasource config had a 'seedfrom' entry, then that takes
        # precedence over a 'seedfrom' that was found in a filesystem
        # but not over external media
        if self.ds_cfg.get('seedfrom'):
            found.append("ds_config_seedfrom")
            mydata['meta-data']["seedfrom"] = self.ds_cfg['seedfrom']

        # fields appropriately named can also just come from the datasource
        # config (ie, 'user-data', 'meta-data', 'vendor-data' there)
        if 'user-data' in self.ds_cfg and 'meta-data' in self.ds_cfg:
            mydata = _merge_new_seed(mydata, self.ds_cfg)
            found.append("ds_config")

        def _pp2d_callback(mp, data):
            return util.pathprefix2dict(mp, **data)

        label = self.ds_cfg.get('fs_label', "cidata")
        if label is not None:
            # Query optical drive to get it in blkid cache for 2.6 kernels
            util.find_devs_with(path="/dev/sr0")
            util.find_devs_with(path="/dev/sr1")

            fslist = util.find_devs_with("TYPE=vfat")
            fslist.extend(util.find_devs_with("TYPE=iso9660"))

            label_list = util.find_devs_with("LABEL=%s" % label)
            devlist = list(set(fslist) & set(label_list))
            devlist.sort(reverse=True)

            for dev in devlist:
                try:
                    LOG.debug("Attempting to use data from %s", dev)

                    try:
                        seeded = util.mount_cb(dev, _pp2d_callback,
                                               pp2d_kwargs)
                    except ValueError as e:
                        if dev in label_list:
                            LOG.warn("device %s with label=%s not a"
                                     "valid seed.", dev, label)
                        continue

                    mydata = _merge_new_seed(mydata, seeded)

                    # For seed from a device, the default mode is 'net'.
                    # that is more likely to be what is desired.  If they want
                    # dsmode of local, then they must specify that.
                    if 'dsmode' not in mydata['meta-data']:
                        mydata['meta-data']['dsmode'] = "net"

                    LOG.debug("Using data from %s", dev)
                    found.append(dev)
                    break
                except OSError as e:
                    if e.errno != errno.ENOENT:
                        raise
                except util.MountFailedError:
                    util.logexc(LOG, "Failed to mount %s when looking for "
                                "data", dev)

        # There was no indication on kernel cmdline or data
        # in the seeddir suggesting this handler should be used.
        if len(found) == 0:
            return False

        seeded_interfaces = None

        # The special argument "seedfrom" indicates we should
        # attempt to seed the userdata / metadata from its value
        # its primary value is in allowing the user to type less
        # on the command line, ie: ds=nocloud;s=http://bit.ly/abcdefg
        if "seedfrom" in mydata['meta-data']:
            seedfrom = mydata['meta-data']["seedfrom"]
            seedfound = False
            for proto in self.supported_seed_starts:
                if seedfrom.startswith(proto):
                    seedfound = proto
                    break
            if not seedfound:
                LOG.debug("Seed from %s not supported by %s", seedfrom, self)
                return False

            if 'network-interfaces' in mydata['meta-data']:
                seeded_interfaces = self.dsmode

            # This could throw errors, but the user told us to do it
            # so if errors are raised, let them raise
            (md_seed, ud) = util.read_seeded(seedfrom, timeout=None)
            LOG.debug("Using seeded cache data from %s", seedfrom)

            # Values in the command line override those from the seed
            mydata['meta-data'] = util.mergemanydict([mydata['meta-data'],
                                                      md_seed])
            mydata['user-data'] = ud
            found.append(seedfrom)

        # Now that we have exhausted any other places merge in the defaults
        mydata['meta-data'] = util.mergemanydict([mydata['meta-data'],
                                                  defaults])

        # Update the network-interfaces if metadata had 'network-interfaces'
        # entry and this is the local datasource, or 'seedfrom' was used
        # and the source of the seed was self.dsmode
        # ('local' for NoCloud, 'net' for NoCloudNet')
        if ('network-interfaces' in mydata['meta-data'] and
                (self.dsmode in ("local", seeded_interfaces))):
            LOG.debug("Updating network interfaces from %s", self)
            self.distro.apply_network(
                mydata['meta-data']['network-interfaces'])

        if mydata['meta-data']['dsmode'] == self.dsmode:
            self.seed = ",".join(found)
            self.metadata = mydata['meta-data']
            self.userdata_raw = mydata['user-data']
            self.vendordata = mydata['vendor-data']
            return True

        LOG.debug("%s: not claiming datasource, dsmode=%s", self, md['dsmode'])
        return False
Ejemplo n.º 45
0
    def _get_data(self):
        found = None
        md = {}
        results = {}
        for sdir in (self.seed_dir, "/config-drive"):
            if not os.path.isdir(sdir):
                continue
            try:
                results = read_config_drive(sdir)
                found = sdir
                break
            except openstack.NonReadable:
                util.logexc(LOG, "Failed reading config drive from %s", sdir)

        if not found:
            dslist = self.sys_cfg.get('datasource_list')
            for dev in find_candidate_devs(dslist=dslist):
                try:
                    # Set mtype if freebsd and turn off sync
                    if dev.startswith("/dev/cd"):
                        mtype = "cd9660"
                        sync = False
                    else:
                        mtype = None
                        sync = True
                    results = util.mount_cb(dev,
                                            read_config_drive,
                                            mtype=mtype,
                                            sync=sync)
                    found = dev
                except openstack.NonReadable:
                    pass
                except util.MountFailedError:
                    pass
                except openstack.BrokenMetadata:
                    util.logexc(LOG, "Broken config drive: %s", dev)
                if found:
                    break
        if not found:
            return False

        md = results.get('metadata', {})
        md = util.mergemanydict([md, DEFAULT_METADATA])

        self.dsmode = self._determine_dsmode([
            results.get('dsmode'),
            self.ds_cfg.get('dsmode'),
            sources.DSMODE_PASS if results['version'] == 1 else None
        ])

        if self.dsmode == sources.DSMODE_DISABLED:
            return False

        prev_iid = get_previous_iid(self.paths)
        cur_iid = md['instance-id']
        if prev_iid != cur_iid:
            # better would be to handle this centrally, allowing
            # the datasource to do something on new instance id
            # note, networking is only rendered here if dsmode is DSMODE_PASS
            # which means "DISABLED, but render files and networking"
            on_first_boot(results,
                          distro=self.distro,
                          network=self.dsmode == sources.DSMODE_PASS)

        # This is legacy and sneaky.  If dsmode is 'pass' then do not claim
        # the datasource was used, even though we did run on_first_boot above.
        if self.dsmode == sources.DSMODE_PASS:
            LOG.debug("%s: not claiming datasource, dsmode=%s", self,
                      self.dsmode)
            return False

        self.source = found
        self.metadata = md
        self.ec2_metadata = results.get('ec2-metadata')
        self.userdata_raw = results.get('userdata')
        self.version = results['version']
        self.files.update(results.get('files', {}))

        vd = results.get('vendordata')
        self.vendordata_pure = vd
        try:
            self.vendordata_raw = sources.convert_vendordata(vd)
        except ValueError as e:
            LOG.warning("Invalid content in vendor-data: %s", e)
            self.vendordata_raw = None

        # network_config is an /etc/network/interfaces formated file and is
        # obsolete compared to networkdata (from network_data.json) but both
        # might be present.
        self.network_eni = results.get("network_config")
        self.network_json = results.get('networkdata')
        return True
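
The config-drive example above resolves the final dsmode by priority: the value from the drive itself, then ds_cfg, then a version-dependent fallback, via self._determine_dsmode([...]). Assuming that helper simply returns the first non-None candidate and otherwise a network default (an assumption, not a quote of cloud-init's implementation), the selection amounts to:

def determine_dsmode(candidates, default="net"):
    """Return the first candidate that is not None, else the default.
    Hedged sketch of the priority-ordered dsmode selection used above."""
    for candidate in candidates:
        if candidate is not None:
            return candidate
    return default


# e.g. the drive's own dsmode beats ds_cfg, which beats the version fallback:
assert determine_dsmode([None, "local", None]) == "local"
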
Ejemplo n.º 46
0
    def get_data(self):
        found = None
        md = {}
        results = {}
        if os.path.isdir(self.seed_dir):
            try:
                results = read_config_drive(self.seed_dir)
                found = self.seed_dir
            except openstack.NonReadable:
                util.logexc(LOG, "Failed reading config drive from %s",
                            self.seed_dir)
        if not found:
            for dev in find_candidate_devs():
                try:
                    results = util.mount_cb(dev, read_config_drive)
                    found = dev
                except openstack.NonReadable:
                    pass
                except util.MountFailedError:
                    pass
                except openstack.BrokenMetadata:
                    util.logexc(LOG, "Broken config drive: %s", dev)
                if found:
                    break
        if not found:
            return False

        md = results.get('metadata', {})
        md = util.mergemanydict([md, DEFAULT_METADATA])
        user_dsmode = results.get('dsmode', None)
        if user_dsmode not in VALID_DSMODES + (None,):
            LOG.warn("User specified invalid mode: %s", user_dsmode)
            user_dsmode = None

        dsmode = get_ds_mode(cfgdrv_ver=results['version'],
                             ds_cfg=self.ds_cfg.get('dsmode'),
                             user=user_dsmode)

        if dsmode == "disabled":
            # most likely user specified
            return False

        # TODO(smoser): fix this, its dirty.
        # we want to do some things (writing files and network config)
        # only on first boot, and even then, we want to do so in the
        # local datasource (so they happen earlier) even if the configured
        # dsmode is 'net' or 'pass'. To do this, we check the previous
        # instance-id
        prev_iid = get_previous_iid(self.paths)
        cur_iid = md['instance-id']
        if prev_iid != cur_iid and self.dsmode == "local":
            on_first_boot(results, distro=self.distro)

        # dsmode != self.dsmode here if:
        #  * dsmode = "pass",  pass means it should only copy files and then
        #    pass to another datasource
        #  * dsmode = "net" and self.dsmode = "local"
        #    so that user boothooks would be applied with network, the
        #    local datasource just gets out of the way, and lets the net claim
        if dsmode != self.dsmode:
            LOG.debug("%s: not claiming datasource, dsmode=%s", self, dsmode)
            return False

        self.source = found
        self.metadata = md
        self.ec2_metadata = results.get('ec2-metadata')
        self.userdata_raw = results.get('userdata')
        self.version = results['version']
        self.files.update(results.get('files', {}))

        vd = results.get('vendordata')
        self.vendordata_pure = vd
        try:
            self.vendordata_raw = openstack.convert_vendordata_json(vd)
        except ValueError as e:
            LOG.warn("Invalid content in vendor-data: %s", e)
            self.vendordata_raw = None

        return True
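get_ds_mode() above resolves the effective dsmode from three places, and the comments explain why the result decides whether this datasource claims the instance. A minimal sketch of that precedence, assuming the historical defaults (a version 1 config drive defaulted to 'pass', newer ones to 'net'); check get_ds_mode() in the source for the authoritative rules:

def resolve_dsmode(cfgdrv_ver, ds_cfg=None, user=None):
    """Resolve the effective dsmode.

    Precedence (highest first): a user-specified mode from the metadata,
    the datasource configuration, then a default based on the config
    drive version.
    """
    if user is not None:
        return user      # user-specified mode trumps everything
    if ds_cfg is not None:
        return ds_cfg    # then whatever the datasource config says
    # assumed historical defaults: version 1 -> 'pass', otherwise 'net'
    return "pass" if cfgdrv_ver == 1 else "net"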
Example #47
0
    def _get_data(self):
        # Azure removes/ejects the cdrom containing the ovf-env.xml
        # file on reboot, so in order to successfully reboot we
        # need to look in the datadir and consider its contents valid.
        asset_tag = util.read_dmi_data('chassis-asset-tag')
        if asset_tag != AZURE_CHASSIS_ASSET_TAG:
            LOG.debug("Non-Azure DMI asset tag '%s' discovered.", asset_tag)
            return False

        ddir = self.ds_cfg['data_dir']

        candidates = [self.seed_dir]
        if os.path.isfile(REPROVISION_MARKER_FILE):
            candidates.insert(0, "IMDS")
        candidates.extend(list_possible_azure_ds_devs())
        if ddir:
            candidates.append(ddir)

        found = None
        reprovision = False
        for cdev in candidates:
            try:
                if cdev == "IMDS":
                    ret = None
                    reprovision = True
                elif cdev.startswith("/dev/"):
                    if util.is_FreeBSD():
                        ret = util.mount_cb(cdev,
                                            load_azure_ds_dir,
                                            mtype="udf",
                                            sync=False)
                    else:
                        ret = util.mount_cb(cdev, load_azure_ds_dir)
                else:
                    ret = load_azure_ds_dir(cdev)

            except NonAzureDataSource:
                continue
            except BrokenAzureDataSource:
                # unlike the other handlers here, broken metadata is fatal
                raise
            except util.MountFailedError:
                LOG.warning("%s was not mountable", cdev)
                continue

            if reprovision or self._should_reprovision(ret):
                ret = self._reprovision()
            (md, self.userdata_raw, cfg, files) = ret
            self.seed = cdev
            self.metadata = util.mergemanydict([md, DEFAULT_METADATA])
            self.cfg = util.mergemanydict([cfg, BUILTIN_CLOUD_CONFIG])
            found = cdev

            LOG.debug("found datasource in %s", cdev)
            break

        if not found:
            return False

        if found == ddir:
            LOG.debug("using files cached in %s", ddir)

        # Azure / Hyper-V provides the random seed data read here.
        # TODO: find the seed on the FreeBSD platform.
        # Now update ds_cfg to reflect the contents passed in via config.
        if not util.is_FreeBSD():
            seed = util.load_file("/sys/firmware/acpi/tables/OEM0",
                                  quiet=True,
                                  decode=False)
            if seed:
                self.metadata['random_seed'] = seed

        user_ds_cfg = util.get_cfg_by_path(self.cfg, DS_CFG_PATH, {})
        self.ds_cfg = util.mergemanydict([user_ds_cfg, self.ds_cfg])

        # walinux agent writes files world readable, but expects
        # the directory to be protected.
        write_files(ddir, files, dirmode=0o700)

        self.metadata['instance-id'] = util.read_dmi_data('system-uuid')

        return True
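The candidate loop above treats an unmountable device as non-fatal and, on FreeBSD, names the filesystem type explicitly because the provisioning ISO is UDF. A minimal sketch of that pattern, with a hypothetical read_ovf() callback standing in for load_azure_ds_dir(); the sync=False flag mirrors the example above and may not exist in newer cloud-init releases:

import os

from cloudinit import util


def read_ovf(mountpoint):
    # hypothetical callback: read ovf-env.xml from the mounted ISO
    return util.load_file(os.path.join(mountpoint, "ovf-env.xml"),
                          quiet=True)


def first_mountable(candidates):
    """Return (device, contents) for the first candidate that mounts."""
    for cdev in candidates:
        try:
            if util.is_FreeBSD():
                # FreeBSD will not auto-detect the ISO; ask for UDF
                contents = util.mount_cb(cdev, read_ovf,
                                         mtype="udf", sync=False)
            else:
                contents = util.mount_cb(cdev, read_ovf)
        except util.MountFailedError:
            continue  # not mountable; try the next candidate
        return cdev, contents
    return None, None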