Example #1
def get_flag_mapping(flag_name, fs_family, param=None, strict=False):
    ret = []
    release = distro.lsb_release()['codename']
    overrides = release_flag_mapping_overrides.get(release, {})
    if flag_name in overrides and fs_family in overrides[flag_name]:
        flag_sym = overrides[flag_name][fs_family]
    else:
        flag_sym_families = family_flag_mappings.get(flag_name)
        if flag_sym_families is None:
            raise ValueError("unsupported flag '%s'" % flag_name)
        flag_sym = flag_sym_families.get(fs_family)

    if flag_sym is None:
        if strict:
            raise ValueError(
                "flag '%s' not supported by fs family '%s'" % (
                    flag_name, fs_family))
        else:
            return ret

    if param is None:
        ret.append(flag_sym)
    else:
        params = [k.format(**{flag_name: param}) for k in flag_sym]
        if list(params) == list(flag_sym):
            raise ValueError("Param %s not used for flag_name=%s and "
                             "fs_family=%s." % (param, flag_name, fs_family))

        ret.extend(params)
    return ret
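A minimal usage sketch of the function above; the flag name, fs family, and
parameter are illustrative assumptions, not values taken from the real
mapping tables:

# Hypothetical usage, assuming a matching entry exists in the mapping tables.
label_opts = get_flag_mapping('label', 'ext4', param='rootfs')
# With strict=True, an unsupported flag/family pair raises ValueError
# instead of returning an empty list.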
Example #2
def lvm_scan(activate=True, multipath=False):
    """
    run full scan for volgroups, logical volumes and physical volumes
    """
    # prior to xenial, lvmetad is not packaged, so even if a tool supports
    # flag --cache it has no effect. In Xenial and newer the --cache flag is
    # used (if lvmetad is running) to ensure that the data cached by
    # lvmetad is updated.

    # before appending the cache flag though, check if lvmetad is running. this
    # ensures that we do the right thing even if lvmetad is supported but is
    # not running
    release = distro.lsb_release().get('codename')
    if release in [None, 'UNAVAILABLE']:
        LOG.warning('unable to find release number, assuming xenial or later')
        release = 'xenial'

    if multipath:
        # only operate on mp devices
        mponly = 'devices{ filter = [ "a|/dev/mapper/mpath.*|", "r|.*|" ] }'

    for cmd in [['pvscan'], ['vgscan']]:
        if release != 'precise' and lvmetad_running():
            cmd.append('--cache')
        if multipath:
            cmd.extend(['--config', mponly])
        util.subp(cmd, capture=True)
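A hedged usage sketch; lvm_scan and its lvmetad_running helper are assumed to
live in the same module:

# Rescan LVM metadata after attaching new disks (illustrative calls).
lvm_scan()                 # plain scan; --cache is added when lvmetad runs
lvm_scan(multipath=True)   # restrict the scan to /dev/mapper/mpath* devices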
Example #3
def get_config_v4(
    template_name: str,
    global_dhcp_snippets: Sequence[dict],
    failover_peers: Sequence[dict],
    shared_networks: Sequence[dict],
    hosts: Sequence[dict],
    omapi_key: str,
) -> str:
    """Return a DHCP config file based on the supplied parameters.

    :param template_name: Template file name: `dhcpd.conf.template` for the
        IPv4 template.
    :return: A full configuration, as a string.
    """
    platform_codename = lsb_release()["codename"]
    template = load_template("dhcp", template_name)
    dhcp_socket = get_maas_data_path("dhcpd.sock")

    # Helper functions to stuff into the template namespace.
    helpers = {
        "oneline": normalise_whitespace,
        "commalist": normalise_any_iterable_to_comma_list,
        "quoted_commalist": normalise_any_iterable_to_quoted_comma_list,
        "running_in_snap": snappy.running_in_snap(),
    }

    for shared_network in shared_networks:
        interface = shared_network.get("interface", None)
        for subnet in shared_network["subnets"]:
            rack_ip = get_rack_ip_for_subnet(
                4, subnet["subnet_cidr"], interface
            )
            if rack_ip is not None:
                subnet["next_server"] = rack_ip
                subnet["bootloader"] = compose_conditional_bootloader(
                    False, rack_ip
                )
            ntp_servers = subnet["ntp_servers"]  # Is a list.
            ntp_servers_ipv4, ntp_servers_ipv6 = _get_addresses(*ntp_servers)
            subnet["ntp_servers_ipv4"] = ", ".join(ntp_servers_ipv4)
            subnet["ntp_servers_ipv6"] = ", ".join(ntp_servers_ipv6)

    try:
        return template.substitute(
            global_dhcp_snippets=global_dhcp_snippets,
            hosts=hosts,
            failover_peers=failover_peers,
            shared_networks=shared_networks,
            platform_codename=platform_codename,
            omapi_key=omapi_key,
            dhcp_helper=(get_path("/usr/sbin/maas-dhcp-helper")),
            dhcp_socket=dhcp_socket,
            **helpers
        )
    except (KeyError, NameError) as error:
        raise DHCPConfigError(
            "Failed to render DHCP configuration."
        ) from error
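An illustrative call matching the signature above; the empty sequences and the
key value are placeholders rather than a realistic MAAS configuration:

config_text = get_config_v4(
    template_name="dhcpd.conf.template",
    global_dhcp_snippets=[],
    failover_peers=[],
    shared_networks=[],
    hosts=[],
    omapi_key="<base64-omapi-key>",
)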
Example #4
def _get_default_params():
    """ get_default_params
    Get the most basic default mirror and release info to be used in tests
    """
    params = {}
    params['RELEASE'] = distro.lsb_release()['codename']
    arch = util.get_architecture()
    params['MIRROR'] = apt_config.get_default_mirrors(arch)["PRIMARY"]
    return params
Example #5
    def test_lsb_release_unavailable(self, mock_subp):
        def doraise(*args, **kwargs):
            raise util.ProcessExecutionError("foo")

        mock_subp.side_effect = doraise

        expected = {
            k: "UNAVAILABLE"
            for k in ('id', 'description', 'codename', 'release')
        }
        self.assertEqual(distro.lsb_release(), expected)
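For context, a minimal sketch of the behaviour this test asserts: when the
lsb_release tool cannot be executed, every field falls back to 'UNAVAILABLE'.
This is an assumption about the shape of the real helper, not the project's
code, and _parse_lsb_output is hypothetical:

def _lsb_release_sketch():
    """Run lsb_release and fall back to 'UNAVAILABLE' values on failure."""
    fields = ('id', 'description', 'codename', 'release')
    try:
        out, _err = util.subp(['lsb_release', '--all'], capture=True)
    except util.ProcessExecutionError:
        return {k: 'UNAVAILABLE' for k in fields}
    return _parse_lsb_output(out)  # hypothetical parsing helper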
Example #6
def get_config_v6(
    template_name: str,
    global_dhcp_snippets: Sequence[dict],
    failover_peers: Sequence[dict],
    shared_networks: Sequence[dict],
    hosts: Sequence[dict],
    omapi_key: str,
) -> str:
    """Return a DHCP config file based on the supplied parameters.

    :param template_name: Template file name: `dhcpd6.conf.template` for the
        IPv6 template.
    :return: A full configuration, as a string.
    """
    platform_codename = lsb_release()["codename"]
    template = load_template("dhcp", template_name)
    # Helper functions to stuff into the template namespace.
    helpers = {
        "oneline": normalise_whitespace,
        "commalist": normalise_any_iterable_to_comma_list,
        "quoted_commalist": normalise_any_iterable_to_quoted_comma_list,
        "running_in_snap": snappy.running_in_snap(),
    }

    shared_networks = _process_network_parameters_v6(
        failover_peers, shared_networks
    )

    try:
        return template.substitute(
            global_dhcp_snippets=global_dhcp_snippets,
            hosts=hosts,
            failover_peers=failover_peers,
            shared_networks=shared_networks,
            platform_codename=platform_codename,
            omapi_key=omapi_key,
            **helpers
        )
    except (KeyError, NameError) as error:
        raise DHCPConfigError(
            "Failed to render DHCP configuration."
        ) from error
Example #7
def lvm_scan(activate=True):
    """
    run full scan for volgroups, logical volumes and physical volumes
    """
    # prior to xenial, lvmetad is not packaged, so even if a tool supports
    # flag --cache it has no effect. In Xenial and newer the --cache flag is
    # used (if lvmetad is running) to ensure that the data cached by
    # lvmetad is updated.

    # before appending the cache flag though, check if lvmetad is running. this
    # ensures that we do the right thing even if lvmetad is supported but is
    # not running
    release = distro.lsb_release().get('codename')
    if release in [None, 'UNAVAILABLE']:
        LOG.warning('unable to find release number, assuming xenial or later')
        release = 'xenial'

    for cmd in [['pvscan'], ['vgscan', '--mknodes']]:
        if release != 'precise' and lvmetad_running():
            cmd.append('--cache')
        util.subp(cmd, capture=True)
Example #8
def handle_apt(cfg, target=None):
    """ handle_apt
        process the config for apt_config. This can be called from
        curthooks if a global apt config was provided or via the "apt"
        standalone command.
    """
    release = distro.lsb_release(target=target)['codename']
    arch = distro.get_architecture(target)
    mirrors = find_apt_mirror_info(cfg, arch)
    LOG.debug("Apt Mirror info: %s", mirrors)

    apply_debconf_selections(cfg, target)

    if not config.value_as_boolean(cfg.get('preserve_sources_list', True)):
        generate_sources_list(cfg, release, mirrors, target)
        apply_preserve_sources_list(target)
        rename_apt_lists(mirrors, target)

    try:
        apply_apt_proxy_config(cfg, target + APT_PROXY_FN,
                               target + APT_CONFIG_FN)
    except (IOError, OSError):
        LOG.exception("Failed to apply proxy or apt config info:")

    # Process 'apt_source -> sources {dict}'
    if 'sources' in cfg:
        params = mirrors
        params['RELEASE'] = release
        params['MIRROR'] = mirrors["MIRROR"]

        matcher = None
        matchcfg = cfg.get('add_apt_repo_match', ADD_APT_REPO_MATCH)
        if matchcfg:
            matcher = re.compile(matchcfg).search

        add_apt_sources(cfg['sources'],
                        target,
                        template_params=params,
                        aa_repo_match=matcher)
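A hedged sketch of a configuration dict that would exercise this code path;
the keys mirror those read above, while the URIs and the source entry are
made-up examples:

# Illustrative apt config consumed by handle_apt (values are made up).
apt_cfg = {
    'preserve_sources_list': False,
    'primary': [{'arches': ['default'],
                 'uri': 'http://archive.ubuntu.com/ubuntu'}],
    'sources': {
        'example.list': {
            'source': 'deb http://ppa.example.com/ubuntu $RELEASE main'},
    },
}
handle_apt(apt_cfg, target='/target')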
Example #9
def zfs_assert_supported():
    """ Determine if the runtime system supports zfs.
    returns: True if system supports zfs
    raises: RuntimeError: if system does not support zfs
    """
    arch = util.get_platform_arch()
    if arch in ZFS_UNSUPPORTED_ARCHES:
        raise RuntimeError("zfs is not supported on architecture: %s" % arch)

    release = distro.lsb_release()['codename']
    if release in ZFS_UNSUPPORTED_RELEASES:
        raise RuntimeError("zfs is not supported on release: %s" % release)

    if 'zfs' not in get_supported_filesystems():
        try:
            util.load_kernel_module('zfs')
        except util.ProcessExecutionError as err:
            raise RuntimeError("Failed to load 'zfs' kernel module: %s" % err)

    missing_progs = [p for p in ('zpool', 'zfs') if not util.which(p)]
    if missing_progs:
        raise RuntimeError("Missing zfs utils: %s" % ','.join(missing_progs))
Example #10
    def test_lsb_release_functional(self, mock_subp):
        output = '\n'.join([
            "Distributor ID: Ubuntu",
            "Description:    Ubuntu 14.04.2 LTS",
            "Release:    14.04",
            "Codename:   trusty",
        ])
        rdata = {
            'id': 'Ubuntu',
            'description': 'Ubuntu 14.04.2 LTS',
            'codename': 'trusty',
            'release': '14.04'
        }

        def fake_subp(cmd, capture=False, target=None):
            return output, 'No LSB modules are available.'

        mock_subp.side_effect = fake_subp
        found = distro.lsb_release()
        mock_subp.assert_called_with(['lsb_release', '--all'],
                                     capture=True,
                                     target=None)
        self.assertEqual(found, rdata)
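For reference, a minimal parsing sketch that would produce the dict this test
expects from the mocked output; it is an assumption about how the helper might
work, not the project's exact implementation:

def parse_lsb_output(output):
    """Map 'Key: value' lines from `lsb_release --all` to lowercase keys."""
    rename = {'distributor id': 'id'}
    data = {}
    for line in output.splitlines():
        if ':' not in line:
            continue
        key, _, value = line.partition(':')
        key = key.strip().lower()
        data[rename.get(key, key)] = value.strip()
    return data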
Example #11
    def test_mir_apt_list_rename(self, m_get_architecture):
        """test_mir_apt_list_rename - Test find mirror and apt list renaming"""
        pre = "/var/lib/apt/lists"
        # filenames are archive dependent

        arch = 's390x'
        m_get_architecture.return_value = arch
        component = "ubuntu-ports"
        archive = "ports.ubuntu.com"

        cfg = {'primary': [{'arches': ["default"],
                            'uri':
                            'http://test.ubuntu.com/%s/' % component}],
               'security': [{'arches': ["default"],
                             'uri':
                             'http://testsec.ubuntu.com/%s/' % component}]}
        post = ("%s_dists_%s-updates_InRelease" %
                (component, distro.lsb_release()['codename']))
        fromfn = ("%s/%s_%s" % (pre, archive, post))
        tofn = ("%s/test.ubuntu.com_%s" % (pre, post))

        mirrors = apt_config.find_apt_mirror_info(cfg, arch)

        self.assertEqual(mirrors['MIRROR'],
                         "http://test.ubuntu.com/%s/" % component)
        self.assertEqual(mirrors['PRIMARY'],
                         "http://test.ubuntu.com/%s/" % component)
        self.assertEqual(mirrors['SECURITY'],
                         "http://testsec.ubuntu.com/%s/" % component)

        with mock.patch.object(os, 'rename') as mockren:
            with mock.patch.object(glob, 'glob',
                                   return_value=[fromfn]):
                apt_config.rename_apt_lists(mirrors, TARGET)

        mockren.assert_any_call(fromfn, tofn)
Example #12
    'write-pending',
]

VALID_RAID_ARRAY_STATES = (
    ERROR_RAID_STATES +
    READONLY_RAID_STATES +
    READWRITE_RAID_STATES
)

# On-import check: determine once whether mdadm's --export output can be
# relied on, and store the value for later reference.
# mdadm versions older than 3.3 do not include enough information when using
# --export, so --detail must be used and its output parsed instead. The flag
# below is True when --export yields a key=value listing with enough info;
# the release codename is used here as a proxy for the mdadm version.
MDADM_USE_EXPORT = lsb_release()['codename'] not in ['precise', 'trusty']

#
# mdadm executors
#


def mdadm_assemble(md_devname=None, devices=[], spares=[], scan=False,
                   ignore_errors=False):
    # md_devname is a /dev/XXXX device path
    # devices is a non-empty list of /dev/xxx device paths
    # if spares is a non-empty list, those /dev/xxx devices are appended as spares
    cmd = ["mdadm", "--assemble"]
    if scan:
        cmd += ['--scan', '-v']
    else:
Example #13
    ('mkfs.vfat', 'dosfstools'),
    ('mkfs.btrfs', '^btrfs-(progs|tools)$'),
    ('mkfs.ext4', 'e2fsprogs'),
    ('mkfs.xfs', 'xfsprogs'),
    ('partprobe', 'parted'),
    ('sgdisk', 'gdisk'),
    ('udevadm', 'udev'),
    ('make-bcache', 'bcache-tools'),
    ('iscsiadm', 'open-iscsi'),
]

REQUIRED_KERNEL_MODULES = [
    # kmod name
]

if lsb_release()['codename'] == "precise":
    REQUIRED_IMPORTS.append(('import oauth.oauth', 'python-oauth', None), )
else:
    REQUIRED_IMPORTS.append(
        ('import oauthlib.oauth1', 'python-oauthlib', 'python3-oauthlib'), )

# zfs is only available on releases newer than trusty
if not lsb_release()['codename'] in ["precise", "trusty"]:
    REQUIRED_EXECUTABLES.append(('zfs', 'zfsutils-linux'))
    REQUIRED_KERNEL_MODULES.append('zfs')

if not is_uefi_bootable() and 'arm' in get_architecture():
    REQUIRED_EXECUTABLES.append(('flash-kernel', 'flash-kernel'))


class MissingDeps(Exception):