Example #1
    def __init__(self, settings, writer, **kwargs):
        """
        Track cluster names; set minions to the actively responding minions.

        Allow overriding of the default cluster.
        """
        self.root_dir = settings.root_dir
        # The 'cluster' kwarg overrides the default cluster name
        self.names = kwargs.get('cluster', ['ceph'])
        self.writer = writer

        target = DeepseaMinions()
        search = target.deepsea_minions

        local = salt.client.LocalClient()
        self.minions = local.cmd(search,
                                 'grains.get', ['id'],
                                 tgt_type="compound")

        _rgws = local.cmd(search,
                          'pillar.get', ['rgw_configurations'],
                          tgt_type="compound")
        for node in _rgws:
            # Only the first minion's value is needed
            self.rgw_configurations = _rgws[node]
            break
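A hedged construction sketch; the class that owns this __init__ is not shown in the snippet, so Cluster below is invented, as are the settings and writer objects:

# Hypothetical: 'Cluster' stands in for whatever class owns this __init__
cluster = Cluster(settings, writer, cluster=['production'])
print(cluster.names)   # ['production'] rather than the default ['ceph']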
Example #2
def _get_existing_cluster_networks(addrs, public_networks=None):
    """
    Based on the addrs dictionary { minion: [ipaddress] }, this function
    returns a list of addresses consisting of the network prefix followed by
    the CIDR prefix length (e.g. [ "10.0.0.0/24" ]).  It may return an empty
    list.
    """
    # Avoid a shared mutable default argument
    public_networks = public_networks or []
    target = DeepseaMinions()
    search = target.deepsea_minions

    local = salt.client.LocalClient()
    # Stores the derived network addresses (in CIDR notation) of all
    # addresses contained in addrs.
    minion_networks = []

    # Grab network interfaces from salt.
    # TODO: see if we can use network.ip_addrs (see Fio object in benchmark.py)
    # instead of network.interfaces to simplify the following code
    minion_network_interfaces = local.cmd(search,
                                          "network.interfaces", [],
                                          tgt_type="compound")
    # Remove the loopback interface from each minion's results.
    for entry in minion_network_interfaces:
        minion_network_interfaces[entry].pop("lo", None)

    for minion, ipaddr in addrs.items():
        # An empty ipaddr list simply yields no iterations for this minion.
        for i in ipaddr:
            for intf, data in minion_network_interfaces[minion].items():
                if "inet" in data:
                    for inet_data in data["inet"]:
                        if i == "0.0.0.0":
                            # If running on 0.0.0.0, assume we can use public_networks
                            if public_networks:
                                for n in public_networks:
                                    ip = ipaddress.ip_interface(
                                        u"{}".format(n))
                                    minion_networks.append(str(ip.network))
                            else:
                                ip = ipaddress.ip_interface(u"{}/{}".format(
                                    i, inet_data["netmask"]))
                                minion_networks.append(str(ip.network))
                        elif inet_data["address"] == i:
                            ip = ipaddress.ip_interface(u"{}/{}".format(
                                inet_data["address"], inet_data["netmask"]))
                            minion_networks.append(str(ip.network))

    # Collapse minion_networks back to unique items.  Usually there'll
    # be only one network after this, unless the cluster is using a
    # separate public and private network, or is using multiple public
    # and/or private networks
    minion_networks = set(minion_networks)

    # In the case where there are multiple networks and a list of public
    # networks was passed in, strip the public networks (this is used when
    # trying to figure out what the cluster network(s) is/are).
    minion_networks = [n for n in minion_networks if n not in public_networks]

    return minion_networks
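The heavy lifting above is done by the stock ipaddress module: ip_interface accepts an address plus netmask and collapses it to its containing network. A minimal sketch with invented addresses (the u"" prefix mirrors the original's Python 2 compatibility):

import ipaddress

# "address/netmask" collapses to the network in CIDR notation.
iface = ipaddress.ip_interface(u"{}/{}".format("10.0.0.5", "255.255.255.0"))
print(str(iface.network))                      # "10.0.0.0/24"

# Deduplicate, then strip known public networks, as the function does.
networks = {"10.0.0.0/24", "192.168.100.0/24"}
print([n for n in networks if n not in ["192.168.100.0/24"]])  # ["10.0.0.0/24"]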
Example #3
def _inspect_ceph_statedir(path):
    """
    Helper function that collects /var/lib/ceph information from all minions.

    Returns a dictionary of Path objects keyed on minion id.

    """
    target = DeepseaMinions()
    search = target.deepsea_minions
    local = salt.client.LocalClient()

    # A container of Path's keyed on minion id.
    statedirs = {}

    for minion, path_info in six.iteritems(
            local.cmd(search,
                      'fs.inspect_path', ["path={}".format(path)],
                      tgt_type='compound')):
        if not path_info['ret']:
            statedirs[minion] = None
            continue
        statedirs[minion] = Path(
            path, path_info['attrs'], path_info['exists'], path_info['type'],
            Device(path_info['dev_info']['dev'],
                   path_info['dev_info']['part_dev'],
                   path_info['dev_info']['type'],
                   path_info['dev_info']['uuid'],
                   path_info['dev_info']['fstype']),
            Mount(path_info['mount_info']['mountpoint'],
                  path_info['mount_info']['opts']))

    return statedirs
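A hedged usage sketch of the helper (output shape only; Path, Device, and Mount come from the surrounding module and aren't shown here):

statedirs = _inspect_ceph_statedir("/var/lib/ceph")
for minion, statedir in statedirs.items():
    # statedir is None for any minion where fs.inspect_path reported failure
    if statedir is not None:
        print("{}: {}".format(minion, statedir))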
Example #4
def discovery(cluster=None, printer=None, **kwargs):
    """
    Check that the pillar for each cluster meets the requirements to install
    a Ceph cluster.
    """
    if not cluster:
        usage(func='discovery')
        exit(1)

    local = salt.client.LocalClient()

    # Restrict the search to this cluster
    target = DeepseaMinions()
    search = target.deepsea_minions
    if __pillar__.get('cluster'):
        # pylint: disable=redefined-variable-type
        # Salt accepts either a list or a string as the target
        search = "I@cluster:{}".format(cluster)

    pillar_data = local.cmd(search, 'pillar.items', [], tgt_type="compound")

    printer = get_printer(**kwargs)
    valid = Validate(cluster, data=pillar_data, printer=printer)

    valid.deepsea_minions(target)
    valid.lint_yaml_files()
    if not valid.in_dev_env:
        valid.profiles_populated()
    valid.report()

    if valid.errors:
        return False

    return True
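A hypothetical invocation, assuming the function is exposed as a Salt runner in a module named validate (the module name is not visible in this snippet):

sudo salt-run validate.discovery cluster=ceph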
Example #5
    def __init__(self, local):
        """
        Query the cluster assignment and remove unassigned
        """
        target = DeepseaMinions()
        search = target.deepsea_minions
        self.minions = local.cmd(search, 'pillar.get', ['cluster'])

        self.names = dict(self._clusters())
        self.names.pop('unassigned', None)
Example #6
    def __init__(self, settings, cluster, servers, writer):
        """
        Initialize role secrets, track parameters
        """
        self.cluster = cluster
        self.servers = servers
        self.writer = writer

        self.root_dir = settings.root_dir
        target = DeepseaMinions()
        self.search = target.deepsea_minions

        self.networks = self._networks(self.servers)
        self.public_networks, self.cluster_networks = self.public_cluster(
            self.networks.copy())

        self.available_roles = ['storage']
Example #7
def saltapi(**kwargs):
    """
    Verify that the Salt API is working
    """
    target = DeepseaMinions()
    search = target.deepsea_minions
    local = salt.client.LocalClient()

    pillar_data = local.cmd(search, 'pillar.items', [], tgt_type="compound")
    printer = get_printer(**kwargs)

    valid = Validate("salt-api", pillar_data, [], printer)
    valid.saltapi()
    valid.report()

    if valid.errors:
        return False

    return True
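A hypothetical invocation, under the same assumption of a validate runner module:

sudo salt-run validate.saltapi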
Example #8
def deploy(**kwargs):
    """
    Verify that Stage 4 (Services) can succeed.
    """
    target = DeepseaMinions()
    search = target.deepsea_minions
    local = salt.client.LocalClient()
    pillar_data = local.cmd(search, 'pillar.items', [], tgt_type="compound")
    grains_data = local.cmd(search, 'grains.items', [], tgt_type="compound")
    printer = get_printer(**kwargs)

    valid = Validate("deploy", pillar_data, grains_data, printer)
    valid.openattic()
    valid.report()

    if valid.errors:
        return False

    return True
Example #9
def setup(**kwargs):
    """
    Check that initial files prior to any stage are correct
    """
    target = DeepseaMinions()
    search = target.deepsea_minions
    local = salt.client.LocalClient()

    pillar_data = local.cmd(search, 'pillar.items', [], tgt_type="compound")
    printer = get_printer(**kwargs)

    valid = Validate("setup", pillar_data, [], printer)
    valid.deepsea_minions(target)
    valid.master_minion()
    valid.ceph_version()
    valid.report()

    if valid.errors:
        return False

    return True
Example #10
def engulf_existing_cluster(**kwargs):
    """
    Assuming proposals() has already been run to collect hardware profiles and
    all possible role assignments and common configuration, this will generate
    a policy.cfg with roles and assignments reflecting whatever cluster is
    currently deployed.  It will also suck in all the keyrings so that they're
    present when the configure stage is run.

    This assumes your cluster is named "ceph".  If it's not, things will break.
    """
    target = DeepseaMinions()
    search = target.deepsea_minions
    local = salt.client.LocalClient()
    settings = Settings()
    salt_writer = SaltWriter(**kwargs)

    # Make sure deepsea_minions contains valid minions before proceeding with engulf.
    from . import validate
    validator = validate.Validate("ceph",
                                  search_pillar=True,
                                  printer=validate.get_printer())
    validator.deepsea_minions(validator.target)
    if validator.errors:
        validator.report()
        return False

    policy_cfg = []

    # Check for firewall/apparmor.
    if not ready.check("ceph", True, search):
        return False

    # First, hand apply select Stage 0 functions
    local.cmd(search, "saltutil.sync_all", [], tgt_type="compound")
    local.cmd(search, "state.apply", ["ceph.mines"], tgt_type="compound")

    # Run proposals gathering directly.
    proposals()

    # Our imported hardware profile proposal path
    imported_profile = "profile-import"
    imported_profile_path = settings.root_dir + "/" + imported_profile

    # Used later on to compute cluster and public networks.
    mon_addrs = {}
    osd_addrs = {}

    ceph_conf = None
    previous_minion = None
    admin_minion = None

    mon_minions = []
    mgr_instances = []
    mds_instances = []
    rgw_instances = []

    for minion, info in local.cmd(search,
                                  "cephinspector.inspect", [],
                                  tgt_type="compound").items():

        if not isinstance(info, dict):
            print("cephinspector.inspect failed on {}: {}".format(minion, info))
            return False

        if info["ceph_conf"] is not None:
            if ceph_conf is None:
                ceph_conf = info["ceph_conf"]
            elif info["ceph_conf"] != ceph_conf:
                # TODO: what's the best way to report errors from a runner?
                print(("ceph.conf on {} doesn't match ceph.conf on "
                       "{}").format(minion, previous_minion))
                return False
            previous_minion = minion

        is_admin = info["has_admin_keyring"]

        if admin_minion is None and is_admin:
            # We'll talk to this minion later to obtain keyrings
            admin_minion = minion

        is_master = local.cmd(minion,
                              "pillar.get", ["master_minion"],
                              tgt_type="compound")[minion] == minion

        if not info["running_services"] and not is_admin and not is_master:
            # No ceph services running, no admin key, not the master_minion,
            # don't assign it to the cluster
            continue

        policy_cfg.append("cluster-ceph/cluster/" + minion + ".sls")

        if is_master:
            policy_cfg.append("role-master/cluster/" + minion + ".sls")
        elif is_admin:
            policy_cfg.append("role-admin/cluster/" + minion + ".sls")

        if "ceph-mon" in info["running_services"]:
            mon_minions.append(minion)
            policy_cfg.append("role-mon/cluster/" + minion + ".sls")
            policy_cfg.append("role-mon/stack/default/ceph/minions/" + minion +
                              ".yml")
            for minion, ipaddrs in local.cmd(
                    minion,
                    "cephinspector.get_minion_public_networks", [],
                    tgt_type="compound").items():
                mon_addrs[minion] = ipaddrs

        if "ceph-osd" in info["running_services"]:
            # Needs a storage profile assigned (which may be different
            # from the proposals DeepSea has come up with, depending on
            # how things were deployed)
            ceph_disks = local.cmd(minion,
                                   "cephinspector.get_ceph_disks_yml", [],
                                   tgt_type="compound")
            if not ceph_disks:
                log.error("Failed to get list of Ceph OSD disks.")
                return [False]

            for minion, store in ceph_disks.items():
                minion_yml_dir = imported_profile_path + "/stack/default/ceph/minions"
                minion_yml_path = minion_yml_dir + "/" + minion + ".yml"
                _create_dirs(minion_yml_dir, "")
                salt_writer.write(minion_yml_path, store)

                minion_sls_data = {"roles": ["storage"]}
                minion_sls_dir = imported_profile_path + "/cluster"
                minion_sls_path = minion_sls_dir + "/" + minion + ".sls"
                _create_dirs(minion_sls_dir, "")
                salt_writer.write(minion_sls_path, minion_sls_data)

                policy_cfg.append(
                    minion_sls_path[minion_sls_path.find(imported_profile):])
                policy_cfg.append(
                    minion_yml_path[minion_yml_path.find(imported_profile):])

            for minion, ipaddrs in local.cmd(
                    minion,
                    "cephinspector.get_minion_cluster_networks", [],
                    tgt_type="compound").items():
                osd_addrs[minion] = ipaddrs

        if "ceph-mgr" in info["running_services"]:
            policy_cfg.append("role-mgr/cluster/" + minion + ".sls")
            for i in info["running_services"]["ceph-mgr"]:
                mgr_instances.append(i)

        if "ceph-mds" in info["running_services"]:
            policy_cfg.append("role-mds/cluster/" + minion + ".sls")
            for i in info["running_services"]["ceph-mds"]:
                mds_instances.append(i)

        if "ceph-radosgw" in info["running_services"]:
            policy_cfg.append("role-rgw/cluster/" + minion + ".sls")
            for i in info["running_services"]["ceph-radosgw"]:
                rgw_instances.append(i)

        # TODO: what else to do for rgw?  Do we need to do something to
        # populate rgw_configurations in pillar data?

    if not admin_minion:
        print("No nodes found with ceph.client.admin.keyring")
        return False

    # TODO: this is really not very DRY...
    admin_keyring = local.cmd(admin_minion,
                              "cephinspector.get_keyring",
                              ["key=client.admin"],
                              tgt_type="compound")[admin_minion]
    if not admin_keyring:
        print("Could not obtain client.admin keyring")
        return False

    mon_keyring = local.cmd(admin_minion,
                            "cephinspector.get_keyring", ["key=mon."],
                            tgt_type="compound")[admin_minion]
    if not mon_keyring:
        print("Could not obtain mon keyring")
        return False

    osd_bootstrap_keyring = local.cmd(admin_minion,
                                      "cephinspector.get_keyring",
                                      ["key=client.bootstrap-osd"],
                                      tgt_type="compound")[admin_minion]
    if not osd_bootstrap_keyring:
        print("Could not obtain osd bootstrap keyring")
        return False

    # If there are no MGR instances, add the MGR role automatically to all
    # MONs (since Luminous, MGR is a requirement, so it seems reasonable to
    # add this role automatically for imported clusters)
    if not mgr_instances:
        print("No MGRs detected, automatically assigning role-mgr to MONs")
        for minion in mon_minions:
            policy_cfg.append("role-mgr/cluster/" + minion + ".sls")

    with open("/srv/salt/ceph/admin/cache/ceph.client.admin.keyring",
              'w') as keyring:
        keyring.write(admin_keyring)

    with open("/srv/salt/ceph/mon/cache/mon.keyring", 'w') as keyring:
        # following srv/salt/ceph/mon/files/keyring.j2, this includes both mon
        # and admin keyrings
        keyring.write(mon_keyring)
        keyring.write(admin_keyring)

    with open("/srv/salt/ceph/osd/cache/bootstrap.keyring", 'w') as keyring:
        keyring.write(osd_bootstrap_keyring)

    for i in mgr_instances:
        mgr_keyring = local.cmd(admin_minion,
                                "cephinspector.get_keyring", ["key=mgr." + i],
                                tgt_type="compound")[admin_minion]
        if not mgr_keyring:
            print("Could not obtain mgr." + i + " keyring")
            return False
        with open("/srv/salt/ceph/mgr/cache/" + i + ".keyring",
                  'w') as keyring:
            keyring.write(mgr_keyring)

    for i in mds_instances:
        mds_keyring = local.cmd(admin_minion,
                                "cephinspector.get_keyring", ["key=mds." + i],
                                tgt_type="compound")[admin_minion]
        if not mds_keyring:
            print("Could not obtain mds." + i + " keyring")
            return False
        with open("/srv/salt/ceph/mds/cache/" + i + ".keyring",
                  'w') as keyring:
            keyring.write(mds_keyring)

    for i in rgw_instances:
        rgw_keyring = local.cmd(admin_minion,
                                "cephinspector.get_keyring",
                                ["key=client." + i],
                                tgt_type="compound")[admin_minion]
        if not rgw_keyring:
            print("Could not obtain client." + i + " keyring")
            return False
        with open("/srv/salt/ceph/rgw/cache/client." + i + ".keyring",
                  'w') as keyring:
            keyring.write(rgw_keyring)

    # Now policy_cfg reflects the current deployment; make it a bit legible...
    policy_cfg.sort()

    # ...but inject the unassigned line first so it takes precedence,
    # along with the global config bits (because they're prettier early)...
    policy_cfg = [
        "cluster-unassigned/cluster/*.sls",
        "config/stack/default/ceph/cluster.yml",
        "config/stack/default/global.yml"
    ] + policy_cfg

    # ...and write it out (this will fail with EPERM if someone's already
    # created a policy.cfg as root, BTW)
    with open("/srv/pillar/ceph/proposals/policy.cfg", 'w') as policy:
        policy.write("\n".join(policy_cfg) + "\n")

    # We've also got a ceph.conf to play with
    cp = configparser.RawConfigParser()
    # This little bit of nastiness strips whitespace from all the lines, as
    # Python's configparser interprets leading whitespace as a line
    # continuation, whereas Ceph itself is happy to accept leading whitespace.
    cp.readfp(
        StringIO("\n".join([line.strip() for line in ceph_conf.split("\n")])))

    if not cp.has_section("global"):
        print("ceph.conf is missing [global] section")
        return False
    if not cp.has_option("global", "fsid"):
        print("ceph.conf is missing fsid")
        return False

    if not _replace_fsid_with_existing_cluster(cp.get("global", "fsid")):
        log.error(
            "Failed to replace derived fsid with fsid of existing cluster.")
        return [False]

    p_net_dict = _replace_public_network_with_existing_cluster(mon_addrs)
    if not p_net_dict['ret']:
        log.error(("Failed to replace derived public_network with "
                   "public_network of existing cluster."))
        return [False]

    c_net_dict = _replace_cluster_network_with_existing_cluster(
        osd_addrs, p_net_dict['public_networks'])
    if not c_net_dict['ret']:
        log.error(("Failed to replace derived cluster_network with "
                   "cluster_network of existing cluster."))
        return [False]

    # write out the imported ceph.conf
    with open("/srv/salt/ceph/configuration/files/ceph.conf.import",
              'w') as conf:
        conf.write(ceph_conf)

    # ensure the imported config will be used
    _replace_key_in_cluster_yml("configuration_init", "default-import")

    return True
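For orientation, a hypothetical policy.cfg fragment this could produce for a single imported mon/mgr/storage node; the minion name is invented, but the path shapes come straight from the strings appended above:

cluster-unassigned/cluster/*.sls
config/stack/default/ceph/cluster.yml
config/stack/default/global.yml
cluster-ceph/cluster/node1.ceph.sls
profile-import/cluster/node1.ceph.sls
profile-import/stack/default/ceph/minions/node1.ceph.yml
role-master/cluster/node1.ceph.sls
role-mgr/cluster/node1.ceph.sls
role-mon/cluster/node1.ceph.sls
role-mon/stack/default/ceph/minions/node1.ceph.yml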
Example #11
    def ceph_version(self):
        """
        Scan all minions for ceph versions in their repos.
        """
        target = DeepseaMinions()
        search = target.deepsea_minions
        local = salt.client.LocalClient()
        contents = local.cmd(search,
                             'pkg.latest_version', ['ceph-common'],
                             tgt_type="compound")
        for minion, version in contents.items():
            # sometimes, version contains a string value
            # other times, it contains an empty dict
            log.debug("VALIDATE ceph_version: minion ->{}<- latest_version version ->{}<-"
                      .format(minion, version))
            if not version:
                info = local.cmd(minion, 'pkg.info_installed', ['ceph-common'])
                if info and info[minion] and 'version' in info[minion]['ceph-common']:
                    version = info[minion]['ceph-common']['version']
                    log.debug("VALIDATE ceph_version: minion ->{}<- info_installed version ->{}<-"
                              .format(minion, version))
                else:
                    failmsg = ("No Ceph version is available for installation on minion {}"
                               .format(minion))
                    if self.in_dev_env:
                        log.warning('VALIDATE ceph_version: ' + failmsg)
                    else:
                        self.errors.setdefault('ceph_version', []).append(failmsg)
                        continue

            colon_idx = version.find(':')
            if colon_idx != -1:
                version = version[colon_idx+1:]
            dash_idx = version.rfind('-')
            if dash_idx != -1:
                version = version[:dash_idx]
            log.debug("VALIDATE ceph_version: minion ->{}<- final munged version ->{}<-"
                      .format(minion, version))
            assert isinstance(version, str), "version value is not a string"

            # "11.10" < "11.2" in Python terms, but not in terms of
            # version numbering semantics, so we have to break the version number
            # down into its integer components and compare those separately.
            #
            # We can assume that Ceph version numbers will always begin with X.Y.Z
            # where X, Y, and Z are integers. Here, we are only interested in X and Y.
            #
            # In other words, there must be at least two version components and
            # both must be convertible into integers.

            if not all(s.isdigit() for s in version.split(".")[0:2]):
                failmsg = ("Minion {} reports unparseable Ceph version {}"
                           .format(minion, version))
                if self.in_dev_env:
                    log.warning('VALIDATE ceph_version: ' + failmsg)
                else:
                    self.errors.setdefault('ceph_version', []).append(failmsg)
                    continue

            if LooseVersion(version) < LooseVersion(LUMINOUS_VERSION):
                self.errors.setdefault('ceph_version', []).append(
                    "The Ceph version available on minion {} ({}) is older than 'luminous' ({})"
                    .format(minion, version, LUMINOUS_VERSION))

        self._set_pass_status('ceph_version')
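The comment about "11.10" < "11.2" is the reason for the digit check; a minimal sketch of the comparison semantics, assuming LooseVersion comes from distutils.version as is conventional:

from distutils.version import LooseVersion

# Lexicographic string comparison gets version ordering wrong...
assert "11.10" < "11.2"
# ...whereas LooseVersion splits on the dots and compares integer components.
assert LooseVersion("11.10") > LooseVersion("11.2")
assert LooseVersion("12.2.5") > LooseVersion("11.10")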
Example #12
    def __init__(self):
        self.target = DeepseaMinions()
        self.search = self.target.deepsea_minions
        self.local = salt.client.LocalClient()
Example #13
def iperf(cluster=None, exclude=None, output=None, **kwargs):
    """
    An iperf server is started on each minion; clients are then created
    based on each server's CPU count, and that number of other minions is
    asked to act as clients, hitting the server and reporting the total
    bandwidth.

    CLI Example: (Before DeepSea with a cluster configuration)
    .. code-block:: bash
        sudo salt-run net.iperf

    or you can run it with exclude
    .. code-block:: bash
        sudo salt-run net.iperf exclude="E@host*,host-osd-name*,192.168.1.1"

    (After DeepSea with a cluster configuration)
    .. code-block:: bash
        sudo salt-run net.iperf cluster=ceph

    To get the full iperf results for all hosts
    .. code-block:: bash
        sudo salt-run net.iperf cluster=ceph output=full

    """
    exclude_string = exclude_iplist = None
    if exclude:
        exclude_string, exclude_iplist = _exclude_filter(exclude)

    addresses = []
    local = salt.client.LocalClient()
    # Salt targets can use list or string
    if cluster:
        search = "I@cluster:{}".format(cluster)

        if exclude_string:
            search += " and not ( " + exclude_string + " )"
            log.debug("iperf: search {} ".format(search))

        public_networks = local.cmd(search,
                                    'pillar.item', ['public_network'],
                                    tgt_type="compound")
        log.debug("iperf: public_network {} ".format(public_networks))
        cluster_networks = local.cmd(search,
                                     'pillar.item', ['cluster_network'],
                                     tgt_type="compound")
        log.debug("iperf: cluster_network {} ".format(cluster_networks))
        total = local.cmd(search, 'grains.get', ['ipv4'], tgt_type="compound")
        log.debug("iperf: total grains.get {} ".format(total))
        public_addresses = []
        cluster_addresses = []
        for host in sorted(six.iterkeys(total)):
            if 'public_network' in public_networks[host]:
                public_addresses.extend(
                    _address(total[host],
                             public_networks[host]['public_network']))
            if 'cluster_network' in cluster_networks[host]:
                cluster_addresses.extend(
                    _address(total[host],
                             cluster_networks[host]['cluster_network']))
            log.debug("iperf: public_addresses {} ".format(public_addresses))
            log.debug("iperf: cluster_addresses {} ".format(cluster_addresses))
        result = {}
        _create_server(public_addresses)
        p_result = _create_client(public_addresses)
        _create_server(cluster_addresses)
        c_result = _create_client(cluster_addresses)
        p_sort = _add_unit(
            sorted(list(p_result.items()),
                   key=operator.itemgetter(1),
                   reverse=True))
        c_sort = _add_unit(
            sorted(list(c_result.items()),
                   key=operator.itemgetter(1),
                   reverse=True))

        if output:
            result.update({'Public Network': p_sort})
            result.update({'Cluster Network': c_sort})
            return result
        else:
            result.update({
                'Public Network': {
                    "Slowest 2 hosts": p_sort[-2:],
                    "Fastest 2 hosts": p_sort[:2]
                }
            })
            result.update({
                'Cluster Network': {
                    "Slowest 2 hosts": c_sort[-2:],
                    "Fastest 2 hosts": c_sort[:2]
                }
            })
            return result
    else:
        # pylint: disable=redefined-variable-type
        search = DeepseaMinions().deepsea_minions
        if exclude_string:
            search += " and not ( " + exclude_string + " )"
            log.debug("iperf: search {} ".format(search))
        addresses = local.cmd(search,
                              'grains.get', ['ipv4'],
                              tgt_type="compound")
        addresses = _flatten(list(addresses.values()))
        # Lazy loopback removal - use ipaddress when adding IPv6
        if '127.0.0.1' in addresses:
            addresses.remove('127.0.0.1')
        if exclude_iplist:
            for ex_ip in exclude_iplist:
                log.debug("iperf: removing ip {} ".format(ex_ip))
                try:
                    addresses.remove(ex_ip)
                except ValueError:
                    log.debug("iperf: ip {} is not present".format(ex_ip))
        _create_server(addresses)
        result = _create_client(addresses)
        sort_result = _add_unit(
            sorted(list(result.items()),
                   key=operator.itemgetter(1),
                   reverse=True))
        if output:
            return sort_result
        else:
            return {
                "Slowest 2 hosts": sort_result[-2:],
                "Fastest 2 hosts": sort_result[:2]
            }
Example #14
def ping(cluster=None, exclude=None, ping_type=None, **kwargs):
    """
    Ping all addresses from all addresses on all minions.  If cluster is passed,
    restrict addresses to public and cluster networks.

    Note: Some optimizations could be done here in the multi module (such as
    skipping the source and destination when they are the same).  However, the
    unoptimized version takes ~2.5 seconds on 18 minions with 72 addresses
    for success.  Failures take between 6 and 12 seconds.  Optimizations should
    focus there.

    TODO: Convert commented out print statements to log.debug

    CLI Example: (Before DeepSea with a cluster configuration)
    .. code-block:: bash
        sudo salt-run net.ping

    or you can run it with exclude
    .. code-block:: bash
        sudo salt-run net.ping exclude="E@host*,host-osd-name*,192.168.1.1"

    (After DeepSea with a cluster configuration)
    .. code-block:: bash
        sudo salt-run net.ping cluster=ceph
        sudo salt-run net.ping ceph

    """
    exclude_string = exclude_iplist = None
    if exclude:
        exclude_string, exclude_iplist = _exclude_filter(exclude)

    extra_kwargs = _skip_dunder(kwargs)
    if extra_kwargs:
        print("Unsupported parameters: {}".format(", ".join(
            list(extra_kwargs.keys()))))
        text = re.sub(
            re.compile("^ {12}", re.MULTILINE), "", '''
            salt-run net.ping [cluster] [exclude]

            Ping all addresses from all addresses on all minions.
            If cluster is specified, restrict addresses to cluster networks.
            If exclude is specified, remove matching addresses.
            See the Salt compound matchers documentation for details.
            Excluded individual IP addresses are removed as ping
            destinations ("ping to"), not as ping sources ("ping from").


            Examples:
                salt-run net.ping
                salt-run net.ping ceph
                salt-run net.ping ceph [email protected]
                salt-run net.ping cluster=ceph [email protected]
                salt-run net.ping [email protected]
                salt-run net.ping [email protected]/29
                salt-run net.ping exclude="E@host*,host-osd-name*,192.168.1.1"
        ''')
        print(text)
        return ""

    local = salt.client.LocalClient()
    if cluster:
        search = "I@cluster:{}".format(cluster)
        if exclude_string:
            search += " and not ( " + exclude_string + " )"
            log.debug("ping: search {} ".format(search))
        networks = local.cmd(search,
                             'pillar.item',
                             ['cluster_network', 'public_network'],
                             tgt_type="compound")
        total = local.cmd(search, 'grains.get', ['ipv4'], tgt_type="compound")
        addresses = []
        for host in sorted(six.iterkeys(total)):
            if 'cluster_network' in networks[host]:
                addresses.extend(
                    _address(total[host], networks[host]['cluster_network']))
            if 'public_network' in networks[host]:
                addresses.extend(
                    _address(total[host], networks[host]['public_network']))
    else:
        # pylint: disable=redefined-variable-type
        search = DeepseaMinions().deepsea_minions

        if exclude_string:
            search += " and not ( " + exclude_string + " )"
            log.debug("ping: search {} ".format(search))
        addresses = local.cmd(search,
                              'grains.get', ['ipv4'],
                              tgt_type="compound")

        addresses = _flatten(list(addresses.values()))
        # Lazy loopback removal - use ipaddress when adding IPv6
        if '127.0.0.1' in addresses:
            addresses.remove('127.0.0.1')
        if exclude_iplist:
            for ex_ip in exclude_iplist:
                log.debug("ping: removing ip {} ".format(ex_ip))
                try:
                    addresses.remove(ex_ip)
                except ValueError:
                    log.debug("ping: ip {} is not present".format(ex_ip))
    if ping_type == "jumbo":
        results = local.cmd(search,
                            'multi.jumbo_ping',
                            addresses,
                            tgt_type="compound")
    else:
        results = local.cmd(search,
                            'multi.ping',
                            addresses,
                            tgt_type="compound")
    _summarize(len(addresses), results)
    return ""
Example #15
                treated to be equivalent with db taking precedence.
    name=default - Name of the storage profile and thus location of the
                   resulting files.
    format=bluestore - The OSD's underlying storage format. Legal values are
                       bluestore and filestore.
    encryption='' - Set to dmcrypt to encrypt OSD. Leave empty (the default)
                    for non-encrypted OSDs.
    journal-size=5g
    db-size=500m
    wal-size=500m - Sizes for the journal/db/wal partitions respectively.
                    Specify a number with a unit suffix. Unit suffixes as
                    accepted by sgdisk can be used, i.e. kibibytes (K),
                    mebibytes (M), gibibytes (G), tebibytes (T), or
                    pebibytes (P).
'''

TARGET = DeepseaMinions()

STD_ARGS = {
    'leftovers': False,
    'standalone': False,
    'nvme-ssd-spinner': False,
    'nvme-ssd': False,
    'nvme-spinner': False,
    'ssd-spinner': False,
    'ratio': 5,
    'db-ratio': 5,
    'target': TARGET.deepsea_minions,
    'data': 0,
    'journal': 0,
    'wal': 0,
    'name': 'default',