Example #1
def _inspect_ceph_statedir(path):
    """
    Helper function that collects /var/lib/ceph information from all minions.

    Returns a dictionary of Path objects keyed on minion id.

    """
    target = deepsea_minions.DeepseaMinions()
    search = target.deepsea_minions
    local = salt.client.LocalClient()

    # A container of Path objects keyed on minion id.
    statedirs = {}

    for minion, path_info in local.cmd(search, 'fs.inspect_path',
                                       ["path={}".format(path)],
                                       expr_form='compound').iteritems():
        if path_info['ret']:
            statedirs[minion] = Path(
                path, path_info['attrs'], path_info['exists'], path_info['type'],
                Device(path_info['dev_info']['dev'],
                       path_info['dev_info']['part_dev'],
                       path_info['dev_info']['type'],
                       path_info['dev_info']['uuid'],
                       path_info['dev_info']['fstype']),
                Mount(path_info['mount_info']['mountpoint'],
                      path_info['mount_info']['opts']))
        else:
            statedirs[minion] = None

    return statedirs
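A minimal usage sketch (a hypothetical caller, not part of the example above): entries in the returned dictionary are None whenever fs.inspect_path reported failure, so a caller can flag those minions directly.

# Hypothetical caller of the helper shown above.
statedirs = _inspect_ceph_statedir('/var/lib/ceph')
for minion, statedir in statedirs.items():
    if statedir is None:
        print("{}: unable to inspect /var/lib/ceph".format(minion))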
Example #2
def discovery(cluster=None, printer=None, **kwargs):
    """
    Check that the pillar for each cluster meets the requirements to install
    a Ceph cluster.
    """
    if not cluster:
        usage(func='discovery')
        exit(1)

    local = salt.client.LocalClient()

    # Restrict search to this cluster
    target = deepsea_minions.DeepseaMinions()
    search = target.deepsea_minions
    if 'cluster' in __pillar__:
        if __pillar__['cluster']:
            # pylint: disable=redefined-variable-type
            # Salt accepts either list or string as target
            search = "I@cluster:{}".format(cluster)

    pillar_data = local.cmd(search, 'pillar.items', [], expr_form="compound")

    printer = get_printer(**kwargs)
    valid = Validate(cluster, data=pillar_data, printer=printer)

    valid.deepsea_minions(target)
    valid.lint_yaml_files()
    if not valid.in_dev_env:
        valid.profiles_populated()
    valid.report()

    if valid.errors:
        return False

    return True
Example #3
    def __init__(self, settings, writer, **kwargs):
        """
        Track cluster names, set minions to actively responding minions

        Allow overriding of default cluster
        """
        self.root_dir = settings.root_dir
        if 'cluster' in kwargs:
            self.names = kwargs['cluster']
        else:
            self.names = ['ceph']
        self.writer = writer

        target = deepsea_minions.DeepseaMinions()
        search = target.deepsea_minions

        local = salt.client.LocalClient()
        self.minions = local.cmd(search,
                                 'grains.get', ['id'],
                                 expr_form="compound")

        _rgws = local.cmd(search,
                          'pillar.get', ['rgw_configurations'],
                          expr_form="compound")
        for node in _rgws.keys():
            self.rgw_configurations = _rgws[node]
            # Just need first
            break
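The final loop above only needs the value from one minion. A minimal standalone sketch of that "take the first value" step (the _rgws contents here are made up):

_rgws = {'rgw1.ceph': ['rgw']}  # hypothetical pillar.get result keyed on minion id
rgw_configurations = next(iter(_rgws.values())) if _rgws else []
print(rgw_configurations)  # ['rgw']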
Example #4
    def ceph_version(self):
        """
        Scan all minions for ceph versions in their repos.
        """
        target = deepsea_minions.DeepseaMinions()
        search = target.deepsea_minions
        local = salt.client.LocalClient()
        contents = local.cmd(search, 'pkg.latest_version', ['ceph'], expr_form="compound")
        for minion, version in contents.items():
            if not version:
                info = local.cmd(minion, 'pkg.info_installed', ['ceph'])
                if info and 'version' in info[minion]['ceph']:
                    version = info[minion]['ceph']['version']
                else:
                    self.errors.setdefault('ceph_version', []).append(
                        "No Ceph version is available for installation in {}".format(minion))
                    continue

            colon_idx = version.find(':')
            if colon_idx != -1:
                version = version[colon_idx+1:]
            dash_idx = version.rfind('-')
            if dash_idx != -1:
                version = version[:dash_idx]
            if version < JEWEL_VERSION:
                self.errors.setdefault('ceph_version', []).append(
                    "The Ceph version available in {} is older than 'jewel' (10.2)"
                    .format(minion))

        self._set_pass_status('ceph_version')
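A minimal sketch of the version normalization applied above, pulled out as a standalone helper (the helper name is hypothetical): the epoch prefix before ':' and the release suffix after the last '-' are stripped before comparing against JEWEL_VERSION.

def _strip_epoch_and_release(version):
    # Mirrors the slicing in ceph_version() above.
    colon_idx = version.find(':')
    if colon_idx != -1:
        version = version[colon_idx + 1:]
    dash_idx = version.rfind('-')
    if dash_idx != -1:
        version = version[:dash_idx]
    return version

assert _strip_epoch_and_release("1:10.2.3-5.1") == "10.2.3"
assert _strip_epoch_and_release("12.2.5") == "12.2.5"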
Example #5
    def ceph_version(self):
        """
        Scan all minions for ceph versions in their repos.
        """
        JEWEL_VERSION = "10.2"
        target = deepsea_minions.DeepseaMinions()
        search = target.deepsea_minions
        local = salt.client.LocalClient()
        contents = local.cmd(search,
                             'cmd.shell', ['/usr/bin/zypper info ceph'],
                             expr_form="compound")

        for minion in contents.keys():
            m = re.search(r'Version: (\S+)', contents[minion])
            # Skip minions with no ceph repo
            if m:
                version = m.group(1)

                # String comparison works for now
                if version < JEWEL_VERSION:
                    msg = "ceph version {} on minion {}".format(
                        version, minion)
                    self.errors.setdefault('ceph_version', []).append(msg)

        self._set_pass_status('ceph_version')
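A minimal sketch of the Version extraction above, run against fabricated output shaped like what the regex expects (real zypper output may format the field differently):

import re

sample = "Information for package ceph:\nVersion: 12.2.5-1.1\nStatus: up-to-date"
m = re.search(r'Version: (\S+)', sample)
print(m.group(1))            # "12.2.5-1.1"
print(m.group(1) < "10.2")   # False, i.e. at least jewel under string comparison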
Example #6
def _get_existing_cluster_network(addrs, public_network=None):
    """
    Based on the addrs dictionary { minion: [ ipaddress ] }, this function
    returns an address consisting of the network address followed by the CIDR
    prefix length (e.g. 10.0.0.0/24), or None.
    """
    target = deepsea_minions.DeepseaMinions()
    search = target.deepsea_minions

    local = salt.client.LocalClient()
    # Stores the derived network addresses (in CIDR notation) of all addresses contained in addrs.
    minion_networks = []
    # The network address (in CIDR notation) that we return after collapsing minion_networks,
    # or None.
    network = None

    # Grab network interfaces from salt.
    minion_network_interfaces = local.cmd(search,
                                          "network.interfaces", [],
                                          expr_form="compound")
    # Remove lo.
    for entry in minion_network_interfaces:
        try:
            del minion_network_interfaces[entry]["lo"]
        except KeyError:
            pass

    for minion, ipaddr in addrs.items():
        # Only continue if ipaddr is present.
        for i in ipaddr:
            for intf, data in minion_network_interfaces[minion].items():
                if "inet" in data:
                    for inet_data in data["inet"]:
                        if i == "0.0.0.0":
                            # If running on 0.0.0.0, assume we can use public_network
                            ip = ipaddress.ip_interface(
                                u"{}".format(public_network)
                            ) if public_network else ipaddress.ip_interface(
                                u"{}/{}".format(i, inet_data["netmask"]))
                            minion_networks.append(str(ip.network))
                        elif inet_data["address"] == i:
                            ip = ipaddress.ip_interface(u"{}/{}".format(
                                inet_data["address"], inet_data["netmask"]))
                            minion_networks.append(str(ip.network))

    # Check for consistency across all entries.
    if len(set(minion_networks)) == 1:
        # We have equal entries.
        network = minion_networks[0]
    else:
        # We have multiple possible networks.  This is liable to happen with OSDs
        # when there is a private cluster network.  Let's try to remove the public
        # network.
        minion_networks = [n for n in minion_networks if n != public_network]
        network = minion_networks[0] if len(
            set(minion_networks)) == 1 else None

    return network
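A minimal sketch of the CIDR derivation used above (made-up address and netmask): ipaddress.ip_interface collapses an address/netmask pair into its network in CIDR notation, which is what gets collected in minion_networks.

import ipaddress

ip = ipaddress.ip_interface(u"192.168.100.12/255.255.255.0")
print(str(ip.network))  # "192.168.100.0/24"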
Example #7
    def __init__(self, local):
        """
        Query the cluster assignment and remove unassigned
        """
        target = deepsea_minions.DeepseaMinions()
        search = target.deepsea_minions
        self.minions = local.cmd(search, 'pillar.get', ['cluster'])

        self.names = dict(self._clusters())
        if 'unassigned' in self.names:
            self.names.pop('unassigned')
Example #8
    def __init__(self, settings, cluster, servers, writer):
        """
        Initialize role secrets, track parameters
        """
        self.cluster = cluster
        self.servers = servers
        self.writer = writer

        self.root_dir = settings.root_dir
        target = deepsea_minions.DeepseaMinions()
        self.search = target.deepsea_minions

        self.networks = self._networks(self.servers)
        self.public_networks, self.cluster_networks = self.public_cluster(
            self.networks)

        self.available_roles = ['storage']
Example #9
def saltapi(**kwargs):
    """
    """
    target = deepsea_minions.DeepseaMinions()
    search = target.deepsea_minions
    local = salt.client.LocalClient()

    pillar_data = local.cmd(search, 'pillar.items', [], expr_form="compound")
    printer = get_printer(**kwargs)

    v = Validate("salt-api", pillar_data, [], printer)
    v.saltapi()
    v.report()

    if v.errors:
        return False

    return True
Example #10
def deploy(**kwargs):
    """
    Verify that Stage 4, Services can succeed.
    """
    target = deepsea_minions.DeepseaMinions()
    search = target.deepsea_minions
    local = salt.client.LocalClient()
    pillar_data = local.cmd(search, 'pillar.items', [], expr_form="compound")
    grains_data = local.cmd(search, 'grains.items', [], expr_form="compound")
    printer = get_printer(**kwargs)

    valid = Validate("deploy", pillar_data, grains_data, printer)
    valid.openattic()
    valid.report()

    if valid.errors:
        return False

    return True
Example #11
def setup(**kwargs):
    """
    Check that initial files prior to any stage are correct
    """
    target = deepsea_minions.DeepseaMinions()
    search = target.deepsea_minions
    local = salt.client.LocalClient()

    pillar_data = local.cmd(search, 'pillar.items', [], expr_form="compound")
    printer = get_printer(**kwargs)

    valid = Validate("setup", pillar_data, [], printer)
    valid.deepsea_minions(target)
    valid.master_minion()
    valid.ceph_version()
    valid.report()

    if valid.errors:
        return False

    return True
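The saltapi, deploy and setup wrappers above all follow the same shape; a condensed sketch of that shared pattern (the helper name and check-list argument are hypothetical, and the same module-level imports as the wrappers above are assumed):

def _validate(name, checks, **kwargs):
    # Gather pillar data from all DeepSea minions and run the named checks.
    search = deepsea_minions.DeepseaMinions().deepsea_minions
    local = salt.client.LocalClient()
    pillar_data = local.cmd(search, 'pillar.items', [], expr_form="compound")
    valid = Validate(name, pillar_data, [], get_printer(**kwargs))
    for check in checks:
        getattr(valid, check)()
    valid.report()
    return not valid.errors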
Example #12
                treated to be equivalent with db taking precedence.
    name=default - Name of the storage profile and thus location of the
                   resulting files.
    format=bluestore - The OSDs' underlying storage format. Legal values are
                       bluestore and filestore.
    encryption='' - Set to dmcrypt to encrypt OSD. Leave empty (the default)
                    for non-encrypted OSDs.
    journal-size=5g
    db-size=500m
    wal-size=500m - Sizes for journal/db/wal partitions respectively. Specify a
                    number with a unit suffix. Unit suffixes as accepted by
                    sgdisk can be used, i.e. kibibytes (K), mebibytes (M),
                    gibibytes (G), tebibytes (T), or pebibytes (P).
'''

TARGET = deepsea_minions.DeepseaMinions()

STD_ARGS = {
    'leftovers': False,
    'standalone': False,
    'nvme-ssd-spinner': False,
    'nvme-ssd': False,
    'nvme-spinner': False,
    'ssd-spinner': False,
    'ratio': 5,
    'db-ratio': 5,
    'target': TARGET.deepsea_minions,
    'data': 0,
    'journal': 0,
    'wal': 0,
    'name': 'default',
Example #13
File: net.py Project: rjfd/DeepSea
def iperf(cluster=None, exclude=None, output=None, **kwargs):
    """
    An iperf server is created on each minion, and then clients are created
    based on the server's CPU count; that number of other minions is asked
    to act as clients, hit the server, and report the total bandwidth.

    CLI Example: (Before DeepSea with a cluster configuration)
    .. code-block:: bash
        sudo salt-run net.iperf

    or you can run it with exclude
    .. code-block:: bash
        sudo salt-run net.iperf exclude="E@host*,host-osd-name*,192.168.1.1"

    (After DeepSea with a cluster configuration)
    .. code-block:: bash
        sudo salt-run net.iperf cluster=ceph

    To get the iperf results for all hosts
    .. code-block:: bash
        sudo salt-run net.iperf cluster=ceph output=full

    """
    exclude_string = exclude_iplist = None
    if exclude:
        exclude_string, exclude_iplist = _exclude_filter(exclude)

    addresses = []
    local = salt.client.LocalClient()
    # Salt targets can use list or string
    # pylint: disable=redefined-variable-type
    if cluster:
        search = "I@cluster:{}".format(cluster)

        if exclude_string:
            search += " and not ( " + exclude_string + " )"
            log.debug("iperf: search {} ".format(search))

        public_networks = local.cmd(search,
                                    'pillar.item', ['public_network'],
                                    expr_form="compound")
        log.debug("iperf: public_network {} ".format(public_networks))
        cluster_networks = local.cmd(search,
                                     'pillar.item', ['cluster_network'],
                                     expr_form="compound")
        log.debug("iperf: cluster_network {} ".format(cluster_networks))
        total = local.cmd(search, 'grains.get', ['ipv4'], expr_form="compound")
        log.debug("iperf: total grains.get {} ".format(total))
        public_addresses = []
        cluster_addresses = []
        for host in sorted(total.iterkeys()):
            if 'public_network' in public_networks[host]:
                public_addresses.extend(
                    _address(total[host],
                             public_networks[host]['public_network']))
            if 'cluster_network' in cluster_networks[host]:
                cluster_addresses.extend(
                    _address(total[host],
                             cluster_networks[host]['cluster_network']))
            log.debug("iperf: public_network {} ".format(public_addresses))
            log.debug("iperf: cluster_network {} ".format(cluster_addresses))
        result = {}
        _create_server(public_addresses)
        p_result = _create_client(public_addresses)
        _create_server(cluster_addresses)
        c_result = _create_client(cluster_addresses)
        p_sort = _add_unit(
            sorted(p_result.items(), key=operator.itemgetter(1), reverse=True))
        c_sort = _add_unit(
            sorted(c_result.items(), key=operator.itemgetter(1), reverse=True))

        if output:
            result.update({'Public Network': p_sort})
            result.update({'Cluster Network': c_sort})
            return result
        else:
            result.update({
                'Public Network': {
                    "Slowest 2 hosts": p_sort[-2:],
                    "Fastest 2 hosts": p_sort[:2]
                }
            })
            result.update({
                'Cluster Network': {
                    "Slowest 2 hosts": c_sort[-2:],
                    "Fastest 2 hosts": c_sort[:2]
                }
            })
            return result
    else:
        search = deepsea_minions.DeepseaMinions().deepsea_minions
        if exclude_string:
            search += " and not ( " + exclude_string + " )"
            log.debug("ping: search {} ".format(search))
        addresses = local.cmd(search,
                              'grains.get', ['ipv4'],
                              expr_form="compound")
        addresses = _flatten(addresses.values())
        # Lazy loopback removal - use ipaddress when adding IPv6
        if addresses and '127.0.0.1' in addresses:
            addresses.remove('127.0.0.1')
        if exclude_iplist:
            for ex_ip in exclude_iplist:
                log.debug("ping: removing {} ip ".format(ex_ip))
                try:
                    addresses.remove(ex_ip)
                except ValueError:
                    log.debug("ping: ip {} is not in the address list".format(ex_ip))
        _create_server(addresses)
        result = _create_client(addresses)
        sort_result = _add_unit(
            sorted(result.items(), key=operator.itemgetter(1), reverse=True))
        if output:
            return sort_result
        else:
            return {
                "Slowest 2 hosts": sort_result[-2:],
                "Fastest 2 hosts": sort_result[:2]
            }
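A minimal sketch (made-up bandwidth numbers, with the _add_unit formatting omitted) of the ranking applied above: host/bandwidth pairs are sorted in descending order and the extremes are sliced out.

import operator

result = {'data1': 941.0, 'data2': 127.3, 'data3': 883.6, 'data4': 312.9}
ranked = sorted(result.items(), key=operator.itemgetter(1), reverse=True)
print(ranked[:2])   # fastest 2 hosts
print(ranked[-2:])  # slowest 2 hosts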
Example #14
File: net.py Project: rjfd/DeepSea
def ping(cluster=None, exclude=None, ping_type=None, **kwargs):
    """
    Ping all addresses from all addresses on all minions.  If cluster is passed,
    restrict addresses to public and cluster networks.

    Note: Some optimizations could be done here in the multi module (such as
    skipping the source and destination when they are the same).  However, the
    unoptimized version is taking ~2.5 seconds on 18 minions with 72 addresses
    for success.  Failures take between 6 and 12 seconds.  Optimizations should
    focus there.

    TODO: Convert commented out print statements to log.debug

    CLI Example: (Before DeepSea with a cluster configuration)
    .. code-block:: bash
        sudo salt-run net.ping

    or you can run it with exclude
    .. code-block:: bash
        sudo salt-run net.ping exclude="E@host*,host-osd-name*,192.168.1.1"

    (After DeepSea with a cluster configuration)
    .. code-block:: bash
        sudo salt-run net.ping cluster=ceph
        sudo salt-run net.ping ceph

    """
    exclude_string = exclude_iplist = None
    if exclude:
        exclude_string, exclude_iplist = _exclude_filter(exclude)

    extra_kwargs = _skip_dunder(kwargs)
    if _skip_dunder(kwargs):
        print "Unsupported parameters:{}".format(" ,".join(
            extra_kwargs.keys()))
        text = re.sub(
            re.compile("^ {12}", re.MULTILINE), "", '''
            salt-run net.ping [cluster] [exclude]

            Ping all addresses from all addresses on all minions.
            If cluster is specified, restrict addresses to cluster networks.
            If exclude is specified, remove matching addresses.
            For details, see the Salt compound matchers documentation.
            Individually excluded IP addresses are removed as ping
            destinations rather than as ping sources.


            Examples:
                salt-run net.ping
                salt-run net.ping ceph
                salt-run net.ping ceph [email protected]
                salt-run net.ping cluster=ceph [email protected]
                salt-run net.ping [email protected]
                salt-run net.ping [email protected]/29
                salt-run net.ping exclude="E@host*,host-osd-name*,192.168.1.1"
        ''')
        print text
        return ""

    local = salt.client.LocalClient()
    # pylint: disable=redefined-variable-type
    if cluster:
        search = "I@cluster:{}".format(cluster)
        if exclude_string:
            search += " and not ( " + exclude_string + " )"
            log.debug("ping: search {} ".format(search))
        networks = local.cmd(search,
                             'pillar.item',
                             ['cluster_network', 'public_network'],
                             expr_form="compound")
        total = local.cmd(search, 'grains.get', ['ipv4'], expr_form="compound")
        addresses = []
        for host in sorted(total.iterkeys()):
            if 'cluster_network' in networks[host]:
                addresses.extend(
                    _address(total[host], networks[host]['cluster_network']))
            if 'public_network' in networks[host]:
                addresses.extend(
                    _address(total[host], networks[host]['public_network']))
    else:
        search = deepsea_minions.DeepseaMinions().deepsea_minions

        if exclude_string:
            search += " and not ( " + exclude_string + " )"
            log.debug("ping: search {} ".format(search))
        addresses = local.cmd(search,
                              'grains.get', ['ipv4'],
                              expr_form="compound")

        addresses = _flatten(addresses.values())
        # Lazy loopback removal - use ipaddress when adding IPv6
        if addresses and '127.0.0.1' in addresses:
            addresses.remove('127.0.0.1')
        if exclude_iplist:
            for ex_ip in exclude_iplist:
                log.debug("ping: removing {} ip ".format(ex_ip))
                try:
                    addresses.remove(ex_ip)
                except ValueError:
                    log.debug("ping: ip {} is not in the address list".format(ex_ip))
    if ping_type == "jumbo":
        results = local.cmd(search,
                            'multi.jumbo_ping',
                            addresses,
                            expr_form="compound")
    else:
        results = local.cmd(search,
                            'multi.ping',
                            addresses,
                            expr_form="compound")
    _summarize(len(addresses), results)
    return ""
Example #15
def engulf_existing_cluster(**kwargs):
    """
    Assuming proposals() has already been run to collect hardware profiles and
    all possible role assignments and common configuration, this will generate
    a policy.cfg with roles and assignments reflecting whatever cluster is
    currently deployed.  It will also suck in all the keyrings so that they're
    present when the configure stage is run.

    This assumes your cluster is named "ceph".  If it's not, things will break.
    """
    local = salt.client.LocalClient()
    settings = Settings()
    salt_writer = SaltWriter(**kwargs)

    # Make sure deepsea_minions contains valid minions before proceeding with engulf.
    minions = deepsea_minions.DeepseaMinions()
    search = minions.deepsea_minions
    validator = validate.Validate(
        "ceph", local.cmd(search, 'pillar.items', [], expr_form="compound"),
        [], validate.get_printer())
    validator.deepsea_minions(minions)
    if validator.errors:
        validator.report()
        return False

    policy_cfg = []

    # Check for firewall/apparmor.
    if not ready.check("ceph", True, search):
        return False

    # First, hand apply select Stage 0 functions
    local.cmd(search, "saltutil.sync_all", [], expr_form="compound")
    local.cmd(search, "state.apply", ["ceph.mines"], expr_form="compound")

    # Run proposals gathering directly.
    proposals()

    # Our imported hardware profile proposal path
    imported_profile = "profile-import"
    imported_profile_path = settings.root_dir + "/" + imported_profile

    # Used later on to compute cluster and public networks.
    mon_addrs = {}
    osd_addrs = {}

    ceph_conf = None
    previous_minion = None
    admin_minion = None

    mon_minions = []
    mgr_instances = []
    mds_instances = []
    rgw_instances = []

    for minion, info in local.cmd(search,
                                  "cephinspector.inspect", [],
                                  expr_form="compound").items():

        if type(info) is not dict:
            print("cephinspector.inspect failed on %s: %s" % (minion, info))
            return False

        if info["ceph_conf"] is not None:
            if ceph_conf is None:
                ceph_conf = info["ceph_conf"]
            else:
                if info["ceph_conf"] != ceph_conf:
                    # TODO: what's the best way to report errors from a runner?
                    print("ceph.conf on %s doesn't match ceph.conf on %s" %
                          (minion, previous_minion))
                    return False
            previous_minion = minion

        is_admin = info["has_admin_keyring"]

        if admin_minion is None and is_admin:
            # We'll talk to this minion later to obtain keyrings
            admin_minion = minion

        is_master = local.cmd(minion,
                              "pillar.get", ["master_minion"],
                              expr_form="compound")[minion] == minion

        if not info["running_services"].keys(
        ) and not is_admin and not is_master:
            # No ceph services running, no admin key, not the master_minion,
            # don't assign it to the cluster
            continue

        policy_cfg.append("cluster-ceph/cluster/" + minion + ".sls")

        if is_master:
            policy_cfg.append("role-master/cluster/" + minion + ".sls")
        elif is_admin:
            policy_cfg.append("role-admin/cluster/" + minion + ".sls")

        if "ceph-mon" in info["running_services"].keys():
            mon_minions.append(minion)
            policy_cfg.append("role-mon/cluster/" + minion + ".sls")
            policy_cfg.append("role-mon/stack/default/ceph/minions/" + minion +
                              ".yml")
            for minion, ipaddrs in local.cmd(
                    minion,
                    "cephinspector.get_minion_public_networks", [],
                    expr_form="compound").items():
                mon_addrs[minion] = ipaddrs

        if "ceph-osd" in info["running_services"].keys():
            # Needs a storage profile assigned (which may be different
            # than the proposals deepsea has come up with, depending on
            # how things were deployed)
            ceph_disks = local.cmd(minion,
                                   "cephinspector.get_ceph_disks_yml", [],
                                   expr_form="compound")
            if not ceph_disks:
                log.error("Failed to get list of Ceph OSD disks.")
                return [False]

            for minion, store in ceph_disks.items():
                minion_yml_dir = imported_profile_path + "/stack/default/ceph/minions"
                minion_yml_path = minion_yml_dir + "/" + minion + ".yml"
                _create_dirs(minion_yml_dir, "")
                salt_writer.write(minion_yml_path, store)

                minion_sls_data = {"roles": ["storage"]}
                minion_sls_dir = imported_profile_path + "/cluster"
                minion_sls_path = minion_sls_dir + "/" + minion + ".sls"
                _create_dirs(minion_sls_dir, "")
                salt_writer.write(minion_sls_path, minion_sls_data)

                policy_cfg.append(
                    minion_sls_path[minion_sls_path.find(imported_profile):])
                policy_cfg.append(
                    minion_yml_path[minion_yml_path.find(imported_profile):])

            for minion, ipaddrs in local.cmd(
                    minion,
                    "cephinspector.get_minion_cluster_networks", [],
                    expr_form="compound").items():
                osd_addrs[minion] = ipaddrs

        if "ceph-mgr" in info["running_services"].keys():
            policy_cfg.append("role-mgr/cluster/" + minion + ".sls")
            for i in info["running_services"]["ceph-mgr"]:
                mgr_instances.append(i)

        if "ceph-mds" in info["running_services"].keys():
            policy_cfg.append("role-mds/cluster/" + minion + ".sls")
            for i in info["running_services"]["ceph-mds"]:
                mds_instances.append(i)

        if "ceph-radosgw" in info["running_services"].keys():
            policy_cfg.append("role-rgw/cluster/" + minion + ".sls")
            for i in info["running_services"]["ceph-radosgw"]:
                rgw_instances.append(i)

        # TODO: what else to do for rgw?  Do we need to do something to
        # populate rgw_configurations in pillar data?

    if not admin_minion:
        print("No nodes found with ceph.client.admin.keyring")
        return False

    # TODO: this is really not very DRY...
    admin_keyring = local.cmd(admin_minion,
                              "cephinspector.get_keyring",
                              ["key=client.admin"],
                              expr_form="compound")[admin_minion]
    if not admin_keyring:
        print("Could not obtain client.admin keyring")
        return False

    mon_keyring = local.cmd(admin_minion,
                            "cephinspector.get_keyring", ["key=mon."],
                            expr_form="compound")[admin_minion]
    if not mon_keyring:
        print("Could not obtain mon keyring")
        return False

    osd_bootstrap_keyring = local.cmd(admin_minion,
                                      "cephinspector.get_keyring",
                                      ["key=client.bootstrap-osd"],
                                      expr_form="compound")[admin_minion]
    if not osd_bootstrap_keyring:
        print("Could not obtain osd bootstrap keyring")
        return False

    # If there's no MGR instances, add MGR roles automatically to all the MONs
    # (since Luminous, MGR is a requirement, so it seems reasonable to add this
    # role automatically for imported clusters)
    if not mgr_instances:
        print("No MGRs detected, automatically assigning role-mgr to MONs")
        for minion in mon_minions:
            policy_cfg.append("role-mgr/cluster/" + minion + ".sls")

    with open("/srv/salt/ceph/admin/cache/ceph.client.admin.keyring",
              'w') as keyring:
        keyring.write(admin_keyring)

    with open("/srv/salt/ceph/mon/cache/mon.keyring", 'w') as keyring:
        # following srv/salt/ceph/mon/files/keyring.j2, this includes both mon
        # and admin keyrings
        keyring.write(mon_keyring)
        keyring.write(admin_keyring)

    with open("/srv/salt/ceph/osd/cache/bootstrap.keyring", 'w') as keyring:
        keyring.write(osd_bootstrap_keyring)

    for i in mgr_instances:
        mgr_keyring = local.cmd(admin_minion,
                                "cephinspector.get_keyring", ["key=mgr." + i],
                                expr_form="compound")[admin_minion]
        if not mgr_keyring:
            print("Could not obtain mgr." + i + " keyring")
            return False
        with open("/srv/salt/ceph/mgr/cache/" + i + ".keyring",
                  'w') as keyring:
            keyring.write(mgr_keyring)

    for i in mds_instances:
        mds_keyring = local.cmd(admin_minion,
                                "cephinspector.get_keyring", ["key=mds." + i],
                                expr_form="compound")[admin_minion]
        if not mds_keyring:
            print("Could not obtain mds." + i + " keyring")
            return False
        with open("/srv/salt/ceph/mds/cache/" + i + ".keyring",
                  'w') as keyring:
            keyring.write(mds_keyring)

    for i in rgw_instances:
        rgw_keyring = local.cmd(admin_minion,
                                "cephinspector.get_keyring",
                                ["key=client." + i],
                                expr_form="compound")[admin_minion]
        if not rgw_keyring:
            print("Could not obtain client." + i + " keyring")
            return False
        with open("/srv/salt/ceph/rgw/cache/client." + i + ".keyring",
                  'w') as keyring:
            keyring.write(rgw_keyring)

    # Now policy_cfg reflects the current deployment, make it a bit legible...
    policy_cfg.sort()

    # ...but inject the unassigned line first so it takes precedence,
    # along with the global config bits (because they're prettier early)...
    policy_cfg = [
        "cluster-unassigned/cluster/*.sls",
        "config/stack/default/ceph/cluster.yml",
        "config/stack/default/global.yml"
    ] + policy_cfg

    # ...and write it out (this will fail with EPERM if someone's already
    # created a policy.cfg as root, BTW)
    with open("/srv/pillar/ceph/proposals/policy.cfg", 'w') as policy:
        policy.write("\n".join(policy_cfg) + "\n")

    # We've also got a ceph.conf to play with
    cp = configparser.RawConfigParser()
    # This little bit of nastiness strips whitespace from all the lines, as
    # Python's configparser interprets leading whitespace as a line continuation,
    # whereas ceph itself is happy to have leading whitespace.
    cp.readfp(
        StringIO("\n".join([line.strip() for line in ceph_conf.split("\n")])))

    if not cp.has_section("global"):
        print("ceph.conf is missing [global] section")
        return False
    if not cp.has_option("global", "fsid"):
        print("ceph.conf is missing fsid")
        return False

    if not _replace_fsid_with_existing_cluster(cp.get("global", "fsid")):
        log.error(
            "Failed to replace derived fsid with fsid of existing cluster.")
        return [False]

    p_net_dict = _replace_public_network_with_existing_cluster(mon_addrs)
    if not p_net_dict['ret']:
        log.error(
            "Failed to replace derived public_network with public_network of existing cluster."
        )
        return [False]

    c_net_dict = _replace_cluster_network_with_existing_cluster(
        osd_addrs, p_net_dict['public_network'])
    if not c_net_dict['ret']:
        log.error(
            "Failed to replace derived cluster_network with cluster_network of existing cluster."
        )
        return [False]

    # write out the imported ceph.conf
    with open("/srv/salt/ceph/configuration/files/ceph.conf.import",
              'w') as conf:
        conf.write(ceph_conf)

    # ensure the imported config will be used
    _replace_key_in_cluster_yml("configuration_init", "default-import")

    return True
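A minimal sketch (hypothetical minion names) of the policy.cfg assembly near the end: the collected role lines are sorted, the unassigned and global config entries are prepended, and the result is written one entry per line.

policy_cfg = [
    "role-mon/cluster/mon1.ceph.sls",
    "cluster-ceph/cluster/mon1.ceph.sls",
    "role-master/cluster/admin.ceph.sls",
]
policy_cfg.sort()
policy_cfg = [
    "cluster-unassigned/cluster/*.sls",
    "config/stack/default/ceph/cluster.yml",
    "config/stack/default/global.yml",
] + policy_cfg
print("\n".join(policy_cfg) + "\n")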
Example #16
                treated to be equivalent with db taking precedence.
    name=default - Name of the storage profile and thus location of the
                   resulting files.
    format=bluestore - The OSDs' underlying storage format. Legal values are
                       bluestore and filestore.
    encryption='' - Set to dmcrypt to encrypt OSD. Leave empty (the default)
                    for non-encrypted OSDs.
    journal-size=5g
    db-size=500m
    wal-size=500m - Sizes for journal/db/wal partitions respectively. Specify a
                    number with a unit suffix. Unit suffixes as accepted by
                    sgdisk can be used, i.e. kibibytes (K), mebibytes (M),
                    gibibytes (G), tebibytes (T), or pebibytes (P).
'''

target = deepsea_minions.DeepseaMinions()

std_args = {
    'leftovers': False,
    'standalone': False,
    'nvme-ssd-spinner': False,
    'nvme-ssd': False,
    'nvme-spinner': False,
    'ssd-spinner': False,
    'ratio': 5,
    'db-ratio': 5,
    'target': target.deepsea_minions,
    'data': 0,
    'journal': 0,
    'wal': 0,
    'name': 'default',