Example #1
0
    def _crushmap_rule_delete(self, name):
        """Remove the crushmap rule belonging to the named storage tier.

        Requires that the initial crushmap has already been applied and
        refuses to delete the default tier's rule.
        """
        # Refuse to touch rules until the base crushmap has been applied.
        applied_flag = os.path.join(constants.SYSINV_CONFIG_PATH,
                                    constants.CEPH_CRUSH_MAP_APPLIED)
        if not os.path.isfile(applied_flag):
            raise exception.CephCrushMapNotApplied(
                reason="Cannot remove any additional rules.")

        # The default tier's rule must never be removed.
        default_root = self._format_root_name(self._default_tier)
        root_name = self._format_root_name(name)
        if root_name == default_root:
            raise exception.CephCrushInvalidTierUse(
                tier=name,
                reason=("Cannot remove the rule for tier '%s'.") %
                       default_root)

        # Look up the tier's rule in the live crushmap.
        is_present, rule_name, _rule_count = self._crush_rule_status(
            root_name)
        if not is_present:
            raise exception.CephCrushInvalidRuleOperation(
                rule=rule_name,
                reason=("Rule '%s' is not present in the crushmap. No action "
                        "taken.") % rule_name)

        LOG.info("ceph osd crush rule rm %s" % rule_name)
        response, body = self._ceph_api.osd_crush_rule_rm(rule_name,
                                                          body='json')
        LOG.info("CRUSH: %d :%s" % (response.status_code, body['status']))
Example #2
0
def fix_crushmap(dbapi=None):
    """Set Ceph's CRUSH map based on the storage model.

    Compiles the model-specific crushmap text file with ``crushtool``,
    loads it via ``ceph osd setcrushmap`` and creates a flag file so the
    update is performed only once.

    :param dbapi: sysinv DB API handle; defaults to the pecan request's.
    :returns: True if the crushmap was applied, False if it was already
              applied (flag file present).
    :raises exception.CephCrushMapNotApplied: if compiling or loading
            the crushmap fails.
    """
    if not dbapi:
        dbapi = pecan.request.dbapi
    crushmap_flag_file = os.path.join(constants.SYSINV_CONFIG_PATH,
                                      constants.CEPH_CRUSH_MAP_APPLIED)
    if os.path.isfile(crushmap_flag_file):
        # Already applied once; do not reload the crushmap.
        # (Explicit False instead of falling through with None, consistent
        # with the other fix_crushmap implementations.)
        return False

    # Pick the crushmap template that matches the deployment model.
    stor_model = get_ceph_storage_model(dbapi)
    if stor_model == constants.CEPH_AIO_SX_MODEL:
        crushmap_txt = "/etc/sysinv/crushmap-aio-sx.txt"
    elif stor_model == constants.CEPH_CONTROLLER_MODEL:
        crushmap_txt = "/etc/sysinv/crushmap-controller-model.txt"
    else:
        crushmap_txt = "/etc/sysinv/crushmap-storage-model.txt"
    LOG.info("Updating crushmap with: %s" % crushmap_txt)

    try:
        # Compile crushmap
        crushmap_bin = "/etc/sysinv/crushmap.bin"
        subprocess.check_output("crushtool -c %s "
                                "-o %s" % (crushmap_txt, crushmap_bin),
                                stderr=subprocess.STDOUT,
                                shell=True)
        # Set crushmap
        subprocess.check_output("ceph osd setcrushmap -i %s" %
                                crushmap_bin,
                                stderr=subprocess.STDOUT,
                                shell=True)
    except subprocess.CalledProcessError as e:
        # May not be critical, depends on where this is called.
        reason = "Error: %s Output: %s" % (str(e), e.output)
        raise exception.CephCrushMapNotApplied(reason=reason)

    # Best-effort marker; the crushmap itself was loaded successfully,
    # so a failure here is only logged.
    try:
        open(crushmap_flag_file, "w").close()
    except IOError as e:
        LOG.warn(
            _('Failed to create flag file: {}. '
              'Reason: {}').format(crushmap_flag_file, e))

    return True
Example #3
0
def fix_crushmap(dbapi=None):
    """Set Ceph's CRUSH map based on the storage model.

    Loads (or restores from backup) the model-specific crushmap, then
    creates a flag file so the update is performed only once.

    :param dbapi: sysinv DB API handle; defaults to the pecan request's.
    :returns: True if the crushmap was applied here, False if it was
              already applied, already loaded through puppet, or not
              enough monitors are available yet.
    :raises exception.CephCrushMapNotApplied: if restoring, compiling or
            loading the crushmap fails.
    """
    def _create_crushmap_flag_file():
        # Best-effort marker; a failure here is only logged.
        try:
            open(crushmap_flag_file, "w").close()
        except IOError as e:
            LOG.warn(('Failed to create flag file: {}. '
                      'Reason: {}').format(crushmap_flag_file, e))

    if not dbapi:
        dbapi = pecan.request.dbapi
    crushmap_flag_file = os.path.join(constants.SYSINV_CONFIG_PATH,
                                      constants.CEPH_CRUSH_MAP_APPLIED)

    if not os.path.isfile(crushmap_flag_file):
        _operator = CephApiOperator()
        if not cutils.is_aio_system(dbapi):
            # At least two monitors have to be running on a standard deployment,
            # otherwise don't even try to load the crushmap.
            active_mons, required_mons, __ = _operator.get_monitors_status(
                dbapi)
            if required_mons > active_mons:
                LOG.info("Not enough monitors yet available to fix crushmap.")
                return False

        # For AIO system, crushmap should be already loaded through puppet.
        # If it was loaded, set the crushmap flag to avoid loading it twice.
        default_ceph_tier_name = constants.SB_TIER_DEFAULT_NAMES[
            constants.SB_TIER_TYPE_CEPH] + constants.CEPH_CRUSH_TIER_SUFFIX
        rule_is_present, __, __ = _operator._crush_rule_status(
            default_ceph_tier_name)
        if rule_is_present:
            _create_crushmap_flag_file()
            return False

        try:
            # For AIO system, crushmap should already be loaded through
            # puppet. If for any reason it is not, as a precaution we set
            # the crushmap here.

            # Check if a backup crushmap exists. If it does, that means
            # it is during restore. We need to restore the backup crushmap
            # instead of generating it. For non-AIO system, it is stored in
            # /opt/platform/sysinv which is a drbd fs. For AIO systems because
            # when unlocking controller-0 for the first time, the crushmap is
            # set thru ceph puppet when /opt/platform is not mounted yet, we
            # store the crushmap in /etc/sysinv.

            if cutils.is_aio_system(dbapi):
                backup = os.path.join(
                    constants.CEPH_CRUSH_MAP_BACKUP_DIR_FOR_AIO,
                    constants.CEPH_CRUSH_MAP_BACKUP)
            else:
                backup = os.path.join(constants.SYSINV_CONFIG_PATH,
                                      constants.CEPH_CRUSH_MAP_BACKUP)
            crushmap_bin = "/etc/sysinv/crushmap.bin"
            if os.path.exists(backup):
                shutil.copyfile(backup, crushmap_bin)
            else:
                stor_model = get_ceph_storage_model(dbapi)
                if stor_model == constants.CEPH_AIO_SX_MODEL:
                    crushmap_txt = "/etc/sysinv/crushmap-aio-sx.txt"
                elif stor_model == constants.CEPH_CONTROLLER_MODEL:
                    crushmap_txt = "/etc/sysinv/crushmap-controller-model.txt"
                elif stor_model == constants.CEPH_STORAGE_MODEL:
                    crushmap_txt = "/etc/sysinv/crushmap-storage-model.txt"
                else:
                    reason = "Error: Undefined ceph storage model %s" % stor_model
                    raise exception.CephCrushMapNotApplied(reason=reason)
                LOG.info("Updating crushmap with: %s" % crushmap_txt)

                # Compile crushmap
                subprocess.check_output("crushtool -c %s "
                                        "-o %s" % (crushmap_txt, crushmap_bin),
                                        stderr=subprocess.STDOUT,
                                        shell=True)
            # Set crushmap
            subprocess.check_output("ceph osd setcrushmap -i %s" %
                                    crushmap_bin,
                                    stderr=subprocess.STDOUT,
                                    shell=True)

            if os.path.exists(backup):
                os.remove(backup)
        except (IOError, subprocess.CalledProcessError) as e:
            # May not be critical, depends on where this is called.
            # IOError (e.g. from shutil.copyfile) has no 'output' attribute;
            # fall back to '' instead of raising AttributeError here.
            reason = "Error: %s Output: %s" % (str(e),
                                               getattr(e, 'output', ''))
            raise exception.CephCrushMapNotApplied(reason=reason)

        _create_crushmap_flag_file()

        return True
    return False
Example #4
0
    def _crushmap_rule_add(self, tier, replicate_by):
        """Add a tier crushmap rule.

        The Ceph API only supports simple single-step rule creation, so the
        crushmap is extracted, decompiled, edited as text, recompiled and
        reloaded.

        :param tier: tier whose rule is being added.
        :param replicate_by: replication bucket type for the new rule.
        :raises exception.CephCrushMapNotApplied: if the initial crushmap
                has not been applied yet.
        :raises exception.CephCrushRuleAlreadyExists: if the tier is the
                default tier or already has a rule.
        """
        crushmap_flag_file = os.path.join(constants.SYSINV_CONFIG_PATH,
                                          constants.CEPH_CRUSH_MAP_APPLIED)
        if not os.path.isfile(crushmap_flag_file):
            reason = "Cannot add any additional rules."
            raise exception.CephCrushMapNotApplied(reason=reason)

        # The default tier always has a rule; never add another for it.
        default_root_name = self._format_root_name(self._default_tier)
        root_name = self._format_root_name(tier)
        if root_name == default_root_name:
            raise exception.CephCrushRuleAlreadyExists(tier=tier,
                                                       rule='default')

        # get the current rule count
        rule_is_present, rule_name, rule_count = self._crush_rule_status(
            root_name)
        if rule_is_present:
            raise exception.CephCrushRuleAlreadyExists(tier=tier,
                                                       rule=rule_name)

        # NOTE: The Ceph API only supports simple single step rule creation.
        # Because of this we need to update the crushmap the hard way.

        tmp_crushmap_bin_file = os.path.join(constants.SYSINV_CONFIG_PATH,
                                             "crushmap_rule_update.bin")
        tmp_crushmap_txt_file = os.path.join(constants.SYSINV_CONFIG_PATH,
                                             "crushmap_rule_update.txt")

        # try/finally so the temporary crushmap files are removed even when
        # one of the external commands below raises.
        try:
            # Extract the crushmap
            cmd = ["ceph", "osd", "getcrushmap", "-o", tmp_crushmap_bin_file]
            cutils.execute(*cmd, run_as_root=False)

            if os.path.exists(tmp_crushmap_bin_file):
                # Decompile the crushmap
                cmd = [
                    "crushtool", "-d", tmp_crushmap_bin_file, "-o",
                    tmp_crushmap_txt_file
                ]
                cutils.execute(*cmd, run_as_root=False)

                if os.path.exists(tmp_crushmap_txt_file):
                    # Add the custom rule
                    with open(tmp_crushmap_txt_file, 'r') as fp:
                        contents = fp.readlines()

                    self._insert_crush_rule(contents, root_name, rule_name,
                                            rule_count, replicate_by)

                    with open(tmp_crushmap_txt_file, 'w') as fp:
                        fp.write("".join(contents))

                    # Compile the crush map
                    cmd = [
                        "crushtool", "-c", tmp_crushmap_txt_file, "-o",
                        tmp_crushmap_bin_file
                    ]
                    cutils.execute(*cmd, run_as_root=False)

                    # Load the new crushmap
                    LOG.info("Loading updated crushmap with elements for "
                             "crushmap root: %s" % root_name)
                    cmd = [
                        "ceph", "osd", "setcrushmap", "-i",
                        tmp_crushmap_bin_file
                    ]
                    cutils.execute(*cmd, run_as_root=False)
        finally:
            # cleanup
            if os.path.exists(tmp_crushmap_txt_file):
                os.remove(tmp_crushmap_txt_file)
            if os.path.exists(tmp_crushmap_bin_file):
                os.remove(tmp_crushmap_bin_file)
Example #5
0
def fix_crushmap(dbapi=None):
    """ Set Ceph's CRUSH Map based on storage model """
    def _create_crushmap_flag_file():
        # Best-effort marker; a failure here is only logged.
        try:
            open(crushmap_flag_file, "w").close()
        except IOError as e:
            LOG.warn(('Failed to create flag file: {}. '
                      'Reason: {}').format(crushmap_flag_file, e))

    dbapi = dbapi or pecan.request.dbapi
    crushmap_flag_file = os.path.join(constants.SYSINV_CONFIG_PATH,
                                      constants.CEPH_CRUSH_MAP_APPLIED)

    # Already applied once; nothing to do.
    if os.path.isfile(crushmap_flag_file):
        return False

    ceph_op = CephApiOperator()
    if not is_aio_system(dbapi):
        # At least two monitors have to be running on a standard deployment,
        # otherwise don't even try to load the crushmap.
        num_active, num_required, __ = ceph_op.get_monitors_status(dbapi)
        if num_required > num_active:
            LOG.info("Not enough monitors yet available to fix crushmap.")
            return False

    # The crushmap may already be loaded through puppet; if the default
    # tier's rule is present, just record that and avoid loading it twice.
    default_tier_rule = (
        constants.SB_TIER_DEFAULT_NAMES[constants.SB_TIER_TYPE_CEPH] +
        constants.CEPH_CRUSH_TIER_SUFFIX)
    rule_exists, __, __ = ceph_op._crush_rule_status(default_tier_rule)
    if rule_exists:
        _create_crushmap_flag_file()
        return False

    # Select the crushmap template matching the deployment model.
    model = get_ceph_storage_model(dbapi)
    crushmap_txt = {
        constants.CEPH_AIO_SX_MODEL:
            "/etc/sysinv/crushmap-aio-sx.txt",
        constants.CEPH_CONTROLLER_MODEL:
            "/etc/sysinv/crushmap-controller-model.txt",
    }.get(model, "/etc/sysinv/crushmap-storage-model.txt")
    LOG.info("Updating crushmap with: %s" % crushmap_txt)

    crushmap_bin = "/etc/sysinv/crushmap.bin"
    try:
        # Compile, then load, the crushmap.
        subprocess.check_output(
            "crushtool -c %s -o %s" % (crushmap_txt, crushmap_bin),
            stderr=subprocess.STDOUT,
            shell=True)
        subprocess.check_output(
            "ceph osd setcrushmap -i %s" % crushmap_bin,
            stderr=subprocess.STDOUT,
            shell=True)
    except subprocess.CalledProcessError as e:
        # May not be critical, depends on where this is called.
        raise exception.CephCrushMapNotApplied(
            reason="Error: %s Output: %s" % (str(e), e.output))

    _create_crushmap_flag_file()
    return True