Example #1
    def _is_enabled(self, app_name, chart_name, namespace):
        """
        Check if the chart is enabled at the application level

        :param app_name: Application name
        :param chart_name: Chart supplied with the application
        :param namespace: Namespace where the chart will be executed

        Returns True by default if an exception occurs, as most charts are
        enabled.
        """
        return utils.is_chart_enabled(self.dbapi, app_name, chart_name,
                                      namespace)
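
As a rough usage sketch, a Helm plugin could call such a wrapper to decide whether to emit overrides for its chart. The class, application, chart, and namespace names below are hypothetical placeholders (not taken from the starlingx/config tree), and utils refers to the same helper module used in the example above:

class ExampleChartHelm(object):
    """Hypothetical plugin sketch, not an actual StarlingX plugin."""

    APP = 'example-app'              # placeholder application name
    CHART = 'example-chart'          # placeholder chart name
    NAMESPACE = 'example-namespace'  # placeholder namespace

    def __init__(self, dbapi):
        self.dbapi = dbapi

    def get_overrides(self):
        # is_chart_enabled() defaults to True when lookups fail, so overrides
        # are generated unless the chart is explicitly disabled.
        if not self._is_enabled(self.APP, self.CHART, self.NAMESPACE):
            return {}
        return {'replicas': 1}

    def _is_enabled(self, app_name, chart_name, namespace):
        return utils.is_chart_enabled(self.dbapi, app_name, chart_name,
                                      namespace)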
Example #2
File: ceph.py  Project: starlingx/config
    def get_system_config(self):
        ceph_backend = StorageBackendConfig.get_backend_conf(
            self.dbapi, constants.CINDER_BACKEND_CEPH)
        if not ceph_backend:
            return {}  # ceph is not configured

        ceph_mon_ips = StorageBackendConfig.get_ceph_mon_ip_addresses(
            self.dbapi)

        if not ceph_mon_ips:
            return {}  # system configuration is not yet ready

        controller_hosts = [
            constants.CONTROLLER_0_HOSTNAME, constants.CONTROLLER_1_HOSTNAME
        ]
        mon_2_host = [
            mon['hostname'] for mon in self.dbapi.ceph_mon_get_list()
            if mon['hostname'] not in controller_hosts
        ]
        if len(mon_2_host) > 1:
            raise exception.SysinvException(
                'Too many ceph monitor hosts, expected 1, got: %s.' %
                mon_2_host)
        if mon_2_host:
            mon_2_host = mon_2_host[0]
        else:
            mon_2_host = None

        mon_0_ip = ceph_mon_ips[constants.CEPH_MON_0]
        mon_1_ip = ceph_mon_ips[constants.CEPH_MON_1]
        mon_2_ip = ceph_mon_ips.get(constants.CEPH_MON_2, None)
        floating_mon_ip = ceph_mon_ips[constants.CEPH_FLOATING_MON]

        mon_0_addr = self._format_ceph_mon_address(mon_0_ip)
        mon_1_addr = self._format_ceph_mon_address(mon_1_ip)
        if mon_2_ip:
            mon_2_addr = self._format_ceph_mon_address(mon_2_ip)
        else:
            mon_2_addr = None
        floating_mon_addr = self._format_ceph_mon_address(floating_mon_ip)

        # Ceph cannot bind to multiple address families, so only enable IPv6
        # if the monitors are IPv6 addresses.
        ms_bind_ipv6 = (
            netaddr.IPAddress(mon_0_ip).version == constants.IPV6_FAMILY)

        skip_osds_during_restore = (
            utils.is_std_system(self.dbapi) and
            ceph_backend.task == constants.SB_TASK_RESTORE)

        is_sx_to_dx_migration = self._get_system_capability(
            'simplex_to_duplex_migration')

        config = {
            'ceph::ms_bind_ipv6':
            ms_bind_ipv6,
            'platform::ceph::params::service_enabled':
            True,
            'platform::ceph::params::floating_mon_host':
            constants.CONTROLLER_HOSTNAME,
            'platform::ceph::params::mon_0_host':
            constants.CONTROLLER_0_HOSTNAME,
            'platform::ceph::params::mon_1_host':
            constants.CONTROLLER_1_HOSTNAME,
            'platform::ceph::params::mon_2_host':
            mon_2_host,
            'platform::ceph::params::floating_mon_ip':
            floating_mon_ip,
            'platform::ceph::params::mon_0_ip':
            mon_0_ip,
            'platform::ceph::params::mon_1_ip':
            mon_1_ip,
            'platform::ceph::params::mon_2_ip':
            mon_2_ip,
            'platform::ceph::params::floating_mon_addr':
            floating_mon_addr,
            'platform::ceph::params::mon_0_addr':
            mon_0_addr,
            'platform::ceph::params::mon_1_addr':
            mon_1_addr,
            'platform::ceph::params::mon_2_addr':
            mon_2_addr,
            'platform::ceph::params::rgw_enabled':
            self._is_radosgw_enabled(),
            'platform::ceph::rgw::keystone::swift_endpts_enabled':
            False,
            'platform::ceph::rgw::keystone::rgw_admin_user':
            self._get_service_user_name(self.SERVICE_NAME_RGW),
            'platform::ceph::rgw::keystone::rgw_admin_password':
            self._get_service_password(self.SERVICE_NAME_RGW),
            'platform::ceph::rgw::keystone::rgw_admin_domain':
            self._get_service_user_domain_name(),
            'platform::ceph::rgw::keystone::rgw_admin_project':
            self._get_service_tenant_name(),
            'platform::ceph::params::skip_osds_during_restore':
            skip_osds_during_restore,
            'platform::ceph::params::simplex_to_duplex_migration':
            bool(is_sx_to_dx_migration),
        }

        if is_sx_to_dx_migration:
            cephfs_filesystems = self._get_cephfs_filesystems()
            if cephfs_filesystems:
                config['platform::ceph::params::cephfs_filesystems'] = \
                    cephfs_filesystems

        if (utils.is_openstack_applied(self.dbapi) and utils.is_chart_enabled(
                self.dbapi, constants.HELM_APP_OPENSTACK,
                self.HELM_CHART_SWIFT, common.HELM_NS_OPENSTACK)):
            app = self.dbapi.kube_app_get(constants.HELM_APP_OPENSTACK)
            override = self.dbapi.helm_override_get(app.id,
                                                    self.SERVICE_NAME_RGW,
                                                    common.HELM_NS_OPENSTACK)
            password = override.system_overrides.get(self.SERVICE_NAME_RGW,
                                                     None)
            if password:
                swift_auth_password = password.encode('utf8', 'strict')
                config.update({
                    'platform::ceph::rgw::keystone::swift_endpts_enabled':
                    True
                })
                config.pop('platform::ceph::rgw::keystone::rgw_admin_user')
                config.update({
                    'platform::ceph::rgw::keystone::rgw_admin_password':
                    swift_auth_password
                })
                config.update({
                    'platform::ceph::rgw::keystone::rgw_admin_domain':
                    self.RADOSGW_SERVICE_DOMAIN_NAME
                })
                config.update({
                    'platform::ceph::rgw::keystone::rgw_admin_project':
                    self.RADOSGW_SERVICE_PROJECT_NAME
                })
            else:
                raise exception.SysinvException(
                    "Unable to retreive containerized swift auth password")

        return config
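
For illustration, the address-family decision above can be reproduced in isolation with netaddr. The format_mon_address() helper below is only an assumed stand-in for _format_ceph_mon_address(), bracketing IPv6 addresses in the usual "[addr]:port" style; the IPV6_FAMILY value and the 6789 monitor port are likewise assumptions, not values read from the project:

import netaddr

IPV6_FAMILY = 6  # assumed to match constants.IPV6_FAMILY


def format_mon_address(ip, port=6789):
    """Assumed stand-in for _format_ceph_mon_address()."""
    if netaddr.IPAddress(ip).version == IPV6_FAMILY:
        return '[%s]:%d' % (ip, port)
    return '%s:%d' % (ip, port)


# Ceph binds to a single address family, keyed off monitor 0 as above.
ms_bind_ipv6 = (netaddr.IPAddress('fd00::10').version == IPV6_FAMILY)  # True

print(format_mon_address('fd00::10'))       # [fd00::10]:6789
print(format_mon_address('192.168.204.3'))  # 192.168.204.3:6789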
Example #3
    def _check_monitor_labels(hosts):

        logstash_active = cutils.is_chart_enabled(
            pecan.request.dbapi, constants.HELM_APP_MONITOR,
            helm_common.HELM_CHART_LOGSTASH, helm_common.HELM_NS_MONITOR)

        elasticsearch_client_active = cutils.is_chart_enabled(
            pecan.request.dbapi, constants.HELM_APP_MONITOR,
            helm_common.HELM_CHART_ELASTICSEARCH_CLIENT,
            helm_common.HELM_NS_MONITOR)

        elasticsearch_data_active = cutils.is_chart_enabled(
            pecan.request.dbapi, constants.HELM_APP_MONITOR,
            helm_common.HELM_CHART_ELASTICSEARCH_DATA,
            helm_common.HELM_NS_MONITOR)

        elasticsearch_master_active = cutils.is_chart_enabled(
            pecan.request.dbapi, constants.HELM_APP_MONITOR,
            helm_common.HELM_CHART_ELASTICSEARCH_MASTER,
            helm_common.HELM_NS_MONITOR)

        elasticsearch_active = (elasticsearch_client_active
                                and elasticsearch_data_active
                                and elasticsearch_master_active)

        # elasticsearch charts must either all be active or
        # all inactive
        if (not elasticsearch_active
                and (elasticsearch_client_active or elasticsearch_data_active
                     or elasticsearch_master_active)):
            raise wsme.exc.ClientSideError(
                _("Operation rejected: application stx-monitor "
                  "requires charts: elasticsearch-master, "
                  "elasticsearch-client and elasticsearch-data either all "
                  "enabled, or all disabled"))

        curator_active = cutils.is_chart_enabled(
            pecan.request.dbapi, constants.HELM_APP_MONITOR,
            helm_common.HELM_CHART_ELASTICSEARCH_CURATOR,
            helm_common.HELM_NS_MONITOR)

        if (not elasticsearch_active) and curator_active:
            raise wsme.exc.ClientSideError(
                _("Operation rejected: application stx-monitor "
                  "does not allow elasticsearch-curator chart enabled "
                  "without the elasticsearch charts also enabled"))

        if not elasticsearch_active and not logstash_active:
            # Nothing to check, exit
            return

        # The required counts of labelled
        # and unlocked-enabled hosts.
        required_label_counts = dict()

        # The counts of labelled hosts.
        label_counts = dict()

        # The counts of labelled hosts
        # that are also unlocked and enabled.
        good_label_counts = dict()

        is_aio_simplex = cutils.is_aio_simplex_system(pecan.request.dbapi)

        if elasticsearch_active:
            label_counts = {
                helm_common.LABEL_MONITOR_MASTER: 0,
                helm_common.LABEL_MONITOR_DATA: 0,
                helm_common.LABEL_MONITOR_CLIENT: 0
            }

            good_label_counts = {
                helm_common.LABEL_MONITOR_MASTER: 0,
                helm_common.LABEL_MONITOR_DATA: 0,
                helm_common.LABEL_MONITOR_CLIENT: 0
            }

            if is_aio_simplex:
                # AIO simplex means one of every label.
                required_label_counts = {
                    helm_common.LABEL_MONITOR_MASTER: 1,
                    helm_common.LABEL_MONITOR_DATA: 1,
                    helm_common.LABEL_MONITOR_CLIENT: 1
                }
            else:
                # Dual controller configs
                required_label_counts = {
                    helm_common.LABEL_MONITOR_DATA: 2,
                    helm_common.LABEL_MONITOR_CLIENT: 2,
                    helm_common.LABEL_MONITOR_MASTER: 3
                }

                # For AIO-DX without worker nodes, we only need 2
                # hosts labelled as master.
                if (cutils.is_aio_duplex_system(pecan.request.dbapi)
                        and (pecan.request.dbapi.count_hosts_by_label(
                            helm_common.LABEL_MONITOR_MASTER) < 3)):
                    required_label_counts[helm_common.LABEL_MONITOR_MASTER] = 2

        if logstash_active:
            good_label_counts[helm_common.LABEL_MONITOR_CONTROLLER] = 0
            label_counts[helm_common.LABEL_MONITOR_CONTROLLER] = 0

            if is_aio_simplex:
                required_label_counts[helm_common.LABEL_MONITOR_CONTROLLER] = 1
            else:
                required_label_counts[helm_common.LABEL_MONITOR_CONTROLLER] = 2

        # Examine all the required labels on the given hosts
        # and build up our actual and good label counts.
        host_info = {}
        for host in hosts:
            labels = pecan.request.dbapi.label_get_by_host(host.uuid)

            host_good = (host.administrative == constants.ADMIN_UNLOCKED
                         and host.operational == constants.OPERATIONAL_ENABLED)

            host_labels_dict = {}
            for label in labels:
                if label.label_key in required_label_counts:
                    if label.label_value == helm_common.LABEL_VALUE_ENABLED:
                        label_counts[label.label_key] += 1
                        if host_good:
                            good_label_counts[label.label_key] += 1

                    host_labels_dict[label.label_key] = label.label_value

            host_info[host.hostname] = {
                "personality": host.personality,
                "labels": host_labels_dict
            }

        # If we are short of labels on unlocked and enabled hosts,
        # inform the user with a detailed message.
        msg = ""
        for k, v in required_label_counts.items():
            if good_label_counts[k] < required_label_counts[k]:
                msg += (", label:%s=%s, required=%d, labelled=%d,"
                        " labelled and unlocked-enabled=%d" %
                        (k, helm_common.LABEL_VALUE_ENABLED, v,
                         label_counts[k], good_label_counts[k]))

        if msg:
            app_helper = KubeAppHelper(pecan.request.dbapi)
            msg += "\n"
            msg += app_helper._extract_missing_labels_message(
                host_info, required_label_counts)

        if msg:
            raise wsme.exc.ClientSideError(
                _("Operation rejected: application stx-monitor "
                  "does not have required unlocked-enabled and "
                  "labelled hosts{}".format(msg)))