def get_secure_system_config(self):
    """Return the secret hieradata for the dcdbsync service.

    Collects the database and keystone service passwords; when the
    stx-openstack application is applied, also pulls the pod-based
    keystone password and oslo.db connection from the helm data.
    """
    db_password = self._get_database_password(self.IDENTITY_SERVICE_NAME)
    ks_password = self._get_service_password(self.SERVICE_NAME)

    config = {
        'dcdbsync::database_connection':
            self._format_database_connection(
                self.IDENTITY_SERVICE_NAME,
                database=self.IDENTITY_SERVICE_DB),
        'dcdbsync::db::postgresql::password': db_password,
        'dcdbsync::keystone::auth::password': ks_password,
        'dcdbsync::api::keystone_password': ks_password,
    }

    if utils.is_openstack_applied(self.dbapi):
        operator_data = helm.HelmOperatorData(self.dbapi)

        # The dcdbsync instance for openstack is authenticated with
        # pod based keystone.
        endpoints = operator_data.get_dcdbsync_endpoint_data()
        oslo_db = operator_data.get_keystone_oslo_db_data()

        config.update({
            'dcdbsync::openstack_api::keystone_password':
                endpoints['keystone_password'],
            'dcdbsync::openstack_init::database_connection':
                oslo_db['connection'],
        })

    return config
def get_system_config(self):
    """Return the non-secret hieradata for the dcdbsync service.

    Builds the platform keystone/auth configuration; when the
    stx-openstack application is applied, additionally points the
    openstack-facing API at the pod-based keystone endpoints.
    """
    service_user = self._get_service_user_name(self.SERVICE_NAME)
    user_domain = self._get_service_user_domain_name()

    config = {
        # The region in which the identity server can be found
        'dcdbsync::region_name': self._keystone_region_name(),
        'dcdbsync::keystone::auth::public_url': self.get_public_url(),
        'dcdbsync::keystone::auth::internal_url': self.get_internal_url(),
        'dcdbsync::keystone::auth::admin_url': self.get_admin_url(),
        'dcdbsync::keystone::auth::region': self._region_name(),
        'dcdbsync::keystone::auth::auth_name': service_user,
        'dcdbsync::keystone::auth::auth_domain': user_domain,
        'dcdbsync::keystone::auth::service_name': self.SERVICE_NAME,
        'dcdbsync::keystone::auth::tenant':
            self._get_service_tenant_name(),
        'dcdbsync::api::bind_host': self._get_management_address(),
        'dcdbsync::api::keystone_auth_uri': self._keystone_auth_uri(),
        'dcdbsync::api::keystone_identity_uri':
            self._keystone_identity_uri(),
        'dcdbsync::api::keystone_tenant':
            self._get_service_project_name(),
        'dcdbsync::api::keystone_user_domain': user_domain,
        'dcdbsync::api::keystone_project_domain':
            self._get_service_project_domain_name(),
        'dcdbsync::api::keystone_user': service_user,
        'platform::dcdbsync::params::region_name':
            self.get_region_name(),
        'platform::dcdbsync::params::service_create':
            self._to_create_services(),
    }

    if utils.is_openstack_applied(self.dbapi):
        operator_data = helm.HelmOperatorData(self.dbapi)

        # The dcdbsync instance for openstack is authenticated with
        # pod based keystone.
        endpoints = operator_data.get_keystone_endpoint_data()
        pod_keystone_uri = endpoints['endpoint_override']

        config.update({
            'dcdbsync::openstack_init::region_name':
                endpoints['region_name'],
            'dcdbsync::openstack_api::keystone_auth_uri':
                pod_keystone_uri,
            'dcdbsync::openstack_api::keystone_identity_uri':
                pod_keystone_uri,
        })

    return config
def get_secure_system_config(self):
    """Return the secret hieradata for the dcorch service.

    Gathers database, keystone service, keyring admin, and dcmanager
    keystone passwords; when stx-openstack is applied, also fetches
    the pod-based keystone admin password from helm data.
    """
    db_password = self._get_database_password(self.SERVICE_NAME)
    service_password = self._get_service_password(self.SERVICE_NAME)
    keyring_admin_password = self._get_keyring_password(
        self.ADMIN_SERVICE, self.ADMIN_USER)
    dcmanager_password = self._operator.dcmanager.get_ks_user_password()

    config = {
        'dcorch::database_connection':
            self._format_database_connection(self.SERVICE_NAME),
        'dcorch::db::postgresql::password': db_password,
        'dcorch::keystone::auth::password': service_password,
        'dcorch::api_proxy::keystone_password': service_password,
        'dcorch::api_proxy::keystone_admin_password':
            keyring_admin_password,
        'dcorch::api_proxy::dcmanager_keystone_password':
            dcmanager_password,
    }

    if utils.is_openstack_applied(self.dbapi):
        operator_data = helm.HelmOperatorData(self.dbapi)
        auth_data = operator_data.get_keystone_auth_data()
        config.update({
            'dcorch::stx_openstack::keystone_admin_password':
                auth_data['admin_password'],
        })

    return config
def get_system_config(self):
    """Assemble the non-secret keystone hieradata for the system.

    Produces endpoint URLs, admin identity settings for the platform
    clients, and region names; then merges in service-parameter and
    password-rule overrides.  When the stx-openstack application is
    applied, also exposes the containerized keystone auth URI.
    """
    admin_username = self.get_admin_user_name()
    admin_project = self.get_admin_project_name()
    config = {
        'keystone::public_bind_host': self._get_management_address(),
        'keystone::admin_bind_host': self._get_management_address(),
        'keystone::endpoint::public_url': self.get_public_url(),
        'keystone::endpoint::internal_url': self.get_internal_url(),
        'keystone::endpoint::admin_url': self.get_admin_url(),
        'keystone::endpoint::region': self._region_name(),
        'keystone::roles::admin::admin': admin_username,
        'platform::client::params::admin_username': admin_username,
        'platform::client::params::admin_project_name': admin_project,
        'platform::client::params::admin_user_domain':
            self.get_admin_user_domain(),
        'platform::client::params::admin_project_domain':
            self.get_admin_project_domain(),
        'platform::client::params::identity_region': self._region_name(),
        'platform::client::params::identity_auth_url': self.get_auth_url(),
        'platform::client::params::keystone_identity_region':
            self._identity_specific_region_name(),
        'platform::client::params::auth_region':
            self._identity_specific_region_name(),
        'openstack::keystone::params::api_version': self.SERVICE_PATH,
        'openstack::keystone::params::identity_uri':
            self.get_identity_uri(),
        'openstack::keystone::params::auth_uri': self.get_auth_uri(),
        'openstack::keystone::params::host_url':
            self._format_url_address(self._get_management_address()),
        # The region in which the identity server can be found
        # and it could be different than the region where the
        # system resides
        'openstack::keystone::params::region_name':
            self._identity_specific_region_name(),
        'openstack::keystone::params::system_controller_region':
            constants.SYSTEM_CONTROLLER_REGION,
        'openstack::keystone::params::service_create':
            self._to_create_services(),
        'CONFIG_KEYSTONE_ADMIN_USERNAME': self.get_admin_user_name(),
    }
    if utils.is_openstack_applied(self.dbapi):
        # Only meaningful once the containerized keystone exists.
        config['openstack::keystone::params::openstack_auth_uri'] = \
            self.get_openstack_auth_uri()
    # Merge service-parameter overrides and password-complexity rules
    # last so they take precedence over the defaults above.
    config.update(self._get_service_parameter_config())
    config.update(self._get_password_rule())
    return config
def _get_system_config(self):
    """Return platform-level hieradata derived from the system record."""
    system = self._get_system()
    openstack_applied = utils.is_openstack_applied(self.dbapi)

    config = {
        'platform::params::controller_upgrade': False,
        'platform::params::config_path': tsconfig.CONFIG_PATH,
        'platform::params::security_profile': system.security_profile,
        'platform::params::security_feature': system.security_feature,
        'platform::config::params::timezone': system.timezone,
        'platform::params::vswitch_type': self._vswitch_type(),
        'platform::params::stx_openstack_applied': openstack_applied,
    }
    return config
def get_system_config(self):
    """Assemble the NFV/VIM hieradata for the system.

    Builds the platform keystone/auth settings for the VIM, then either
    wires the VIM to the containerized openstack services (keystone,
    nova, cinder, glance, neutron, heat, ceilometer, rabbitmq) when the
    stx-openstack application is applied, or disables the corresponding
    VIM plugins and falls back to platform keystone otherwise.
    """
    system = self._get_system()
    # Simplex systems run a single hypervisor/controller; this drives
    # the VIM's instance- and sw-mgmt behavior.
    if system.system_mode == constants.SYSTEM_MODE_SIMPLEX:
        single_hypervisor = True
        single_controller = True
    else:
        single_hypervisor = False
        single_controller = False

    config = {
        'nfv::keystone::auth::public_url': self.get_public_url(),
        'nfv::keystone::auth::internal_url': self.get_internal_url(),
        'nfv::keystone::auth::admin_url': self.get_admin_url(),
        'nfv::keystone::auth::auth_name':
            self._get_service_user_name(self.SERVICE_NAME),
        'nfv::keystone::auth::region':
            self._get_service_region_name(self.SERVICE_NAME),
        'nfv::keystone::auth::tenant': self._get_service_tenant_name(),
        'nfv::nfvi::host_listener_host':
            self._get_management_address(),
        'nfv::nfvi::platform_username':
            self._operator.keystone.get_admin_user_name(),
        'nfv::nfvi::platform_tenant':
            self._operator.keystone.get_admin_project_name(),
        'nfv::nfvi::platform_auth_host': self._keystone_auth_address(),
        'nfv::nfvi::platform_user_domain':
            self._operator.keystone.get_admin_user_domain(),
        'nfv::nfvi::platform_project_domain':
            self._operator.keystone.get_admin_project_domain(),
        'nfv::nfvi::platform_keyring_service':
            self.PLATFORM_KEYRING_SERVICE,
        'nfv::nfvi::keystone_region_name': self._keystone_region_name(),
        'nfv::nfvi::keystone_service_name':
            self._operator.keystone.get_service_name(),
        'nfv::nfvi::keystone_service_type':
            self._operator.keystone.get_service_type(),
        'nfv::nfvi::sysinv_region_name':
            self._operator.sysinv.get_region_name(),
        'nfv::nfvi::patching_region_name':
            self._operator.patching.get_region_name(),
        'nfv::nfvi::fm_region_name':
            self._operator.fm.get_region_name(),
        'nfv::vim::vim_api_ip': self._get_management_address(),
        'nfv::vim::vim_webserver_ip': self._get_oam_address(),
        'nfv::vim::instance_single_hypervisor': single_hypervisor,
        'nfv::vim::sw_mgmt_single_controller': single_controller,
        # This flag is used to disable raising alarm to containerized fm
        # and will be removed in future.
        'nfv::alarm::fault_management_pod_disabled': True,
        'nfv::event_log::fault_management_pod_disabled': True,
        'nfv::vim::fault_management_pod_disabled': True,
        'platform::nfv::params::service_create':
            self._to_create_services(),
    }

    if utils.is_openstack_applied(self.dbapi):
        helm_data = helm.HelmOperatorData(self.dbapi)

        # The openstack services are authenticated with pod based
        # keystone.
        keystone_auth_data = helm_data.get_keystone_auth_data()
        openstack_auth_config = {
            'nfv::nfvi::openstack_username':
                keystone_auth_data['admin_user_name'],
            'nfv::nfvi::openstack_tenant':
                keystone_auth_data['admin_project_name'],
            'nfv::nfvi::openstack_auth_host':
                keystone_auth_data['auth_host'],
            'nfv::nfvi::openstack_user_domain':
                keystone_auth_data['admin_user_domain'],
            'nfv::nfvi::openstack_project_domain':
                keystone_auth_data['admin_project_domain'],
            'nfv::nfvi::openstack_keyring_service':
                self.PLATFORM_KEYRING_SERVICE,
            'nfv::alarm::openstack_username':
                keystone_auth_data['admin_user_name'],
            'nfv::alarm::openstack_tenant':
                keystone_auth_data['admin_project_name'],
            'nfv::alarm::openstack_auth_host':
                keystone_auth_data['auth_host'],
            'nfv::alarm::openstack_user_domain':
                keystone_auth_data['admin_user_domain'],
            'nfv::alarm::openstack_project_domain':
                keystone_auth_data['admin_project_domain'],
            'nfv::alarm::openstack_keyring_service':
                self.PLATFORM_KEYRING_SERVICE,
            'nfv::event_log::openstack_username':
                keystone_auth_data['admin_user_name'],
            'nfv::event_log::openstack_tenant':
                keystone_auth_data['admin_project_name'],
            'nfv::event_log::openstack_auth_host':
                keystone_auth_data['auth_host'],
            'nfv::event_log::openstack_user_domain':
                keystone_auth_data['admin_user_domain'],
            'nfv::event_log::openstack_project_domain':
                keystone_auth_data['admin_project_domain'],
            'nfv::event_log::openstack_keyring_service':
                self.PLATFORM_KEYRING_SERVICE,
        }
        config.update(openstack_auth_config)

        # Nova is running in a pod
        nova_endpoint_data = helm_data.get_nova_endpoint_data()
        nova_config = {
            'nfv::nfvi::nova_endpoint_override':
                nova_endpoint_data['endpoint_override'],
            'nfv::nfvi::nova_region_name':
                nova_endpoint_data['region_name'],
        }
        config.update(nova_config)

        # Cinder is running in a pod
        cinder_endpoint_data = helm_data.get_cinder_endpoint_data()
        cinder_config = {
            'nfv::nfvi::cinder_region_name':
                cinder_endpoint_data['region_name'],
            'nfv::nfvi::cinder_service_name':
                cinder_endpoint_data['service_name'],
            'nfv::nfvi::cinder_service_type':
                cinder_endpoint_data['service_type'],
        }
        config.update(cinder_config)

        # Glance is running in a pod
        glance_endpoint_data = helm_data.get_glance_endpoint_data()
        glance_config = {
            'nfv::nfvi::glance_region_name':
                glance_endpoint_data['region_name'],
            'nfv::nfvi::glance_service_name':
                glance_endpoint_data['service_name'],
            'nfv::nfvi::glance_service_type':
                glance_endpoint_data['service_type'],
        }
        config.update(glance_config)

        # Neutron is running in a pod
        neutron_endpoint_data = helm_data.get_neutron_endpoint_data()
        neutron_config = {
            'nfv::nfvi::neutron_region_name':
                neutron_endpoint_data['region_name'],
        }
        config.update(neutron_config)

        # Heat is running in a pod
        heat_endpoint_data = helm_data.get_heat_endpoint_data()
        heat_config = {
            'nfv::nfvi::heat_region_name':
                heat_endpoint_data['region_name'],
        }
        config.update(heat_config)

        # Ceilometer is running in a pod
        ceilometer_endpoint_data = \
            helm_data.get_ceilometer_endpoint_data()
        ceilometer_config = {
            'nfv::nfvi::ceilometer_region_name':
                ceilometer_endpoint_data['region_name'],
        }
        config.update(ceilometer_config)

        # The openstack rabbitmq is running in a pod
        nova_oslo_messaging_data = \
            helm_data.get_nova_oslo_messaging_data()
        rabbit_config = {
            'nfv::nfvi::rabbit_host':
                nova_oslo_messaging_data['host'],
            'nfv::nfvi::rabbit_port':
                nova_oslo_messaging_data['port'],
            'nfv::nfvi::rabbit_virtual_host':
                nova_oslo_messaging_data['virt_host'],
            'nfv::nfvi::rabbit_userid':
                nova_oslo_messaging_data['username'],
            'nfv::nfvi::rabbit_password':
                nova_oslo_messaging_data['password'],
        }
        config.update(rabbit_config)

        # Listen to nova api proxy on management address
        nova_api_proxy_config = {
            'nfv::nfvi::compute_rest_api_host':
                self._get_management_address(),
        }
        config.update(nova_api_proxy_config)
    else:
        # The openstack auth info is still required as the VIM will
        # audit some keystone entities (e.g. tenants).  Point it to
        # the platform keystone.
        openstack_auth_config = {
            'nfv::nfvi::openstack_username':
                self._operator.keystone.get_admin_user_name(),
            'nfv::nfvi::openstack_tenant':
                self._operator.keystone.get_admin_project_name(),
            'nfv::nfvi::openstack_auth_host':
                self._keystone_auth_address(),
            'nfv::nfvi::openstack_user_domain':
                self._operator.keystone.get_admin_user_domain(),
            'nfv::nfvi::openstack_project_domain':
                self._operator.keystone.get_admin_project_domain(),
            'nfv::nfvi::openstack_keyring_service':
                self.PLATFORM_KEYRING_SERVICE,
        }
        config.update(openstack_auth_config)

        vim_disabled = {
            # Disable VIM plugins for resources not yet active.
            'nfv::vim::block_storage_plugin_disabled': True,
            'nfv::vim::compute_plugin_disabled': True,
            'nfv::vim::network_plugin_disabled': True,
            'nfv::vim::image_plugin_disabled': True,
            'nfv::vim::guest_plugin_disabled': True,
            'nfv::vim::fault_mgmt_plugin_disabled': True,
            'nfv::nfvi::nova_endpoint_disabled': True,
            'nfv::nfvi::neutron_endpoint_disabled': True,
            'nfv::nfvi::cinder_endpoint_disabled': True,
            'nfv::alarm::fault_mgmt_endpoint_disabled': True,
            'nfv::event_log::fault_mgmt_endpoint_disabled': True,
        }
        config.update(vim_disabled)

    return config
def get_system_config(self):
    """Assemble the ceph hieradata for the system.

    Returns an empty dict when ceph is not configured as a storage
    backend or when the monitor IP addresses are not yet assigned.
    Otherwise produces monitor host/IP/address settings, radosgw
    keystone settings, and restore/migration flags.  When the
    stx-openstack swift chart is enabled, the radosgw keystone
    credentials are switched to the containerized swift overrides.

    :raises exception.SysinvException: if more than one non-controller
        ceph monitor host exists, or if the containerized swift auth
        password cannot be retrieved from the helm overrides.
    """
    ceph_backend = StorageBackendConfig.get_backend_conf(
        self.dbapi, constants.CINDER_BACKEND_CEPH)
    if not ceph_backend:
        return {}  # ceph is not configured

    ceph_mon_ips = StorageBackendConfig.get_ceph_mon_ip_addresses(
        self.dbapi)
    if not ceph_mon_ips:
        return {}  # system configuration is not yet ready

    # The third monitor (mon.2) lives on a non-controller host on
    # standard systems; there must be at most one such monitor.
    controller_hosts = [
        constants.CONTROLLER_0_HOSTNAME, constants.CONTROLLER_1_HOSTNAME
    ]
    mon_2_host = [
        mon['hostname'] for mon in self.dbapi.ceph_mon_get_list()
        if mon['hostname'] not in controller_hosts
    ]
    if len(mon_2_host) > 1:
        raise exception.SysinvException(
            'Too many ceph monitor hosts, expected 1, got: %s.' %
            mon_2_host)
    if mon_2_host:
        mon_2_host = mon_2_host[0]
    else:
        mon_2_host = None

    mon_0_ip = ceph_mon_ips[constants.CEPH_MON_0]
    mon_1_ip = ceph_mon_ips[constants.CEPH_MON_1]
    mon_2_ip = ceph_mon_ips.get(constants.CEPH_MON_2, None)
    floating_mon_ip = ceph_mon_ips[constants.CEPH_FLOATING_MON]

    mon_0_addr = self._format_ceph_mon_address(mon_0_ip)
    mon_1_addr = self._format_ceph_mon_address(mon_1_ip)
    if mon_2_ip:
        mon_2_addr = self._format_ceph_mon_address(mon_2_ip)
    else:
        mon_2_addr = None
    floating_mon_addr = self._format_ceph_mon_address(floating_mon_ip)

    # ceph can not bind to multiple address families, so only enable
    # IPv6 if the monitors are IPv6 addresses
    ms_bind_ipv6 = (
        netaddr.IPAddress(mon_0_ip).version == constants.IPV6_FAMILY)

    # On a standard system restore, OSDs are skipped until the restore
    # task completes.
    skip_osds_during_restore = \
        (utils.is_std_system(self.dbapi) and
         ceph_backend.task == constants.SB_TASK_RESTORE)

    is_sx_to_dx_migration = self._get_system_capability(
        'simplex_to_duplex_migration')

    config = {
        'ceph::ms_bind_ipv6': ms_bind_ipv6,

        'platform::ceph::params::service_enabled': True,

        'platform::ceph::params::floating_mon_host':
            constants.CONTROLLER_HOSTNAME,
        'platform::ceph::params::mon_0_host':
            constants.CONTROLLER_0_HOSTNAME,
        'platform::ceph::params::mon_1_host':
            constants.CONTROLLER_1_HOSTNAME,
        'platform::ceph::params::mon_2_host': mon_2_host,

        'platform::ceph::params::floating_mon_ip': floating_mon_ip,
        'platform::ceph::params::mon_0_ip': mon_0_ip,
        'platform::ceph::params::mon_1_ip': mon_1_ip,
        'platform::ceph::params::mon_2_ip': mon_2_ip,

        'platform::ceph::params::floating_mon_addr': floating_mon_addr,
        'platform::ceph::params::mon_0_addr': mon_0_addr,
        'platform::ceph::params::mon_1_addr': mon_1_addr,
        'platform::ceph::params::mon_2_addr': mon_2_addr,

        'platform::ceph::params::rgw_enabled':
            self._is_radosgw_enabled(),
        'platform::ceph::rgw::keystone::swift_endpts_enabled': False,
        'platform::ceph::rgw::keystone::rgw_admin_user':
            self._get_service_user_name(self.SERVICE_NAME_RGW),
        'platform::ceph::rgw::keystone::rgw_admin_password':
            self._get_service_password(self.SERVICE_NAME_RGW),
        'platform::ceph::rgw::keystone::rgw_admin_domain':
            self._get_service_user_domain_name(),
        'platform::ceph::rgw::keystone::rgw_admin_project':
            self._get_service_tenant_name(),

        'platform::ceph::params::skip_osds_during_restore':
            skip_osds_during_restore,
        'platform::ceph::params::simplex_to_duplex_migration':
            bool(is_sx_to_dx_migration),
    }

    if is_sx_to_dx_migration:
        cephfs_filesystems = self._get_cephfs_filesystems()
        if cephfs_filesystems:
            config['platform::ceph::params::cephfs_filesystems'] = \
                cephfs_filesystems

    if (utils.is_openstack_applied(self.dbapi) and
            utils.is_chart_enabled(self.dbapi,
                                   constants.HELM_APP_OPENSTACK,
                                   self.HELM_CHART_SWIFT,
                                   common.HELM_NS_OPENSTACK)):
        # Swift is enabled in the containerized application: switch the
        # radosgw keystone credentials to the swift helm overrides.
        app = self.dbapi.kube_app_get(constants.HELM_APP_OPENSTACK)
        override = self.dbapi.helm_override_get(app.id,
                                                self.SERVICE_NAME_RGW,
                                                common.HELM_NS_OPENSTACK)
        password = override.system_overrides.get(self.SERVICE_NAME_RGW,
                                                 None)
        if password:
            # NOTE(review): encode() yields bytes on python 3 — confirm
            # downstream hiera serialization expects this.
            swift_auth_password = password.encode('utf8', 'strict')
            config.update({
                'platform::ceph::rgw::keystone::swift_endpts_enabled':
                    True})
            config.pop('platform::ceph::rgw::keystone::rgw_admin_user')
            config.update({
                'platform::ceph::rgw::keystone::rgw_admin_password':
                    swift_auth_password})
            config.update({
                'platform::ceph::rgw::keystone::rgw_admin_domain':
                    self.RADOSGW_SERVICE_DOMAIN_NAME})
            config.update({
                'platform::ceph::rgw::keystone::rgw_admin_project':
                    self.RADOSGW_SERVICE_PROJECT_NAME})
        else:
            # Fixed typo in the original message ("retreive").
            raise exception.SysinvException(
                "Unable to retrieve containerized swift auth password")

    return config
def get_system_config(self):
    """Assemble the non-secret hieradata for the dcorch service.

    Builds the internal/public/admin proxy URLs that dcorch exposes
    for each proxied service (neutron, nova, sysinv, cinder, patching,
    identity), plus the api_proxy keystone settings.  When the
    stx-openstack application is applied, adds the pod-based keystone
    identity URI and admin credentials.
    """
    ksuser = self._get_service_user_name(self.SERVICE_NAME)
    dm_ksuser = self._operator.dcmanager.get_ks_user_name()

    config = {
        # The region in which the identity server can be found
        'dcorch::region_name': self._keystone_region_name(),

        # Internal proxy URLs, one per proxied service.
        'dcorch::keystone::auth::neutron_proxy_internal_url':
            self.get_proxy_internal_url(self.NETWORKING_SERVICE_PORT,
                                        self.NETWORKING_SERVICE_PATH),
        'dcorch::keystone::auth::nova_proxy_internal_url':
            self.get_proxy_internal_url(self.COMPUTE_SERVICE_PORT,
                                        self.COMPUTE_SERVICE_PATH),
        'dcorch::keystone::auth::sysinv_proxy_internal_url':
            self.get_proxy_internal_url(self.PLATFORM_SERVICE_PORT,
                                        self.PLATFORM_SERVICE_PATH),
        'dcorch::keystone::auth::cinder_proxy_internal_url_v2':
            self.get_proxy_internal_url(self.CINDER_SERVICE_PORT,
                                        self.CINDER_SERVICE_PATH_V2),
        'dcorch::keystone::auth::cinder_proxy_internal_url_v3':
            self.get_proxy_internal_url(self.CINDER_SERVICE_PORT,
                                        self.CINDER_SERVICE_PATH_V3),
        'dcorch::keystone::auth::patching_proxy_internal_url':
            self.get_proxy_internal_url(self.PATCHING_SERVICE_PORT,
                                        self.PATCHING_SERVICE_PATH),
        'dcorch::keystone::auth::identity_proxy_internal_url':
            self.get_proxy_internal_url(self.IDENTITY_SERVICE_PORT,
                                        self.IDENTITY_SERVICE_PATH),

        # Public proxy URLs.
        'dcorch::keystone::auth::neutron_proxy_public_url':
            self.get_proxy_public_url(self.NETWORKING_SERVICE_PORT,
                                      self.NETWORKING_SERVICE_PATH),
        'dcorch::keystone::auth::nova_proxy_public_url':
            self.get_proxy_public_url(self.COMPUTE_SERVICE_PORT,
                                      self.COMPUTE_SERVICE_PATH),
        'dcorch::keystone::auth::sysinv_proxy_public_url':
            self.get_proxy_public_url(self.PLATFORM_SERVICE_PORT,
                                      self.PLATFORM_SERVICE_PATH),
        'dcorch::keystone::auth::cinder_proxy_public_url_v2':
            self.get_proxy_public_url(self.CINDER_SERVICE_PORT,
                                      self.CINDER_SERVICE_PATH_V2),
        'dcorch::keystone::auth::cinder_proxy_public_url_v3':
            self.get_proxy_public_url(self.CINDER_SERVICE_PORT,
                                      self.CINDER_SERVICE_PATH_V3),
        'dcorch::keystone::auth::patching_proxy_public_url':
            self.get_proxy_public_url(self.PATCHING_SERVICE_PORT,
                                      self.PATCHING_SERVICE_PATH),
        'dcorch::keystone::auth::identity_proxy_public_url':
            self.get_proxy_public_url(self.IDENTITY_SERVICE_PORT,
                                      self.IDENTITY_SERVICE_PATH),

        # Admin proxy URLs (sysinv, identity, patching only).
        'dcorch::keystone::auth::sysinv_proxy_admin_url':
            self.get_proxy_admin_url(self.PLATFORM_SERVICE_PORT,
                                     self.PLATFORM_SERVICE_PATH),
        'dcorch::keystone::auth::identity_proxy_admin_url':
            self.get_proxy_admin_url(self.IDENTITY_SERVICE_PORT,
                                     self.IDENTITY_SERVICE_PATH),
        'dcorch::keystone::auth::patching_proxy_admin_url':
            self.get_proxy_admin_url(self.PATCHING_SERVICE_PORT,
                                     self.PATCHING_SERVICE_PATH),

        'dcorch::keystone::auth::region': self.get_region_name(),
        'dcorch::keystone::auth::auth_name': ksuser,
        'dcorch::keystone::auth::service_name': self.SERVICE_NAME,
        'dcorch::keystone::auth::tenant':
            self._get_service_tenant_name(),
        'dcorch::api_proxy::bind_host':
            self._get_management_address(),
        'dcorch::api_proxy::keystone_auth_uri':
            self._keystone_auth_uri(),
        'dcorch::api_proxy::keystone_identity_uri':
            self._keystone_identity_uri(),
        'dcorch::api_proxy::keystone_tenant':
            self._get_service_project_name(),
        'dcorch::api_proxy::keystone_user_domain':
            self._get_service_user_domain_name(),
        'dcorch::api_proxy::keystone_project_domain':
            self._get_service_project_domain_name(),
        'dcorch::api_proxy::keystone_user': ksuser,
        'dcorch::api_proxy::dcmanager_keystone_user': dm_ksuser,
        'dcorch::api_proxy::keystone_admin_user': self.ADMIN_USER,
        'dcorch::api_proxy::keystone_admin_tenant': self.ADMIN_TENANT,
        'openstack::dcorch::params::region_name':
            self.get_region_name(),
        'platform::dcorch::params::service_create':
            self._to_create_services(),
    }

    if utils.is_openstack_applied(self.dbapi):
        helm_data = helm.HelmOperatorData(self.dbapi)
        endpoints_data = helm_data.get_keystone_endpoint_data()
        auth_data = helm_data.get_keystone_auth_data()
        # Keys below rely on implicit string-literal concatenation,
        # e.g. 'dcorch::stx_openstack::keystone_identity_uri'.
        app_config = {
            'dcorch::stx_openstack::'
            'keystone_identity_uri': endpoints_data['endpoint_override'],
            'dcorch::stx_openstack::'
            'keystone_admin_user': auth_data['admin_user_name'],
            'dcorch::stx_openstack::'
            'keystone_admin_tenant': auth_data['admin_project_name'],
        }
        config.update(app_config)

    return config
def get_system_config(self):
    """Return the hieradata for the PCI IRQ affinity agent.

    When the stx-openstack application is applied, the agent is enabled
    and wired to the pod-based keystone and rabbitmq; otherwise it is
    disabled and all connection parameters are stubbed out.
    """
    config = {}

    if utils.is_openstack_applied(self.dbapi):
        operator_data = helm.HelmOperatorData(self.dbapi)

        # The openstack services are authenticated with pod based
        # keystone.
        auth = operator_data.get_keystone_auth_data()
        config.update({
            'platform::pciirqaffinity::params::openstack_enabled':
                True,
            'platform::pciirqaffinity::params::openstack_username':
                auth['admin_user_name'],
            'platform::pciirqaffinity::params::openstack_tenant':
                auth['admin_project_name'],
            'platform::pciirqaffinity::params::openstack_auth_host':
                auth['auth_host'],
            'platform::pciirqaffinity::params::openstack_user_domain':
                auth['admin_user_domain'],
            'platform::pciirqaffinity::params::openstack_project_domain':
                auth['admin_project_domain'],
            'platform::pciirqaffinity::params::openstack_keyring_service':
                self.PLATFORM_KEYRING_SERVICE,
        })

        # The openstack rabbitmq is running in a pod
        messaging = operator_data.get_nova_oslo_messaging_data()
        config.update({
            'platform::pciirqaffinity::params::rabbit_host':
                messaging['host'],
            'platform::pciirqaffinity::params::rabbit_port':
                messaging['port'],
            'platform::pciirqaffinity::params::rabbit_virtual_host':
                messaging['virt_host'],
            'platform::pciirqaffinity::params::rabbit_userid':
                messaging['username'],
            'platform::pciirqaffinity::params::rabbit_password':
                messaging['password'],
        })
    else:
        # Agent disabled: stub every connection parameter.
        config.update({
            'platform::pciirqaffinity::params::openstack_enabled':
                False,
            'platform::pciirqaffinity::params::openstack_username':
                '******',
            'platform::pciirqaffinity::params::openstack_tenant':
                'undef',
            'platform::pciirqaffinity::params::openstack_auth_host':
                'undef',
            'platform::pciirqaffinity::params::openstack_user_domain':
                'undef',
            'platform::pciirqaffinity::params::openstack_project_domain':
                'undef',
            'platform::pciirqaffinity::params::openstack_keyring_service':
                'undef',
            'platform::pciirqaffinity::params::rabbit_host':
                'undef',
            'platform::pciirqaffinity::params::rabbit_port':
                'undef',
            'platform::pciirqaffinity::params::rabbit_virtual_host':
                'undef',
            'platform::pciirqaffinity::params::rabbit_userid':
                'undef',
            'platform::pciirqaffinity::params::rabbit_password':
                '******',
        })

    return config