def __call__(self):
    """Generate the Apache SSL frontend template context.

    Returns an empty dict unless HTTPS is enabled and external ports
    are configured; otherwise returns the service namespace, the
    (address, endpoint, ext_port, int_port) portmaps and the sorted,
    de-duplicated list of external ports.
    """
    ports = self.external_ports
    if isinstance(ports, six.string_types):
        ports = [ports]
        self.external_ports = ports
    if not (ports and https()):
        return {}

    self.configure_ca()
    self.enable_modules()

    ctxt = {'namespace': self.service_namespace,
            'endpoints': [],
            'ext_ports': []}

    for name in self.canonical_names():
        self.configure_cert(name)

    for addr, endpoint_addr in sorted(set(self.get_network_addresses())):
        for port in ports:
            apache_port = determine_apache_port(port)
            backend_port = determine_api_port(port)
            ctxt['endpoints'].append(
                (addr, endpoint_addr, int(apache_port), int(backend_port)))
            ctxt['ext_ports'].append(int(apache_port))

    ctxt['ext_ports'] = sorted(set(ctxt['ext_ports']))
    return ctxt
def __call__(self):
    '''
    Extends the main charmhelpers HAProxyContext with a port mapping
    specific to this charm.
    Also used to extend nova.conf context with correct api_listening_ports
    '''
    from nova_cc_utils import api_port
    ctxt = super(HAProxyContext, self).__call__()

    # determine which port api processes should bind to, depending
    # on existence of haproxy + apache frontends
    compute_api = determine_api_port(api_port('nova-api-os-compute'),
                                     singlenode_mode=True)
    ec2_api = determine_api_port(api_port('nova-api-ec2'),
                                 singlenode_mode=True)
    s3_api = determine_api_port(api_port('nova-objectstore'),
                                singlenode_mode=True)
    neutron_api = determine_api_port(api_port('neutron-server'),
                                     singlenode_mode=True)

    # Apache ports
    a_compute_api = determine_apache_port(api_port('nova-api-os-compute'),
                                          singlenode_mode=True)
    a_ec2_api = determine_apache_port(api_port('nova-api-ec2'),
                                      singlenode_mode=True)
    a_s3_api = determine_apache_port(api_port('nova-objectstore'),
                                     singlenode_mode=True)
    a_neutron_api = determine_apache_port(api_port('neutron-server'),
                                          singlenode_mode=True)

    # to be set in nova.conf accordingly.
    listen_ports = {
        'osapi_compute_listen_port': compute_api,
        'ec2_listen_port': ec2_api,
        's3_listen_port': s3_api,
    }

    port_mapping = {
        'nova-api-os-compute': [
            api_port('nova-api-os-compute'), a_compute_api],
        'nova-api-ec2': [
            api_port('nova-api-ec2'), a_ec2_api],
        'nova-objectstore': [
            api_port('nova-objectstore'), a_s3_api],
    }

    # Only front neutron-server with haproxy when this charm runs it
    # locally (i.e. no separate neutron-api charm is related).
    if not is_relation_made('neutron-api'):
        if neutron.network_manager() == 'neutron':
            port_mapping.update({
                'neutron-server': [
                    api_port('neutron-server'), a_neutron_api]
            })

    # neutron.conf listening port, set separate from nova's.
    ctxt['neutron_bind_port'] = neutron_api

    # for haproxy.conf
    ctxt['service_ports'] = port_mapping
    # for nova.conf
    ctxt['listen_ports'] = listen_ports
    return ctxt
def __call__(self):
    """Assemble the template context used to render keystone.conf."""
    from keystone_utils import (
        api_port, set_admin_token, endpoint_url, resolve_address,
        PUBLIC, ADMIN, PKI_CERTS_DIR, ensure_pki_cert_paths,
        get_admin_domain_id)
    ctxt = {}
    ctxt['token'] = set_admin_token(config('admin-token'))
    ctxt['api_version'] = int(config('preferred-api-version'))
    ctxt['admin_role'] = config('admin-role')
    if ctxt['api_version'] > 2:
        # v3 requires an admin domain; fall back to a placeholder id
        # until the real one is available.
        ctxt['admin_domain_id'] = (get_admin_domain_id() or
                                   'admin_domain_id')
    # Ports the keystone processes should bind to (shifted when
    # haproxy/apache front the service).
    ctxt['admin_port'] = determine_api_port(api_port('keystone-admin'),
                                            singlenode_mode=True)
    ctxt['public_port'] = determine_api_port(api_port('keystone-public'),
                                             singlenode_mode=True)
    ctxt['debug'] = config('debug')
    ctxt['verbose'] = config('verbose')
    ctxt['token_expiration'] = config('token-expiration')

    ctxt['identity_backend'] = config('identity-backend')
    ctxt['assignment_backend'] = config('assignment-backend')
    if config('identity-backend') == 'ldap':
        ctxt['ldap_server'] = config('ldap-server')
        ctxt['ldap_user'] = config('ldap-user')
        ctxt['ldap_password'] = config('ldap-password')
        ctxt['ldap_suffix'] = config('ldap-suffix')
        ctxt['ldap_readonly'] = config('ldap-readonly')
        ldap_flags = config('ldap-config-flags')
        if ldap_flags:
            flags = context.config_flags_parser(ldap_flags)
            ctxt['ldap_config_flags'] = flags

    enable_pki = config('enable-pki')
    if enable_pki and bool_from_string(enable_pki):
        log("Enabling PKI", level=DEBUG)
        ctxt['token_provider'] = 'pki'
        ensure_pki_cert_paths()
        certs = os.path.join(PKI_CERTS_DIR, 'certs')
        privates = os.path.join(PKI_CERTS_DIR, 'privates')
        ctxt.update({
            'certfile': os.path.join(certs, 'signing_cert.pem'),
            'keyfile': os.path.join(privates, 'signing_key.pem'),
            'ca_certs': os.path.join(certs, 'ca.pem'),
            'ca_key': os.path.join(certs, 'ca_key.pem')
        })

    # Base endpoint URL's which are used in keystone responses
    # to unauthenticated requests to redirect clients to the
    # correct auth URL.
    ctxt['public_endpoint'] = endpoint_url(
        resolve_address(PUBLIC),
        api_port('keystone-public')).replace('v2.0', '')
    ctxt['admin_endpoint'] = endpoint_url(
        resolve_address(ADMIN),
        api_port('keystone-admin')).replace('v2.0', '')
    return ctxt
def __call__(self):
    """Build the keystone.conf template context from charm config."""
    from keystone_utils import (
        api_port, set_admin_token, endpoint_url, resolve_address,
        PUBLIC, ADMIN, PKI_CERTS_DIR, ensure_pki_cert_paths,
        get_admin_domain_id
    )
    ctxt = {}
    ctxt['token'] = set_admin_token(config('admin-token'))
    ctxt['api_version'] = int(config('preferred-api-version'))
    ctxt['admin_role'] = config('admin-role')
    if ctxt['api_version'] > 2:
        # Keystone v3 needs an admin domain id; use a placeholder
        # until the real id exists.
        ctxt['admin_domain_id'] = (
            get_admin_domain_id() or 'admin_domain_id')
    # Bind ports, shifted if haproxy/apache front the service.
    ctxt['admin_port'] = determine_api_port(api_port('keystone-admin'),
                                            singlenode_mode=True)
    ctxt['public_port'] = determine_api_port(api_port('keystone-public'),
                                             singlenode_mode=True)
    ctxt['debug'] = config('debug')
    ctxt['verbose'] = config('verbose')
    ctxt['token_expiration'] = config('token-expiration')

    ctxt['identity_backend'] = config('identity-backend')
    ctxt['assignment_backend'] = config('assignment-backend')
    if config('identity-backend') == 'ldap':
        ctxt['ldap_server'] = config('ldap-server')
        ctxt['ldap_user'] = config('ldap-user')
        ctxt['ldap_password'] = config('ldap-password')
        ctxt['ldap_suffix'] = config('ldap-suffix')
        ctxt['ldap_readonly'] = config('ldap-readonly')
        ldap_flags = config('ldap-config-flags')
        if ldap_flags:
            flags = context.config_flags_parser(ldap_flags)
            ctxt['ldap_config_flags'] = flags

    enable_pki = config('enable-pki')
    if enable_pki and bool_from_string(enable_pki):
        log("Enabling PKI", level=DEBUG)
        ctxt['token_provider'] = 'pki'
        ensure_pki_cert_paths()
        certs = os.path.join(PKI_CERTS_DIR, 'certs')
        privates = os.path.join(PKI_CERTS_DIR, 'privates')
        ctxt.update({'certfile': os.path.join(certs, 'signing_cert.pem'),
                     'keyfile': os.path.join(privates, 'signing_key.pem'),
                     'ca_certs': os.path.join(certs, 'ca.pem'),
                     'ca_key': os.path.join(certs, 'ca_key.pem')})

    # Base endpoint URL's which are used in keystone responses
    # to unauthenticated requests to redirect clients to the
    # correct auth URL.
    ctxt['public_endpoint'] = endpoint_url(
        resolve_address(PUBLIC),
        api_port('keystone-public')).replace('v2.0', '')
    ctxt['admin_endpoint'] = endpoint_url(
        resolve_address(ADMIN),
        api_port('keystone-admin')).replace('v2.0', '')
    return ctxt
def __call__(self):
    '''
    Extends the main charmhelpers HAProxyContext with a port mapping
    specific to this charm.
    Also used to extend nova.conf context with correct
    api_listening_ports
    '''
    ctxt = super(HAProxyContext, self).__call__()

    # determine which port api processes should bind to, depending
    # on existence of haproxy + apache frontends
    compute_api = ch_cluster.determine_api_port(
        common.api_port('nova-api-os-compute'), singlenode_mode=True)
    ec2_api = ch_cluster.determine_api_port(
        common.api_port('nova-api-ec2'), singlenode_mode=True)
    s3_api = ch_cluster.determine_api_port(
        common.api_port('nova-objectstore'), singlenode_mode=True)
    placement_api = ch_cluster.determine_api_port(
        common.api_port('nova-placement-api'), singlenode_mode=True)
    metadata_api = ch_cluster.determine_api_port(
        common.api_port('nova-api-metadata'), singlenode_mode=True)

    # Apache ports
    a_compute_api = ch_cluster.determine_apache_port(
        common.api_port('nova-api-os-compute'), singlenode_mode=True)
    a_ec2_api = ch_cluster.determine_apache_port(
        common.api_port('nova-api-ec2'), singlenode_mode=True)
    a_s3_api = ch_cluster.determine_apache_port(
        common.api_port('nova-objectstore'), singlenode_mode=True)
    a_placement_api = ch_cluster.determine_apache_port(
        common.api_port('nova-placement-api'), singlenode_mode=True)
    a_metadata_api = ch_cluster.determine_apache_port(
        common.api_port('nova-api-metadata'), singlenode_mode=True)

    # to be set in nova.conf accordingly.
    listen_ports = {
        'osapi_compute_listen_port': compute_api,
        'ec2_listen_port': ec2_api,
        's3_listen_port': s3_api,
        'placement_listen_port': placement_api,
        'metadata_listen_port': metadata_api,
    }

    port_mapping = {
        'nova-api-os-compute': [common.api_port('nova-api-os-compute'),
                                a_compute_api],
        'nova-api-ec2': [common.api_port('nova-api-ec2'), a_ec2_api],
        'nova-objectstore': [common.api_port('nova-objectstore'),
                             a_s3_api],
        'nova-placement-api': [common.api_port('nova-placement-api'),
                               a_placement_api],
        'nova-api-metadata': [common.api_port('nova-api-metadata'),
                              a_metadata_api],
    }

    # for haproxy.conf
    ctxt['service_ports'] = port_mapping
    # for nova.conf
    ctxt['listen_ports'] = listen_ports
    # Backend port for the placement API, exposed directly as 'port'.
    ctxt['port'] = placement_api
    return ctxt
def __call__(self):
    """Extend the base HAProxyContext with the neutron-server port
    mapping and the port neutron-server itself should bind to."""
    from neutron_api_utils import api_port
    ctxt = super(HAProxyContext, self).__call__()

    frontend_port = api_port('neutron-server')

    # Port apache should expose for the neutron-server backend.
    apache_port = determine_apache_port(frontend_port,
                                        singlenode_mode=True)

    # Port the neutron-server process binds to.
    ctxt['neutron_bind_port'] = determine_api_port(
        frontend_port,
        singlenode_mode=True,
    )

    # for haproxy.conf
    ctxt['service_ports'] = {
        'neutron-server': [frontend_port, apache_port],
    }
    return ctxt
def __call__(self):
    """Add the neutron-server haproxy port mapping and bind port to the
    base HAProxyContext."""
    from neutron_api_utils import api_port
    ctxt = super(HAProxyContext, self).__call__()

    listen = api_port('neutron-server')
    ctxt['neutron_bind_port'] = determine_api_port(
        listen,
        singlenode_mode=True,
    )
    # haproxy.conf frontend -> apache backend pair.
    ctxt['service_ports'] = {
        'neutron-server': [
            listen,
            determine_apache_port(listen, singlenode_mode=True),
        ],
    }
    return ctxt
def test_determine_api_port_clustered_https(self, peer_units, https,
                                            is_clustered):
    '''It determines API port in presence of hacluster + https'''
    peer_units.return_value = []
    is_clustered.return_value = True
    https.return_value = True
    # 9696 is shifted down twice: once for the haproxy frontend and
    # once more for the apache SSL frontend -> 9676.
    # NOTE: assertEquals is a deprecated alias removed in Python 3.12;
    # use assertEqual.
    self.assertEqual(9676, cluster_utils.determine_api_port(9696))
def __call__(self):
    """Build the Apache SSL context (singlenode-aware port shifting).

    Empty dict when HTTPS is off or no external ports are configured;
    otherwise namespace, endpoint portmaps and sorted external ports.
    """
    ports = self.external_ports
    if isinstance(ports, six.string_types):
        ports = [ports]
        self.external_ports = ports
    if not (ports and https()):
        return {}

    self.configure_ca()
    self.enable_modules()

    ctxt = {
        'namespace': self.service_namespace,
        'endpoints': [],
        'ext_ports': [],
    }

    for name in self.canonical_names():
        self.configure_cert(name)

    for addr, endpoint_addr in sorted(set(self.get_network_addresses())):
        for port in ports:
            frontend = determine_apache_port(port, singlenode_mode=True)
            backend = determine_api_port(port, singlenode_mode=True)
            ctxt['endpoints'].append(
                (addr, endpoint_addr, int(frontend), int(backend)))
            ctxt['ext_ports'].append(int(frontend))

    ctxt['ext_ports'] = sorted(set(ctxt['ext_ports']))
    return ctxt
def add_user():
    """Add a swauth user to swift."""
    if config('auth-type') == 'swauth':
        try_initialize_swauth()

    account = action_get('account')
    username = action_get('username')
    password = action_get('password')
    # Talk to the port the proxy actually binds to locally.
    bind_port = determine_api_port(config('bind-port'),
                                   singlenode_mode=True)
    try:
        check_call([
            "swauth-add-user",
            "-A", "http://localhost:{}/auth/".format(bind_port),
            "-K", leader_get('swauth-admin-key'),
            "-a", account, username, password])
    except CalledProcessError as e:
        log("Has a problem adding user: {}".format(e.output))
        action_fail("Adding user {} failed with: \"{}\"".format(
            username, str(e)))
        return

    message = "Successfully added the user {}".format(username)
    action_set({
        'add-user.result': 'Success',
        'add-user.message': message,
    })
def add_user():
    """Add a swauth user to swift.

    Reads account/username/password from the action parameters and
    invokes swauth-add-user against the locally bound proxy port.
    Fails the action (but still logs) on a non-zero exit.
    """
    if config('auth-type') == 'swauth':
        try_initialize_swauth()
    account = action_get('account')
    username = action_get('username')
    password = action_get('password')
    bind_port = config('bind-port')
    # Shifted when haproxy fronts the proxy on this unit.
    bind_port = determine_api_port(bind_port, singlenode_mode=True)
    success = True
    try:
        check_call([
            "swauth-add-user",
            "-A", "http://localhost:{}/auth/".format(bind_port),
            "-K", leader_get('swauth-admin-key'),
            "-a", account, username, password])
    except CalledProcessError as e:
        success = False
        log("Has a problem adding user: {}".format(e.output))
        action_fail(
            "Adding user {} failed with: \"{}\""
            .format(username, str(e)))
    if success:
        message = "Successfully added the user {}".format(username)
        action_set({
            'add-user.result': 'Success',
            'add-user.message': message,
        })
def endpoints(self):
    """List of endpoint information.

    Endpoint information used to configure apache
    Client -> endpoint -> address:ext_port -> local:int_port

    returns [(address1, endpoint1, ext_port1, int_port1),
             (address2, endpoint2, ext_port2, int_port2), ...]
    """
    result = []
    for addr, ep in sorted(set(self.network_addresses)):
        for port in self.external_ports:
            frontend = ch_cluster.determine_apache_port(
                port, singlenode_mode=True)
            backend = ch_cluster.determine_api_port(
                port, singlenode_mode=True)
            result.append((addr, ep, int(frontend), int(backend)))
    return result
def __call__(self):
    """Build the neutron server context (NSX options, overlay settings
    and the nova-url/restart-trigger data from the neutron-api
    relation)."""
    from neutron_api_utils import api_port
    ctxt = super(NeutronCCContext, self).__call__()
    if config('neutron-plugin') == 'nsx':
        ctxt['nsx_username'] = config('nsx-username')
        ctxt['nsx_password'] = config('nsx-password')
        ctxt['nsx_tz_uuid'] = config('nsx-tz-uuid')
        ctxt['nsx_l3_uuid'] = config('nsx-l3-uuid')
        if 'nsx-controllers' in config():
            ctxt['nsx_controllers'] = \
                ','.join(config('nsx-controllers').split())
            ctxt['nsx_controllers_list'] = \
                config('nsx-controllers').split()
    ctxt['l2_population'] = self.neutron_l2_population
    ctxt['overlay_network_type'] = self.neutron_overlay_network_type
    ctxt['external_network'] = config('neutron-external-network')
    ctxt['verbose'] = config('verbose')
    ctxt['debug'] = config('debug')
    # Port neutron-server binds to (shifted when fronted by haproxy).
    ctxt['neutron_bind_port'] = \
        determine_api_port(api_port('neutron-server'),
                           singlenode_mode=True)
    for rid in relation_ids('neutron-api'):
        for unit in related_units(rid):
            rdata = relation_get(rid=rid, unit=unit)
            cell_type = rdata.get('cell_type')
            ctxt['nova_url'] = rdata.get('nova_url')
            ctxt['restart_trigger'] = rdata.get('restart_trigger')
            # If there are multiple nova-cloud-controllers joined to this
            # service in a cell deployment then ignore the non-api cell
            # ones
            if cell_type and not cell_type == "api":
                continue
            if ctxt['nova_url']:
                # Stop at the first api-cell unit providing a nova_url.
                return ctxt
    return ctxt
def __call__(self):
    """Build the neutron server context: plugin-specific options (nsx,
    plumgrid), L2/L3 HA settings, quotas and provider networks."""
    from neutron_api_utils import api_port
    ctxt = super(NeutronCCContext, self).__call__()
    if config('neutron-plugin') == 'nsx':
        ctxt['nsx_username'] = config('nsx-username')
        ctxt['nsx_password'] = config('nsx-password')
        ctxt['nsx_tz_uuid'] = config('nsx-tz-uuid')
        ctxt['nsx_l3_uuid'] = config('nsx-l3-uuid')
        if 'nsx-controllers' in config():
            ctxt['nsx_controllers'] = \
                ','.join(config('nsx-controllers').split())
            ctxt['nsx_controllers_list'] = \
                config('nsx-controllers').split()
    if config('neutron-plugin') == 'plumgrid':
        ctxt['pg_username'] = config('plumgrid-username')
        ctxt['pg_password'] = config('plumgrid-password')
        ctxt['virtual_ip'] = config('plumgrid-virtual-ip')
    ctxt['l2_population'] = self.neutron_l2_population
    ctxt['enable_dvr'] = self.neutron_dvr
    ctxt['l3_ha'] = self.neutron_l3ha
    if self.neutron_l3ha:
        # Only meaningful when L3 HA is on.
        ctxt['max_l3_agents_per_router'] = \
            config('max-l3-agents-per-router')
        ctxt['min_l3_agents_per_router'] = \
            config('min-l3-agents-per-router')
    ctxt['overlay_network_type'] = self.neutron_overlay_network_type
    ctxt['external_network'] = config('neutron-external-network')
    ctxt['verbose'] = config('verbose')
    ctxt['debug'] = config('debug')
    # Port neutron-server binds to (shifted when fronted by haproxy).
    ctxt['neutron_bind_port'] = \
        determine_api_port(api_port('neutron-server'),
                           singlenode_mode=True)
    ctxt['quota_security_group'] = config('quota-security-group')
    ctxt['quota_security_group_rule'] = \
        config('quota-security-group-rule')
    ctxt['quota_network'] = config('quota-network')
    ctxt['quota_subnet'] = config('quota-subnet')
    ctxt['quota_port'] = config('quota-port')
    ctxt['quota_vip'] = config('quota-vip')
    ctxt['quota_pool'] = config('quota-pool')
    ctxt['quota_member'] = config('quota-member')
    ctxt['quota_health_monitors'] = config('quota-health-monitors')
    ctxt['quota_router'] = config('quota-router')
    ctxt['quota_floatingip'] = config('quota-floatingip')

    n_api_settings = self.get_neutron_api_rel_settings()
    if n_api_settings:
        ctxt.update(n_api_settings)

    flat_providers = config('flat-network-providers')
    if flat_providers:
        ctxt['network_providers'] = ','.join(flat_providers.split())

    vlan_ranges = config('vlan-ranges')
    if vlan_ranges:
        ctxt['vlan_ranges'] = ','.join(vlan_ranges.split())

    return ctxt
def __call__(self):
    """Build the swift proxy template context.

    Resolves bind port and worker count from config, then keystone
    auth settings either from charm config or, preferentially, from
    the identity-service relation (relation data is applied last and
    wins).
    """
    bind_port = config('bind-port')
    workers = config('workers')
    if workers == '0':
        # '0' means autoscale workers to the CPU count.
        import multiprocessing
        workers = multiprocessing.cpu_count()
    ctxt = {
        'proxy_ip': get_host_ip(unit_get('private-address')),
        'bind_port': determine_api_port(bind_port),
        'workers': workers,
        'operator_roles': config('operator-roles'),
        'delay_auth_decision': config('delay-auth-decision')
    }

    ctxt['ssl'] = False

    auth_type = config('auth-type')
    auth_host = config('keystone-auth-host')
    admin_user = config('keystone-admin-user')
    # BUG FIX: previously this read 'keystone-admin-user' a second
    # time, silently setting the password to the username.
    admin_password = config('keystone-admin-password')
    if (auth_type == 'keystone' and auth_host and
            admin_user and admin_password):
        log('Using user-specified Keystone configuration.')
        ks_auth = {
            'auth_type': 'keystone',
            'auth_protocol': config('keystone-auth-protocol'),
            'keystone_host': auth_host,
            'auth_port': config('keystone-auth-port'),
            'service_user': admin_user,
            'service_password': admin_password,
            'service_tenant': config('keystone-admin-tenant-name')
        }
        ctxt.update(ks_auth)

    # Relation-provided keystone settings override charm config.
    for relid in relation_ids('identity-service'):
        log('Using Keystone configuration from identity-service.')
        for unit in related_units(relid):
            ks_auth = {
                'auth_type': 'keystone',
                'auth_protocol': 'http',  # TODO: http hardcode
                'keystone_host': relation_get('auth_host', unit, relid),
                'auth_port': relation_get('auth_port', unit, relid),
                'service_user': relation_get('service_username', unit,
                                             relid),
                'service_password': relation_get('service_password',
                                                 unit, relid),
                'service_tenant': relation_get('service_tenant', unit,
                                               relid),
                'service_port': relation_get('service_port', unit,
                                             relid),
                'admin_token': relation_get('admin_token', unit, relid),
            }
            # Only apply once all required relation keys are present.
            if context_complete(ks_auth):
                ctxt.update(ks_auth)
    return ctxt
def __call__(self):
    '''
    Extends the main charmhelpers HAProxyContext with a port mapping
    specific to this charm.
    Also used to extend nova.conf context with correct api_listening_ports
    '''
    from nova_cc_utils import api_port
    ctxt = super(HAProxyContext, self).__call__()

    # determine which port api processes should bind to, depending
    # on existence of haproxy + apache frontends
    compute_api = determine_api_port(api_port('nova-api-os-compute'),
                                     singlenode_mode=True)
    ec2_api = determine_api_port(api_port('nova-api-ec2'),
                                 singlenode_mode=True)
    s3_api = determine_api_port(api_port('nova-objectstore'),
                                singlenode_mode=True)

    # Apache ports
    a_compute_api = determine_apache_port(api_port('nova-api-os-compute'),
                                          singlenode_mode=True)
    a_ec2_api = determine_apache_port(api_port('nova-api-ec2'),
                                      singlenode_mode=True)
    a_s3_api = determine_apache_port(api_port('nova-objectstore'),
                                     singlenode_mode=True)

    # to be set in nova.conf accordingly.
    listen_ports = {
        'osapi_compute_listen_port': compute_api,
        'ec2_listen_port': ec2_api,
        's3_listen_port': s3_api,
    }

    port_mapping = {
        'nova-api-os-compute': [
            api_port('nova-api-os-compute'), a_compute_api],
        'nova-api-ec2': [
            api_port('nova-api-ec2'), a_ec2_api],
        'nova-objectstore': [
            api_port('nova-objectstore'), a_s3_api],
    }

    # for haproxy.conf
    ctxt['service_ports'] = port_mapping
    # for nova.conf
    ctxt['listen_ports'] = listen_ports
    return ctxt
def __call__(self):
    '''
    Extends the main charmhelpers HAProxyContext with a port mapping
    specific to this charm.
    Also used to extend nova.conf context with correct api_listening_ports
    '''
    from nova_cc_utils import api_port
    ctxt = super(HAProxyContext, self).__call__()

    # Ports each API process should bind to, shifted when haproxy
    # and/or apache front the service.
    compute_api = determine_api_port(api_port('nova-api-os-compute'),
                                     singlenode_mode=True)
    ec2_api = determine_api_port(api_port('nova-api-ec2'),
                                 singlenode_mode=True)
    s3_api = determine_api_port(api_port('nova-objectstore'),
                                singlenode_mode=True)

    # Apache ports
    a_compute_api = determine_apache_port(api_port('nova-api-os-compute'),
                                          singlenode_mode=True)
    a_ec2_api = determine_apache_port(api_port('nova-api-ec2'),
                                      singlenode_mode=True)
    a_s3_api = determine_apache_port(api_port('nova-objectstore'),
                                     singlenode_mode=True)

    # to be set in nova.conf accordingly.
    listen_ports = {
        'osapi_compute_listen_port': compute_api,
        'ec2_listen_port': ec2_api,
        's3_listen_port': s3_api,
    }

    port_mapping = {
        'nova-api-os-compute': [api_port('nova-api-os-compute'),
                                a_compute_api],
        'nova-api-ec2': [api_port('nova-api-ec2'), a_ec2_api],
        'nova-objectstore': [api_port('nova-objectstore'), a_s3_api],
    }

    # for haproxy.conf
    ctxt['service_ports'] = port_mapping
    # for nova.conf
    ctxt['listen_ports'] = listen_ports
    return ctxt
def __call__(self):
    """Extend the base HAProxyContext with the glance-api haproxy
    frontend/backend pair and the port glance-api should bind to."""
    frontend = determine_haproxy_port(9292)
    backend = determine_api_port(9292)
    return {
        "service_ports": {"glance_api": [frontend, backend]},
        "bind_port": backend,
    }
def __call__(self):
    """Build the neutron server context: plugin options (nsx,
    plumgrid), HA/overlay settings, quotas and provider networks."""
    from neutron_api_utils import api_port
    ctxt = super(NeutronCCContext, self).__call__()
    if config("neutron-plugin") == "nsx":
        ctxt["nsx_username"] = config("nsx-username")
        ctxt["nsx_password"] = config("nsx-password")
        ctxt["nsx_tz_uuid"] = config("nsx-tz-uuid")
        ctxt["nsx_l3_uuid"] = config("nsx-l3-uuid")
        if "nsx-controllers" in config():
            ctxt["nsx_controllers"] = ",".join(
                config("nsx-controllers").split())
            ctxt["nsx_controllers_list"] = config(
                "nsx-controllers").split()
    if config("neutron-plugin") == "plumgrid":
        ctxt["pg_username"] = config("plumgrid-username")
        ctxt["pg_password"] = config("plumgrid-password")
        ctxt["virtual_ip"] = config("plumgrid-virtual-ip")
    ctxt["l2_population"] = self.neutron_l2_population
    ctxt["enable_dvr"] = self.neutron_dvr
    ctxt["l3_ha"] = self.neutron_l3ha
    if self.neutron_l3ha:
        # Only meaningful when L3 HA is enabled.
        ctxt["max_l3_agents_per_router"] = config(
            "max-l3-agents-per-router")
        ctxt["min_l3_agents_per_router"] = config(
            "min-l3-agents-per-router")
    ctxt["overlay_network_type"] = self.neutron_overlay_network_type
    ctxt["external_network"] = config("neutron-external-network")
    ctxt["verbose"] = config("verbose")
    ctxt["debug"] = config("debug")
    # Port neutron-server binds to (shifted when fronted by haproxy).
    ctxt["neutron_bind_port"] = determine_api_port(
        api_port("neutron-server"), singlenode_mode=True)
    ctxt["quota_security_group"] = config("quota-security-group")
    ctxt["quota_security_group_rule"] = config(
        "quota-security-group-rule")
    ctxt["quota_network"] = config("quota-network")
    ctxt["quota_subnet"] = config("quota-subnet")
    ctxt["quota_port"] = config("quota-port")
    ctxt["quota_vip"] = config("quota-vip")
    ctxt["quota_pool"] = config("quota-pool")
    ctxt["quota_member"] = config("quota-member")
    ctxt["quota_health_monitors"] = config("quota-health-monitors")
    ctxt["quota_router"] = config("quota-router")
    ctxt["quota_floatingip"] = config("quota-floatingip")

    n_api_settings = self.get_neutron_api_rel_settings()
    if n_api_settings:
        ctxt.update(n_api_settings)

    flat_providers = config("flat-network-providers")
    if flat_providers:
        ctxt["network_providers"] = ",".join(flat_providers.split())

    vlan_ranges = config("vlan-ranges")
    if vlan_ranges:
        ctxt["vlan_ranges"] = ",".join(vlan_ranges.split())

    return ctxt
def __call__(self):
    """Extend the base HAProxyContext with the swift proxy's haproxy
    frontend/backend port pair."""
    listen = config('bind-port')
    return {
        'service_ports': {
            'swift_api': [determine_haproxy_port(listen),
                          determine_api_port(listen)],
        },
    }
def __call__(self):
    """Extend the base HAProxyContext with the ceilometer API port
    mapping and the port the API process should bind to."""
    backend = determine_api_port(CEILOMETER_PORT, singlenode_mode=True)
    apache = determine_apache_port(CEILOMETER_PORT,
                                   singlenode_mode=True)
    return {
        'service_ports': {'ceilometer_api': [CEILOMETER_PORT, apache]},
        'port': backend,
    }
def __call__(self):
    """Extend the base HAProxyContext with heat's two API services.

    Provides the haproxy frontend / apache backend pairs for heat-api
    and heat-api-cfn plus the ports the two daemons should listen on.
    """
    service_ports = {}
    listen_ports = {}
    for svc, haproxy_key, listen_key in (
            ('heat-api', 'heat_api', 'api_listen_port'),
            ('heat-api-cfn', 'heat_cfn_api', 'api_cfn_listen_port')):
        frontend = API_PORTS[svc]
        service_ports[haproxy_key] = [
            frontend,
            determine_apache_port(frontend, singlenode_mode=True)]
        listen_ports[listen_key] = determine_api_port(
            frontend, singlenode_mode=True)

    ctxt = {'service_ports': service_ports}
    ctxt.update(listen_ports)
    return ctxt
def __call__(self):
    """Extend the base HAProxyContext with the glance-api haproxy
    frontend/apache backend pair and glance-api's own bind port."""
    apache = determine_apache_port(9292, singlenode_mode=True)
    backend = determine_api_port(9292, singlenode_mode=True)
    return {
        'service_ports': {'glance_api': [9292, apache]},
        'bind_port': backend,
    }
def __call__(self):
    """Build the Apache SSL context (legacy Python 2 variant).

    Pairs each local network address with the endpoint address clients
    should use (a matching VIP when clustered, the address itself
    otherwise) and maps external apache ports to internal API ports.
    """
    # NOTE: basestring only exists on Python 2.
    if isinstance(self.external_ports, basestring):
        self.external_ports = [self.external_ports]
    if (not self.external_ports or not https()):
        return {}

    self.configure_ca()
    self.enable_modules()

    ctxt = {
        'namespace': self.service_namespace,
        'endpoints': [],
        'ext_ports': []
    }

    for cn in self.canonical_names():
        self.configure_cert(cn)

    addresses = []
    vips = []
    if config('vip'):
        vips = config('vip').split()

    for network_type in ['os-internal-network',
                         'os-admin-network',
                         'os-public-network']:
        address = get_address_in_network(config(network_type),
                                         unit_get('private-address'))
        if len(vips) > 0 and is_clustered():
            # Pick the VIP that lives on this network.
            for vip in vips:
                if is_address_in_network(config(network_type), vip):
                    addresses.append((address, vip))
                    break
        elif is_clustered():
            # Single VIP configured; use it for every network.
            addresses.append((address, config('vip')))
        else:
            addresses.append((address, address))

    for address, endpoint in set(addresses):
        for api_port in self.external_ports:
            ext_port = determine_apache_port(api_port)
            int_port = determine_api_port(api_port)
            portmap = (address, endpoint, int(ext_port), int(int_port))
            ctxt['endpoints'].append(portmap)
            ctxt['ext_ports'].append(int(ext_port))

    ctxt['ext_ports'] = list(set(ctxt['ext_ports']))
    return ctxt
def __call__(self):
    """Extend the base HAProxyContext with cinder's haproxy/apache port
    pair and the port the cinder API should listen on."""
    listen = config('api-listening-port')
    apache = determine_apache_port(listen, singlenode_mode=True)
    backend = determine_api_port(listen, singlenode_mode=True)
    return {
        'service_ports': {'cinder_api': [listen, apache]},
        'osapi_volume_listen_port': backend,
    }
def update_nrpe_config():
    """Install and (re)write the NRPE checks for this unit, including a
    swift-proxy HTTP healthcheck against the locally bound port."""
    # python-dbus is used by check_upstart_job
    apt_install('python-dbus')
    hostname = nrpe.get_nagios_hostname()
    current_unit = nrpe.get_nagios_unit_name()
    nrpe_setup = nrpe.NRPE(hostname=hostname)
    nrpe.copy_nrpe_checks()
    nrpe.add_init_service_checks(nrpe_setup, services(), current_unit)
    nrpe.add_haproxy_checks(nrpe_setup, current_unit)
    # Healthcheck must target the port the proxy actually binds to
    # (shifted when haproxy fronts the service on this unit).
    api_port = determine_api_port(config('bind-port'),
                                  singlenode_mode=True)
    nrpe_setup.add_check(shortname="swift-proxy-healthcheck",
                         description="Check Swift Proxy Healthcheck",
                         check_cmd="/usr/lib/nagios/plugins/check_http \
                         -I localhost -u /healthcheck -p {} \
                         -e \"OK\"".format(api_port))
    nrpe_setup.write()
def __call__(self):
    """Extend the base HAProxyContext with the radosgw port mapping and
    the port the radosgw backend should bind to."""
    ctxt = super(HAProxyContext, self).__call__()

    frontend = utils.listen_port()
    # Port apache exposes for the radosgw backend.
    apache = determine_apache_port(frontend, singlenode_mode=True)

    ctxt['cephradosgw_bind_port'] = determine_api_port(
        frontend,
        singlenode_mode=True,
    )
    # for haproxy.conf
    ctxt['service_ports'] = {'cephradosgw-server': [frontend, apache]}
    return ctxt
def __call__(self):
    """Build the legacy Apache SSL context (py2-era).

    Maps each external port to the internal port the backend listens
    on: the haproxy-shifted port when peered/clustered, otherwise the
    plain API port.
    """
    ports = self.external_ports
    if isinstance(ports, basestring):
        ports = [ports]
        self.external_ports = ports
    if not (ports and https()):
        return {}

    self.configure_cert()
    self.enable_modules()

    clustered = peer_units() or is_clustered()
    endpoints = []
    for port in ports:
        if clustered:
            backend = determine_haproxy_port(port)
        else:
            backend = determine_api_port(port)
        endpoints.append((int(port), int(backend)))

    return {"namespace": self.service_namespace,
            "private_address": unit_get("private-address"),
            "endpoints": endpoints}
def __call__(self):
    """Extend the base HAProxyContext with the radosgw port mapping
    (configured port) and the backend bind port."""
    ctxt = super(HAProxyContext, self).__call__()

    frontend = config('port')
    apache = determine_apache_port(frontend, singlenode_mode=True)

    ctxt['cephradosgw_bind_port'] = determine_api_port(
        frontend,
        singlenode_mode=True,
    )
    # for haproxy.conf
    ctxt['service_ports'] = {
        'cephradosgw-server': [frontend, apache],
    }
    return ctxt
def __call__(self):
    """Build the legacy Apache SSL context mapping apache frontend
    ports to internal API ports."""
    ports = self.external_ports
    if isinstance(ports, basestring):
        ports = [ports]
        self.external_ports = ports
    if not (ports and https()):
        return {}

    self.configure_cert()
    self.enable_modules()

    endpoints = [
        (int(determine_apache_port(p)), int(determine_api_port(p)))
        for p in ports
    ]
    return {
        'namespace': self.service_namespace,
        'private_address': unit_get('private-address'),
        'endpoints': endpoints,
    }
def __call__(self):
    """Extend the base HAProxyContext with the msm service port mapping
    and the port msm should bind to."""
    from cplane_utils import api_port
    ctxt = super(HAProxyContext, self).__call__()

    msm_port = api_port('msm')
    apache = determine_apache_port(msm_port, singlenode_mode=True)

    ctxt['msm_bind_port'] = determine_api_port(
        msm_port,
        singlenode_mode=True,
    )
    # for haproxy.conf
    ctxt['service_ports'] = {'msm': [msm_port, apache]}
    return ctxt
def try_initialize_swauth():
    """Prime swauth on the leader unit (idempotent best-effort).

    Ensures a shared admin key exists in leader storage, runs
    swauth-prep against the locally bound proxy port and records
    completion in leader storage. Failures are logged, not raised.
    """
    if is_leader() and config('auth-type') == 'swauth':
        # NOTE(review): leader_get() returns string data, so comparing
        # with the boolean True looks like it is always unequal and
        # swauth-prep re-runs on every call — confirm against the
        # leader-storage semantics in use here.
        if leader_get('swauth-init') is not True:
            try:
                admin_key = config('swauth-admin-key')
                if admin_key == '' or admin_key is None:
                    admin_key = leader_get('swauth-admin-key')
                    if admin_key is None:
                        # BUG FIX: uuid.uuid4() returns a UUID object;
                        # subprocess arguments must be strings.
                        admin_key = str(uuid.uuid4())
                leader_set({'swauth-admin-key': admin_key})
                bind_port = config('bind-port')
                bind_port = determine_api_port(bind_port,
                                               singlenode_mode=True)
                subprocess.check_call([
                    'swauth-prep',
                    '-A',
                    'http://localhost:{}/auth'.format(bind_port),
                    '-K', admin_key])
                leader_set({'swauth-init': True})
            except subprocess.CalledProcessError:
                log("had a problem initializing swauth!")
def __call__(self):
    """Build the legacy Apache SSL context (py2-era).

    Internal port is the haproxy port when this unit is peered or
    clustered, else the plain API port.
    """
    if isinstance(self.external_ports, basestring):
        self.external_ports = [self.external_ports]
    if not self.external_ports or not https():
        return {}

    self.configure_cert()
    self.enable_modules()

    ctxt = {
        'namespace': self.service_namespace,
        'private_address': unit_get('private-address'),
        'endpoints': [],
    }
    use_haproxy = peer_units() or is_clustered()
    for port in self.external_ports:
        internal = (determine_haproxy_port(port) if use_haproxy
                    else determine_api_port(port))
        ctxt['endpoints'].append((int(port), int(internal)))
    return ctxt
def __call__(self):
    """Build the Apache SSL context: certs plus per-address port maps.

    Returns {} unless external ports are configured and https() is on.
    """
    # A bare string is treated as a single-element port list.
    if isinstance(self.external_ports, six.string_types):
        self.external_ports = [self.external_ports]
    if not (self.external_ports and https()):
        return {}
    self.configure_ca()
    self.enable_modules()
    cns = self.canonical_names()
    if cns:
        # One certificate per canonical name.
        for cn in cns:
            self.configure_cert(cn)
    else:
        # Expect cert/key provided in config (currently assumed that ca
        # uses ip for cn).
        self.configure_cert(resolve_address(endpoint_type=INTERNAL))
    endpoints = []
    ext_ports = []
    for address, endpoint in sorted(set(self.get_network_addresses())):
        for port in self.external_ports:
            # Apache listens on the shifted port, proxies to the api.
            apache_port = int(determine_apache_port(port,
                                                    singlenode_mode=True))
            backend_port = int(determine_api_port(port,
                                                  singlenode_mode=True))
            endpoints.append((address, endpoint, apache_port,
                              backend_port))
            ext_ports.append(apache_port)
    return {
        'namespace': self.service_namespace,
        'endpoints': endpoints,
        # De-duplicated, sorted list of all external ports.
        'ext_ports': sorted(set(ext_ports)),
    }
def __call__(self):
    """Install the Apache frontend and return its template context.

    No-op (returns {}) when the embedded webserver is in use.
    Otherwise copies the www scripts into place, enables the required
    Apache modules, disables the distro default site, and returns the
    hostname and api bind port for the vhost template.
    """
    ctxt = {}
    if config('use-embedded-webserver'):
        log("Skipping ApacheContext since we are using the embedded "
            "webserver")
        return {}
    status_set('maintenance', 'configuring apache')
    src = 'files/www/*'
    dst = '/var/www/'
    log("Installing www scripts", level=DEBUG)
    # Copy failures are logged but deliberately non-fatal.
    try:
        for x in glob.glob(src):
            shutil.copy(x, dst)
    except IOError as e:
        log("Error copying files from '%s' to '%s': %s" % (src, dst, e),
            level=WARNING)
    # fastcgi + rewrite are required to front the service via Apache.
    try:
        subprocess.check_call(['a2enmod', 'fastcgi'])
        subprocess.check_call(['a2enmod', 'rewrite'])
    except subprocess.CalledProcessError as e:
        log("Error enabling apache modules - %s" % e, level=WARNING)
    # The default site's name differs between Apache 2.4 and older.
    try:
        if is_apache_24():
            subprocess.check_call(['a2dissite', '000-default'])
        else:
            subprocess.check_call(['a2dissite', 'default'])
    except subprocess.CalledProcessError as e:
        log("Error disabling apache sites - %s" % e, level=WARNING)
    ctxt['hostname'] = socket.gethostname()
    # haproxy/apache-aware bind port.
    ctxt['port'] = determine_api_port(config('port'),
                                      singlenode_mode=True)
    return ctxt
def __call__(self):
    """Build the Apache SSL context: cert setup plus endpoint maps.

    Returns {} unless external ports are configured and https() is
    enabled.
    """
    # A single port may be given as a bare string; normalise to a list.
    if isinstance(self.external_ports, six.string_types):
        self.external_ports = [self.external_ports]
    if not self.external_ports or not https():
        return {}
    self.configure_ca()
    self.enable_modules()
    ctxt = {'namespace': self.service_namespace,
            'endpoints': [],
            'ext_ports': []}
    cns = self.canonical_names()
    if cns:
        # One certificate per canonical name.
        for cn in cns:
            self.configure_cert(cn)
    else:
        # Expect cert/key provided in config (currently assumed that ca
        # uses ip for cn)
        cn = resolve_address(endpoint_type=INTERNAL)
        self.configure_cert(cn)
    addresses = self.get_network_addresses()
    for address, endpoint in sorted(set(addresses)):
        for api_port in self.external_ports:
            # Apache listens on ext_port and proxies to int_port.
            ext_port = determine_apache_port(api_port,
                                             singlenode_mode=True)
            int_port = determine_api_port(api_port,
                                          singlenode_mode=True)
            portmap = (address, endpoint, int(ext_port), int(int_port))
            ctxt['endpoints'].append(portmap)
            ctxt['ext_ports'].append(int(ext_port))
    # De-duplicate the flat list of external ports for the template.
    ctxt['ext_ports'] = sorted(list(set(ctxt['ext_ports'])))
    return ctxt
def __call__(self):
    """Assemble the neutron-server configuration context.

    Extends the parent context with plugin-specific settings (nsx,
    plumgrid, midonet, nuage/vsp), HA and quota options, and the
    haproxy-adjusted neutron bind port.
    """
    from neutron_api_utils import api_port
    ctxt = super(NeutronCCContext, self).__call__()
    if config('neutron-plugin') == 'nsx':
        ctxt['nsx_username'] = config('nsx-username')
        ctxt['nsx_password'] = config('nsx-password')
        ctxt['nsx_tz_uuid'] = config('nsx-tz-uuid')
        ctxt['nsx_l3_uuid'] = config('nsx-l3-uuid')
        if 'nsx-controllers' in config():
            # Space-separated in config, comma-separated in the template.
            ctxt['nsx_controllers'] = \
                ','.join(config('nsx-controllers').split())
            ctxt['nsx_controllers_list'] = \
                config('nsx-controllers').split()
    if config('neutron-plugin') == 'plumgrid':
        ctxt['pg_username'] = config('plumgrid-username')
        ctxt['pg_password'] = config('plumgrid-password')
        ctxt['virtual_ip'] = config('plumgrid-virtual-ip')
    elif config('neutron-plugin') == 'midonet':
        ctxt.update(MidonetContext()())
        identity_context = IdentityServiceContext(service='neutron',
                                                  service_user='******')()
        if identity_context is not None:
            ctxt.update(identity_context)
    ctxt['l2_population'] = self.neutron_l2_population
    ctxt['enable_dvr'] = self.neutron_dvr
    ctxt['l3_ha'] = self.neutron_l3ha
    if self.neutron_l3ha:
        ctxt['max_l3_agents_per_router'] = \
            config('max-l3-agents-per-router')
        ctxt['min_l3_agents_per_router'] = \
            config('min-l3-agents-per-router')
    ctxt['dhcp_agents_per_network'] = config('dhcp-agents-per-network')
    ctxt['overlay_network_type'] = self.neutron_overlay_network_type
    ctxt['external_network'] = config('neutron-external-network')
    release = os_release('neutron-server')
    if config('neutron-plugin') in ['vsp']:
        # Forward every vsd* config option into the context.
        _config = config()
        for k, v in _config.iteritems():
            if k.startswith('vsd'):
                ctxt[k.replace('-', '_')] = v
        for rid in relation_ids('vsd-rest-api'):
            for unit in related_units(rid):
                rdata = relation_get(rid=rid, unit=unit)
                vsd_ip = rdata.get('vsd-ip-address')
                # NOTE(review): plain string comparison of release
                # names; the newer variant of this handler uses
                # CompareOpenStackReleases — confirm ordering is safe
                # for the releases this charm targets.
                if release >= 'kilo':
                    cms_id_value = rdata.get('nuage-cms-id')
                    log('relation data:cms_id required for'
                        ' nuage plugin: {}'.format(cms_id_value))
                    if cms_id_value is not None:
                        ctxt['vsd_cms_id'] = cms_id_value
                log('relation data:vsd-ip-address: {}'.format(vsd_ip))
                if vsd_ip is not None:
                    ctxt['vsd_server'] = '{}:8443'.format(vsd_ip)
        # Placeholder until the vsd-rest-api relation provides an ip.
        if 'vsd_server' not in ctxt:
            ctxt['vsd_server'] = '1.1.1.1:8443'
    ctxt['verbose'] = config('verbose')
    ctxt['debug'] = config('debug')
    # haproxy/apache-aware bind port for neutron-server.
    ctxt['neutron_bind_port'] = \
        determine_api_port(api_port('neutron-server'),
                           singlenode_mode=True)
    ctxt['quota_security_group'] = config('quota-security-group')
    ctxt['quota_security_group_rule'] = \
        config('quota-security-group-rule')
    ctxt['quota_network'] = config('quota-network')
    ctxt['quota_subnet'] = config('quota-subnet')
    ctxt['quota_port'] = config('quota-port')
    ctxt['quota_vip'] = config('quota-vip')
    ctxt['quota_pool'] = config('quota-pool')
    ctxt['quota_member'] = config('quota-member')
    ctxt['quota_health_monitors'] = config('quota-health-monitors')
    ctxt['quota_router'] = config('quota-router')
    ctxt['quota_floatingip'] = config('quota-floatingip')
    n_api_settings = self.get_neutron_api_rel_settings()
    if n_api_settings:
        ctxt.update(n_api_settings)
    flat_providers = config('flat-network-providers')
    if flat_providers:
        ctxt['network_providers'] = ','.join(flat_providers.split())
    vlan_ranges = config('vlan-ranges')
    if vlan_ranges:
        ctxt['vlan_ranges'] = ','.join(vlan_ranges.split())
    vni_ranges = config('vni-ranges')
    if vni_ranges:
        ctxt['vni_ranges'] = ','.join(vni_ranges.split())
    ctxt['enable_ml2_port_security'] = config('enable-ml2-port-security')
    ctxt['enable_sriov'] = config('enable-sriov')
    if release == 'kilo' or release >= 'mitaka':
        ctxt['enable_hyperv'] = True
    else:
        ctxt['enable_hyperv'] = False
    return ctxt
def __call__(self):
    """Build the swift proxy-server.conf template context.

    Combines local charm config (ports, workers, affinity, logging),
    leader-stored swauth data (pre-Train only) and keystone
    credentials taken either from charm config or merged from the
    identity-service relation(s); relation data overrides config.
    """
    bind_port = config('bind-port')
    workers = config('workers')
    if workers == 0:
        # 0 means "one worker per cpu".
        import multiprocessing
        workers = multiprocessing.cpu_count()
    if config('prefer-ipv6'):
        proxy_ip = ('[{}]'.format(
            get_ipv6_addr(exc_list=[config('vip')])[0]))
        memcached_ip = 'ip6-localhost'
    else:
        proxy_ip = get_host_ip(unit_get('private-address'))
        memcached_ip = get_host_ip(unit_get('private-address'))
    ctxt = {
        'proxy_ip': proxy_ip,
        'memcached_ip': memcached_ip,
        # haproxy/apache-aware bind port.
        'bind_port': determine_api_port(bind_port, singlenode_mode=True),
        'workers': workers,
        'operator_roles': config('operator-roles'),
        'delay_auth_decision': config('delay-auth-decision'),
        'node_timeout': config('node-timeout'),
        'recoverable_node_timeout': config('recoverable-node-timeout'),
        'log_headers': config('log-headers'),
        'statsd_host': config('statsd-host'),
        'statsd_port': config('statsd-port'),
        'statsd_sample_rate': config('statsd-sample-rate'),
        'static_large_object_segments':
            config('static-large-object-segments'),
        'enable_multi_region': config('enable-multi-region'),
        'read_affinity': get_read_affinity(),
        'write_affinity': get_write_affinity(),
        'write_affinity_node_count': get_write_affinity_node_count()
    }
    cmp_openstack = CompareOpenStackReleases(os_release('swift'))
    if cmp_openstack < 'train':
        # swauth is no longer supported for OpenStack Train and later
        admin_key = leader_get('swauth-admin-key')
        if admin_key is not None:
            ctxt['swauth_admin_key'] = admin_key
    if config('debug'):
        ctxt['log_level'] = 'DEBUG'
    else:
        ctxt['log_level'] = 'INFO'
    # Instead of duplicating code lets use charm-helpers to set
    # signing_dir
    # TODO(hopem): refactor this context handler to use charm-helpers
    # code.
    _ctxt = IdentityServiceContext(service='swift',
                                   service_user='******')()
    signing_dir = _ctxt.get('signing_dir')
    if signing_dir:
        ctxt['signing_dir'] = signing_dir
    ctxt['ssl'] = False
    auth_type = config('auth-type')
    ctxt['auth_type'] = auth_type
    auth_host = config('keystone-auth-host')
    admin_user = config('keystone-admin-user')
    # BUGFIX: this previously read config('keystone-admin-user'),
    # silently using the username as the password.
    admin_password = config('keystone-admin-password')
    if (auth_type == 'keystone' and auth_host and
            admin_user and admin_password):
        log('Using user-specified Keystone configuration.')
        ks_auth = {
            'auth_type': 'keystone',
            'auth_protocol': config('keystone-auth-protocol'),
            'keystone_host': auth_host,
            'auth_port': config('keystone-auth-port'),
            'service_user': admin_user,
            'service_password': admin_password,
            'service_tenant': config('keystone-admin-tenant-name'),
        }
        ctxt.update(ks_auth)
    # Sometime during the 20.08 development cycle, keystone changed from
    # every unit setting relation data to just the leader.  This means
    # that the charm needs to data from the first relation that actually
    # has data, or almalgamate the data from all the relations.  For
    # this charm, it merges from the relation ids available like the
    # charms.reactive system does.
    _keys = (('auth_protocol', 'auth_protocol', 'http'),
             ('service_protocol', 'service_protocol', 'http'),
             ('keystone_host', 'auth_host', None),
             ('service_host', 'service_host', None),
             ('auth_port', 'auth_port', None),
             ('service_user', 'service_username', None),
             ('service_password', 'service_password', None),
             ('service_tenant', 'service_tenant', None),
             ('service_port', 'service_port', None),
             ('api_version', 'api_version', '2'))
    _keysv3 = (('admin_domain_id', 'admin_domain_id'),
               ('service_tenant_id', 'service_tenant_id'),
               ('admin_domain_name', 'service_domain'),
               ('admin_tenant_name', 'service_tenant'))
    kvs = {}
    relids = relation_ids('identity-service')
    # if we have relids at all, then set the auth_type to keystone
    if relids:
        kvs['auth_type'] = 'keystone'
    # merge the data from the related units
    for (key, source, default) in _keys:
        for relid in relids:
            for unit in related_units(relid):
                value = relation_get(source, unit, relid)
                if value is not None:
                    kvs[key] = value
                else:
                    kvs[key] = kvs.get(key, default)
    # if the api is version 3, also merge the additional keys
    if kvs.get('api_version', None) == '3':
        for (key, source) in _keysv3:
            for relid in relids:
                for unit in related_units(relid):
                    value = relation_get(source, unit, relid)
                    if value is not None:
                        kvs[key] = value
    # merge in the creds from the relation; which override the config
    ctxt.update(kvs)
    if config('prefer-ipv6'):
        for key in ['keystone_host', 'service_host']:
            host = ctxt.get(key)
            if host:
                ctxt[key] = format_ipv6_addr(host)
    return ctxt
def __call__(self):
    """Build the swift proxy-server.conf template context.

    Keystone credentials may come from charm config or, preferably,
    from a complete identity-service relation (relation data wins).
    """
    bind_port = config('bind-port')
    workers = config('workers')
    if workers == 0:
        # 0 means "one worker per cpu".
        import multiprocessing
        workers = multiprocessing.cpu_count()
    if config('prefer-ipv6'):
        proxy_ip = '[%s]' % get_ipv6_addr(exc_list=[config('vip')])[0]
        memcached_ip = 'ip6-localhost'
    else:
        proxy_ip = get_host_ip(unit_get('private-address'))
        memcached_ip = get_host_ip(unit_get('private-address'))
    ctxt = {
        'proxy_ip': proxy_ip,
        'memcached_ip': memcached_ip,
        # haproxy/apache-aware bind port.
        'bind_port': determine_api_port(bind_port, singlenode_mode=True),
        'workers': workers,
        'operator_roles': config('operator-roles'),
        'delay_auth_decision': config('delay-auth-decision'),
        'node_timeout': config('node-timeout'),
        'recoverable_node_timeout': config('recoverable-node-timeout'),
        'log_headers': config('log-headers')
    }
    if config('debug'):
        ctxt['log_level'] = 'DEBUG'
    else:
        ctxt['log_level'] = 'INFO'
    # Instead of duplicating code lets use charm-helpers to set
    # signing_dir
    # TODO(hopem): refactor this context handler to use charm-helpers
    # code.
    _ctxt = IdentityServiceContext(service='swift',
                                   service_user='******')()
    signing_dir = _ctxt.get('signing_dir')
    if signing_dir:
        ctxt['signing_dir'] = signing_dir
    ctxt['ssl'] = False
    auth_type = config('auth-type')
    auth_host = config('keystone-auth-host')
    admin_user = config('keystone-admin-user')
    # BUGFIX: this previously read config('keystone-admin-user'),
    # silently using the username as the password.
    admin_password = config('keystone-admin-password')
    if (auth_type == 'keystone' and auth_host and
            admin_user and admin_password):
        log('Using user-specified Keystone configuration.')
        ks_auth = {
            'auth_type': 'keystone',
            'auth_protocol': config('keystone-auth-protocol'),
            'keystone_host': auth_host,
            'auth_port': config('keystone-auth-port'),
            'service_user': admin_user,
            'service_password': admin_password,
            'service_tenant': config('keystone-admin-tenant-name')
        }
        ctxt.update(ks_auth)
    # Relation-provided credentials override config-provided ones, but
    # only once the relation data is complete.
    for relid in relation_ids('identity-service'):
        log('Using Keystone configuration from identity-service.')
        for unit in related_units(relid):
            ks_auth = {
                'auth_type': 'keystone',
                'auth_protocol': relation_get('auth_protocol',
                                              unit, relid) or 'http',
                'service_protocol': relation_get('service_protocol',
                                                 unit, relid) or 'http',
                'keystone_host': relation_get('auth_host',
                                              unit, relid),
                'service_host': relation_get('service_host',
                                             unit, relid),
                'auth_port': relation_get('auth_port', unit, relid),
                'service_user': relation_get('service_username',
                                             unit, relid),
                'service_password': relation_get('service_password',
                                                 unit, relid),
                'service_tenant': relation_get('service_tenant',
                                               unit, relid),
                'service_port': relation_get('service_port',
                                             unit, relid),
                'admin_token': relation_get('admin_token', unit, relid),
            }
            if context_complete(ks_auth):
                ctxt.update(ks_auth)
    if config('prefer-ipv6'):
        for key in ['keystone_host', 'service_host']:
            host = ctxt.get(key)
            if host:
                ctxt[key] = format_ipv6_addr(host)
    return ctxt
def test_determine_api_port_nopeers_singlemode(self, peer_units, https):
    """It determines API port with a single unit in singlenode mode."""
    peer_units.return_value = []
    https.return_value = False
    port = cluster_utils.determine_api_port(9696, singlenode_mode=True)
    # assertEquals is a deprecated alias; assertEqual is the
    # supported spelling.
    self.assertEqual(9686, port)
def test_determine_api_port_with_peers(self, peer_units, https):
    """It determines API port in presence of peers."""
    peer_units.return_value = ['peer1']
    https.return_value = False
    # assertEquals is a deprecated alias; assertEqual is the
    # supported spelling.
    self.assertEqual(9686, cluster_utils.determine_api_port(9696))
def __call__(self):
    """Build the keystone.conf template context.

    Covers admin token, haproxy/apache-aware bind ports, logging
    flags, backend selection (including LDAP), optional PKI token
    signing paths, and the public/admin base endpoint URLs.
    """
    from keystone_utils import (
        api_port,
        set_admin_token,
        endpoint_url,
        resolve_address,
        PUBLIC,
        ADMIN,
        PKI_CERTS_DIR,
        SSH_USER,
        ensure_permissions,
    )
    ctxt = {}
    ctxt['token'] = set_admin_token(config('admin-token'))
    # haproxy/apache-aware bind ports.
    ctxt['admin_port'] = determine_api_port(api_port('keystone-admin'),
                                            singlenode_mode=True)
    ctxt['public_port'] = determine_api_port(
        api_port('keystone-public'), singlenode_mode=True)
    debug = config('debug')
    ctxt['debug'] = debug and bool_from_string(debug)
    verbose = config('verbose')
    ctxt['verbose'] = verbose and bool_from_string(verbose)
    ctxt['token_expiration'] = config('token-expiration')
    ctxt['identity_backend'] = config('identity-backend')
    ctxt['assignment_backend'] = config('assignment-backend')
    if config('identity-backend') == 'ldap':
        ctxt['ldap_server'] = config('ldap-server')
        ctxt['ldap_user'] = config('ldap-user')
        ctxt['ldap_password'] = config('ldap-password')
        ctxt['ldap_suffix'] = config('ldap-suffix')
        ctxt['ldap_readonly'] = config('ldap-readonly')
        ldap_flags = config('ldap-config-flags')
        if ldap_flags:
            flags = context.config_flags_parser(ldap_flags)
            ctxt['ldap_config_flags'] = flags
    enable_pki = config('enable-pki')
    if enable_pki and bool_from_string(enable_pki):
        ctxt['signing'] = True
        ctxt['token_provider'] = 'pki'
    if 'token_provider' in ctxt:
        log("Configuring PKI token cert paths", level=DEBUG)
        certs = os.path.join(PKI_CERTS_DIR, 'certs')
        privates = os.path.join(PKI_CERTS_DIR, 'privates')
        for path in [PKI_CERTS_DIR, certs, privates]:
            perms = 0o755
            if not os.path.isdir(path):
                mkdir(path=path, owner=SSH_USER, group='keystone',
                      perms=perms)
            else:
                # Ensure accessible by ssh user and group (for sync).
                ensure_permissions(path, user=SSH_USER,
                                   group='keystone', perms=perms)
        signing_paths = {'certfile': os.path.join(certs,
                                                  'signing_cert.pem'),
                         'keyfile': os.path.join(privates,
                                                 'signing_key.pem'),
                         'ca_certs': os.path.join(certs, 'ca.pem'),
                         'ca_key': os.path.join(certs, 'ca_key.pem')}
        # .items() works on py2 and py3 (iteritems() is py2-only).
        for key, val in signing_paths.items():
            ctxt[key] = val
    # Base endpoint URL's which are used in keystone responses
    # to unauthenticated requests to redirect clients to the
    # correct auth URL.
    # BUGFIX: was .rstrip('v2.0'), which strips any trailing run of
    # the characters v/2/./0 (e.g. eating trailing zeros of a port),
    # not the literal 'v2.0' suffix.
    ctxt['public_endpoint'] = endpoint_url(
        resolve_address(PUBLIC),
        api_port('keystone-public')).replace('v2.0', '')
    ctxt['admin_endpoint'] = endpoint_url(
        resolve_address(ADMIN),
        api_port('keystone-admin')).replace('v2.0', '')
    return ctxt
def __call__(self):
    '''
    Extends the main charmhelpers HAProxyContext with
    a port mapping specific to this charm.
    Also used to extend nova.conf context with correct
    api_listening_ports
    '''
    ctxt = super(HAProxyContext, self).__call__()
    os_rel = ch_utils.os_release('nova-common')
    cmp_os_rel = ch_utils.CompareOpenStackReleases(os_rel)
    # determine which port api processes should bind to, depending
    # on existence of haproxy + apache frontends
    compute_api = ch_cluster.determine_api_port(
        common.api_port('nova-api-os-compute'), singlenode_mode=True)
    ec2_api = ch_cluster.determine_api_port(
        common.api_port('nova-api-ec2'), singlenode_mode=True)
    s3_api = ch_cluster.determine_api_port(
        common.api_port('nova-objectstore'), singlenode_mode=True)
    placement_api = ch_cluster.determine_api_port(
        common.api_port('nova-placement-api'), singlenode_mode=True)
    metadata_api = ch_cluster.determine_api_port(
        common.api_port('nova-api-metadata'), singlenode_mode=True)
    # Apache ports
    a_compute_api = ch_cluster.determine_apache_port(
        common.api_port('nova-api-os-compute'), singlenode_mode=True)
    a_ec2_api = ch_cluster.determine_apache_port(
        common.api_port('nova-api-ec2'), singlenode_mode=True)
    a_s3_api = ch_cluster.determine_apache_port(
        common.api_port('nova-objectstore'), singlenode_mode=True)
    a_placement_api = ch_cluster.determine_apache_port(
        common.api_port('nova-placement-api'), singlenode_mode=True)
    a_metadata_api = ch_cluster.determine_apache_port(
        common.api_port('nova-api-metadata'), singlenode_mode=True)
    # to be set in nova.conf accordingly.
    listen_ports = {
        'osapi_compute_listen_port': compute_api,
        'ec2_listen_port': ec2_api,
        's3_listen_port': s3_api,
        'placement_listen_port': placement_api,
        'metadata_listen_port': metadata_api,
    }
    # haproxy frontend (public port) -> apache backend port per service.
    port_mapping = {
        'nova-api-os-compute': [
            common.api_port('nova-api-os-compute'), a_compute_api],
        'nova-api-ec2': [
            common.api_port('nova-api-ec2'), a_ec2_api],
        'nova-objectstore': [
            common.api_port('nova-objectstore'), a_s3_api],
        'nova-placement-api': [
            common.api_port('nova-placement-api'), a_placement_api],
        'nova-api-metadata': [
            common.api_port('nova-api-metadata'), a_metadata_api],
    }
    # ec2/s3 front-ends were dropped from nova as of kilo.
    if cmp_os_rel >= 'kilo':
        del listen_ports['ec2_listen_port']
        del listen_ports['s3_listen_port']
        del port_mapping['nova-api-ec2']
        del port_mapping['nova-objectstore']
    # nova-placement-api only exists from ocata onward.
    if cmp_os_rel < 'ocata':
        del listen_ports['placement_listen_port']
        del port_mapping['nova-placement-api']
    # for haproxy.conf
    ctxt['service_ports'] = port_mapping
    # for nova.conf
    ctxt['listen_ports'] = listen_ports
    return ctxt
def __call__(self):
    """Build the keystone.conf template context.

    Covers the admin token, api version and v3 domain/tenant ids from
    leader storage, haproxy/apache-aware bind ports, backend and
    token-provider options (including LDAP), base endpoint URLs and
    snap-vs-deb config file locations.
    """
    from keystone_utils import (
        api_port,
        set_admin_token,
        endpoint_url,
        resolve_address,
        PUBLIC,
        ADMIN,
        ADMIN_DOMAIN,
        snap_install_requested,
        get_api_version,
    )
    ctxt = {}
    ctxt['token'] = set_admin_token(config('admin-token'))
    ctxt['api_version'] = get_api_version()
    ctxt['admin_role'] = config('admin-role')
    if ctxt['api_version'] > 2:
        # v3-only identifiers are shared by the leader via leader
        # storage.
        ctxt['service_tenant_id'] = \
            leader_get(attribute='service_tenant_id')
        ctxt['admin_domain_name'] = ADMIN_DOMAIN
        ctxt['admin_domain_id'] = \
            leader_get(attribute='admin_domain_id')
        ctxt['default_domain_id'] = \
            leader_get(attribute='default_domain_id')
    # haproxy/apache-aware bind ports.
    ctxt['admin_port'] = determine_api_port(api_port('keystone-admin'),
                                            singlenode_mode=True)
    ctxt['public_port'] = determine_api_port(
        api_port('keystone-public'), singlenode_mode=True)
    ctxt['debug'] = config('debug')
    ctxt['verbose'] = config('verbose')
    ctxt['token_expiration'] = config('token-expiration')
    ctxt['identity_backend'] = config('identity-backend')
    ctxt['assignment_backend'] = config('assignment-backend')
    ctxt['token_provider'] = config('token-provider')
    ctxt['fernet_max_active_keys'] = config('fernet-max-active-keys')
    if config('identity-backend') == 'ldap':
        ctxt['ldap_server'] = config('ldap-server')
        ctxt['ldap_user'] = config('ldap-user')
        ctxt['ldap_password'] = config('ldap-password')
        ctxt['ldap_suffix'] = config('ldap-suffix')
        ctxt['ldap_readonly'] = config('ldap-readonly')
        ldap_flags = config('ldap-config-flags')
        if ldap_flags:
            flags = context.config_flags_parser(ldap_flags)
            ctxt['ldap_config_flags'] = flags
    # Base endpoint URL's which are used in keystone responses
    # to unauthenticated requests to redirect clients to the
    # correct auth URL.
    ctxt['public_endpoint'] = endpoint_url(
        resolve_address(PUBLIC),
        api_port('keystone-public')).replace('v2.0', '')
    ctxt['admin_endpoint'] = endpoint_url(
        resolve_address(ADMIN),
        api_port('keystone-admin')).replace('v2.0', '')
    if snap_install_requested():
        # Snap installs keep all config under /var/snap.
        ctxt['domain_config_dir'] = (
            '/var/snap/keystone/common/etc/keystone/domains')
        ctxt['log_config'] = (
            '/var/snap/keystone/common/etc/keystone/logging.conf')
        ctxt['paste_config_file'] = (
            '/var/snap/keystone/common/etc/keystone/keystone-paste.ini')
    else:
        ctxt['domain_config_dir'] = '/etc/keystone/domains'
        ctxt['log_config'] = ('/etc/keystone/logging.conf')
        ctxt['paste_config_file'] = '/etc/keystone/keystone-paste.ini'
    return ctxt
def __call__(self):
    '''
    Extends the main charmhelpers HAProxyContext with
    a port mapping specific to this charm.
    Also used to extend nova.conf context with correct
    api_listening_ports
    '''
    from nova_cc_utils import api_port
    ctxt = super(HAProxyContext, self).__call__()
    # determine which port api processes should bind to, depending
    # on existence of haproxy + apache frontends
    compute_api = determine_api_port(api_port('nova-api-os-compute'))
    ec2_api = determine_api_port(api_port('nova-api-ec2'))
    s3_api = determine_api_port(api_port('nova-objectstore'))
    nvol_api = determine_api_port(api_port('nova-api-os-volume'))
    neutron_api = determine_api_port(api_port('neutron-server'))
    # to be set in nova.conf accordingly.
    listen_ports = {
        'osapi_compute_listen_port': compute_api,
        'ec2_listen_port': ec2_api,
        's3_listen_port': s3_api,
    }
    # haproxy frontend port -> backend api port per service.
    port_mapping = {
        'nova-api-os-compute': [
            determine_haproxy_port(api_port('nova-api-os-compute')),
            compute_api,
        ],
        'nova-api-ec2': [
            determine_haproxy_port(api_port('nova-api-ec2')),
            ec2_api,
        ],
        'nova-objectstore': [
            determine_haproxy_port(api_port('nova-objectstore')),
            s3_api,
        ],
    }
    if relation_ids('nova-volume-service'):
        # NOTE(review): this overwrites the 'nova-api-ec2' entry with
        # the os-volume backend port rather than adding a
        # 'nova-api-os-volume' entry — looks like a key typo; confirm
        # against the haproxy template before relying on it.
        port_mapping.update({
            'nova-api-ec2': [
                determine_haproxy_port(api_port('nova-api-ec2')),
                nvol_api],
        })
        listen_ports['osapi_volume_listen_port'] = nvol_api
    if neutron.network_manager() in ['neutron', 'quantum']:
        port_mapping.update({
            'neutron-server': [
                determine_haproxy_port(api_port('neutron-server')),
                neutron_api]
        })
        # quantum/neutron.conf listening port, set separte from nova's.
        ctxt['neutron_bind_port'] = neutron_api
    # for haproxy.conf
    ctxt['service_ports'] = port_mapping
    # for nova.conf
    ctxt['listen_ports'] = listen_ports
    return ctxt
def __call__(self):
    """Build the radosgw/ceph.conf context from the mon relation.

    Returns {} until at least one mon relation exists and, at the
    end, until the assembled context is complete.
    """
    if not relation_ids(self.interfaces[0]):
        return {}
    host = socket.gethostname()
    systemd_rgw = False
    mon_hosts = []
    auths = []
    fsid = None
    for rid in relation_ids(self.interfaces[0]):
        for unit in related_units(rid):
            fsid = relation_get('fsid', rid=rid, unit=unit)
            _auth = relation_get('auth', rid=rid, unit=unit)
            if _auth:
                auths.append(_auth)
            # Prefer the advertised ceph public address over the
            # unit's private-address.
            ceph_pub_addr = relation_get('ceph-public-address', rid=rid,
                                         unit=unit)
            unit_priv_addr = relation_get('private-address', rid=rid,
                                          unit=unit)
            ceph_addr = ceph_pub_addr or unit_priv_addr
            # Bracket ipv6 addresses; falls through unchanged for ipv4.
            ceph_addr = format_ipv6_addr(ceph_addr) or ceph_addr
            if ceph_addr:
                mon_hosts.append(ceph_addr)
            # A per-host rgw key on the relation means the mon supports
            # the systemd-managed radosgw service.
            if relation_get('rgw.{}_key'.format(host), rid=rid,
                            unit=unit):
                systemd_rgw = True
    if len(set(auths)) != 1:
        e = ("Inconsistent or absent auth returned by mon units. "
             "Setting auth_supported to 'none'")
        log(e, level=WARNING)
        auth = 'none'
    else:
        auth = auths[0]
    # /etc/init.d/radosgw mandates that a dns name is used for this
    # parameter so ensure that address is resolvable
    if config('prefer-ipv6'):
        ensure_host_resolvable_v6(host)
    port = determine_api_port(utils.listen_port(), singlenode_mode=True)
    if config('prefer-ipv6'):
        port = "[::]:%s" % (port)
    mon_hosts.sort()
    ctxt = {
        'auth_supported': auth,
        'mon_hosts': ' '.join(mon_hosts),
        'hostname': host,
        'old_auth': cmp_pkgrevno('radosgw', "0.51") < 0,
        'systemd_rgw': systemd_rgw,
        'use_syslog': str(config('use-syslog')).lower(),
        'loglevel': config('loglevel'),
        'port': port,
        'ipv6': config('prefer-ipv6'),
        # The public unit IP is only used in case the authentication is
        # *Not* keystone - in which case it is used to make sure the
        # storage endpoint returned by the built-in auth is the HAproxy
        # (since it defaults to the port the service runs on, and that
        # is not available externally). ~tribaal
        'unit_public_ip': unit_public_ip(),
        'fsid': fsid,
    }
    # NOTE(dosaboy): these sections must correspond to what is
    # supported in the config template.
    sections = ['global', 'client.radosgw.gateway']
    user_provided = CephConfContext(permitted_sections=sections)()
    # ceph.conf style dotted keys -> template-friendly underscores.
    user_provided = {
        k.replace('.', '_'): user_provided[k]
        for k in user_provided
    }
    ctxt.update(user_provided)
    if self.context_complete(ctxt):
        # Multi-site Zone configuration is optional,
        # so add after assessment
        ctxt['rgw_zone'] = config('zone')
        return ctxt
    return {}
def __call__(self):
    '''
    Extends the main charmhelpers HAProxyContext with
    a port mapping specific to this charm.
    Also used to extend nova.conf context with correct
    api_listening_ports
    '''
    ctxt = super(HAProxyContext, self).__call__()
    os_rel = ch_utils.os_release('nova-common')
    cmp_os_rel = ch_utils.CompareOpenStackReleases(os_rel)
    # determine which port api processes should bind to, depending
    # on existence of haproxy + apache frontends
    compute_api = ch_cluster.determine_api_port(
        common.api_port('nova-api-os-compute'), singlenode_mode=True)
    ec2_api = ch_cluster.determine_api_port(
        common.api_port('nova-api-ec2'), singlenode_mode=True)
    s3_api = ch_cluster.determine_api_port(
        common.api_port('nova-objectstore'), singlenode_mode=True)
    placement_api = ch_cluster.determine_api_port(
        common.api_port('nova-placement-api'), singlenode_mode=True)
    metadata_api = ch_cluster.determine_api_port(
        common.api_port('nova-api-metadata'), singlenode_mode=True)
    # Apache ports
    a_compute_api = ch_cluster.determine_apache_port(
        common.api_port('nova-api-os-compute'), singlenode_mode=True)
    a_ec2_api = ch_cluster.determine_apache_port(
        common.api_port('nova-api-ec2'), singlenode_mode=True)
    a_s3_api = ch_cluster.determine_apache_port(
        common.api_port('nova-objectstore'), singlenode_mode=True)
    a_placement_api = ch_cluster.determine_apache_port(
        common.api_port('nova-placement-api'), singlenode_mode=True)
    a_metadata_api = ch_cluster.determine_apache_port(
        common.api_port('nova-api-metadata'), singlenode_mode=True)
    # to be set in nova.conf accordingly.
    listen_ports = {
        'osapi_compute_listen_port': compute_api,
        'ec2_listen_port': ec2_api,
        's3_listen_port': s3_api,
        'placement_listen_port': placement_api,
        'metadata_listen_port': metadata_api,
    }
    # haproxy frontend (public port) -> apache backend port per service.
    port_mapping = {
        'nova-api-os-compute': [common.api_port('nova-api-os-compute'),
                                a_compute_api],
        'nova-api-ec2': [common.api_port('nova-api-ec2'), a_ec2_api],
        'nova-objectstore': [common.api_port('nova-objectstore'),
                             a_s3_api],
        'nova-placement-api': [common.api_port('nova-placement-api'),
                               a_placement_api],
        'nova-api-metadata': [common.api_port('nova-api-metadata'),
                              a_metadata_api],
    }
    # ec2/s3 front-ends were dropped from nova as of kilo.
    if cmp_os_rel >= 'kilo':
        del listen_ports['ec2_listen_port']
        del listen_ports['s3_listen_port']
        del port_mapping['nova-api-ec2']
        del port_mapping['nova-objectstore']
    rids = hookenv.relation_ids('placement')
    # Drop the local placement api when an external placement charm is
    # related, or when this release does not ship it at all.
    if (rids or cmp_os_rel < 'ocata' or cmp_os_rel > 'stein'):
        del listen_ports['placement_listen_port']
        del port_mapping['nova-placement-api']
    # for haproxy.conf
    ctxt['service_ports'] = port_mapping
    # for nova.conf
    ctxt['listen_ports'] = listen_ports
    return ctxt
def __call__(self):
    """Build the neutron-server template context.

    Layers plugin specific settings (nsx/plumgrid/midonet/vsp), L3
    HA/DVR/agent options, quotas, ML2 extension and mechanism drivers
    and the per-OpenStack-release ``service_plugins`` list on top of
    the parent context.

    :returns: template context
    :rtype: dict
    :raises ValueError: when l3_ha is enabled and
        max-l3-agents-per-router < min-l3-agents-per-router
    """
    from neutron_api_utils import api_port
    ctxt = super(NeutronCCContext, self).__call__()
    if config('neutron-plugin') == 'nsx':
        ctxt['nsx_username'] = config('nsx-username')
        ctxt['nsx_password'] = config('nsx-password')
        ctxt['nsx_tz_uuid'] = config('nsx-tz-uuid')
        ctxt['nsx_l3_uuid'] = config('nsx-l3-uuid')
        if 'nsx-controllers' in config():
            ctxt['nsx_controllers'] = \
                ','.join(config('nsx-controllers').split())
            ctxt['nsx_controllers_list'] = \
                config('nsx-controllers').split()
    if config('neutron-plugin') == 'plumgrid':
        ctxt['pg_username'] = config('plumgrid-username')
        ctxt['pg_password'] = config('plumgrid-password')
        ctxt['virtual_ip'] = config('plumgrid-virtual-ip')
    elif config('neutron-plugin') == 'midonet':
        ctxt.update(MidonetContext()())
        identity_context = IdentityServiceContext(service='neutron',
                                                  service_user='******')()
        if identity_context is not None:
            ctxt.update(identity_context)
    ctxt['l2_population'] = self.neutron_l2_population
    ctxt['enable_dvr'] = self.neutron_dvr
    ctxt['l3_ha'] = self.neutron_l3ha
    if self.neutron_l3ha:
        # Sanity-check the agent counts before they land in neutron.conf.
        max_agents = config('max-l3-agents-per-router')
        min_agents = config('min-l3-agents-per-router')
        if max_agents < min_agents:
            raise ValueError("max-l3-agents-per-router ({}) must be >= "
                             "min-l3-agents-per-router "
                             "({})".format(max_agents, min_agents))
        ctxt['max_l3_agents_per_router'] = max_agents
        ctxt['min_l3_agents_per_router'] = min_agents
    ctxt['allow_automatic_l3agent_failover'] = \
        config('allow-automatic-l3agent-failover')
    ctxt['allow_automatic_dhcp_failover'] = \
        config('allow-automatic-dhcp-failover')
    ctxt['dhcp_agents_per_network'] = config('dhcp-agents-per-network')
    ctxt['tenant_network_types'] = self.neutron_tenant_network_types
    ctxt['overlay_network_type'] = self.neutron_overlay_network_type
    ctxt['external_network'] = config('neutron-external-network')
    release = os_release('neutron-server')
    cmp_release = CompareOpenStackReleases(release)
    if config('neutron-plugin') in ['vsp']:
        # Nuage VSP: copy every vsd* config option into the context and
        # discover the VSD server address over the vsd-rest-api relation.
        _config = config()
        for k, v in _config.items():
            if k.startswith('vsd'):
                ctxt[k.replace('-', '_')] = v
        for rid in relation_ids('vsd-rest-api'):
            for unit in related_units(rid):
                rdata = relation_get(rid=rid, unit=unit)
                vsd_ip = rdata.get('vsd-ip-address')
                if cmp_release >= 'kilo':
                    cms_id_value = rdata.get('nuage-cms-id')
                    log('relation data:cms_id required for'
                        ' nuage plugin: {}'.format(cms_id_value))
                    if cms_id_value is not None:
                        ctxt['vsd_cms_id'] = cms_id_value
                log('relation data:vsd-ip-address: {}'.format(vsd_ip))
                if vsd_ip is not None:
                    ctxt['vsd_server'] = '{}:8443'.format(vsd_ip)
        if 'vsd_server' not in ctxt:
            # Placeholder until the relation provides a real address.
            ctxt['vsd_server'] = '1.1.1.1:8443'
    ctxt['verbose'] = config('verbose')
    ctxt['debug'] = config('debug')
    # Port neutron-server itself binds to (behind haproxy/apache).
    ctxt['neutron_bind_port'] = \
        determine_api_port(api_port('neutron-server'),
                           singlenode_mode=True)
    ctxt['quota_security_group'] = config('quota-security-group')
    ctxt['quota_security_group_rule'] = \
        config('quota-security-group-rule')
    ctxt['quota_network'] = config('quota-network')
    ctxt['quota_subnet'] = config('quota-subnet')
    ctxt['quota_port'] = config('quota-port')
    ctxt['quota_vip'] = config('quota-vip')
    ctxt['quota_pool'] = config('quota-pool')
    ctxt['quota_member'] = config('quota-member')
    ctxt['quota_health_monitors'] = config('quota-health-monitors')
    ctxt['quota_router'] = config('quota-router')
    ctxt['quota_floatingip'] = config('quota-floatingip')
    n_api_settings = self.get_neutron_api_rel_settings()
    if n_api_settings:
        ctxt.update(n_api_settings)
    flat_providers = config('flat-network-providers')
    if flat_providers:
        ctxt['network_providers'] = ','.join(flat_providers.split())
    vlan_ranges = config('vlan-ranges')
    if vlan_ranges:
        ctxt['vlan_ranges'] = ','.join(vlan_ranges.split())
    vni_ranges = config('vni-ranges')
    if vni_ranges:
        ctxt['vni_ranges'] = ','.join(vni_ranges.split())
    enable_dns_extension_driver = False
    dns_domain = get_dns_domain()
    if dns_domain:
        enable_dns_extension_driver = True
        ctxt['dns_domain'] = dns_domain
    if cmp_release >= 'mitaka':
        # A populated external-dns relation also enables the dns driver.
        for rid in relation_ids('external-dns'):
            if related_units(rid):
                enable_dns_extension_driver = True
        # AZAwareWeightScheduler inherits from WeightScheduler and is
        # available as of mitaka
        ctxt['network_scheduler_driver'] = (
            'neutron.scheduler.dhcp_agent_scheduler.AZAwareWeightScheduler'
        )
        ctxt['dhcp_load_type'] = config('dhcp-load-type')
    extension_drivers = []
    if config('enable-ml2-port-security'):
        extension_drivers.append(EXTENSION_DRIVER_PORT_SECURITY)
    if enable_dns_extension_driver:
        # queens+ ships the dns_domain_ports variant of the driver.
        if cmp_release < 'queens':
            extension_drivers.append(EXTENSION_DRIVER_DNS)
        else:
            extension_drivers.append(EXTENSION_DRIVER_DNS_DOMAIN_PORTS)
    if is_qos_requested_and_valid():
        extension_drivers.append(EXTENSION_DRIVER_QOS)
    if extension_drivers:
        ctxt['extension_drivers'] = ','.join(extension_drivers)
    ctxt['enable_sriov'] = config('enable-sriov')
    if cmp_release >= 'mitaka':
        if config('global-physnet-mtu'):
            ctxt['global_physnet_mtu'] = config('global-physnet-mtu')
            if config('path-mtu'):
                ctxt['path_mtu'] = config('path-mtu')
            else:
                ctxt['path_mtu'] = config('global-physnet-mtu')
            physical_network_mtus = config('physical-network-mtus')
            if physical_network_mtus:
                ctxt['physical_network_mtus'] = ','.join(
                    physical_network_mtus.split())
    if 'kilo' <= cmp_release <= 'mitaka':
        pci_vendor_devs = config('supported-pci-vendor-devs')
        if pci_vendor_devs:
            ctxt['supported_pci_vendor_devs'] = \
                ','.join(pci_vendor_devs.split())
    ctxt['mechanism_drivers'] = get_ml2_mechanism_drivers()
    n_load_balancer_settings = NeutronLoadBalancerContext()()
    if n_load_balancer_settings:
        ctxt.update(n_load_balancer_settings)
    if config('neutron-plugin') in ['ovs', 'ml2', 'Calico']:
        ctxt['service_plugins'] = []
        # Per-release service plugin lists; icehouse/juno use full class
        # paths, kilo+ use the short plugin aliases.
        service_plugins = {
            'icehouse': [
                ('neutron.services.l3_router.l3_router_plugin.'
                 'L3RouterPlugin'),
                'neutron.services.firewall.fwaas_plugin.FirewallPlugin',
                'neutron.services.loadbalancer.plugin.LoadBalancerPlugin',
                'neutron.services.vpn.plugin.VPNDriverPlugin',
                ('neutron.services.metering.metering_plugin.'
                 'MeteringPlugin')],
            'juno': [
                ('neutron.services.l3_router.l3_router_plugin.'
                 'L3RouterPlugin'),
                'neutron.services.firewall.fwaas_plugin.FirewallPlugin',
                'neutron.services.loadbalancer.plugin.LoadBalancerPlugin',
                'neutron.services.vpn.plugin.VPNDriverPlugin',
                ('neutron.services.metering.metering_plugin.'
                 'MeteringPlugin')],
            'kilo': ['router', 'firewall', 'lbaas', 'vpnaas', 'metering'],
            'liberty': ['router', 'firewall', 'lbaas', 'vpnaas',
                        'metering'],
            'mitaka': ['router', 'firewall', 'lbaas', 'vpnaas',
                       'metering'],
            'newton': ['router', 'firewall', 'vpnaas', 'metering',
                       ('neutron_lbaas.services.loadbalancer.plugin.'
                        'LoadBalancerPluginv2')],
            'ocata': ['router', 'firewall', 'vpnaas', 'metering',
                      ('neutron_lbaas.services.loadbalancer.plugin.'
                       'LoadBalancerPluginv2'), 'segments',
                      ('neutron_dynamic_routing.'
                       'services.bgp.bgp_plugin.BgpPlugin')],
            'pike': ['router', 'firewall', 'metering', 'segments',
                     ('neutron_lbaas.services.loadbalancer.plugin.'
                      'LoadBalancerPluginv2'),
                     ('neutron_dynamic_routing.'
                      'services.bgp.bgp_plugin.BgpPlugin')],
            'queens': ['router', 'firewall', 'metering', 'segments',
                       ('neutron_lbaas.services.loadbalancer.plugin.'
                        'LoadBalancerPluginv2'),
                       ('neutron_dynamic_routing.'
                        'services.bgp.bgp_plugin.BgpPlugin')],
            'rocky': ['router', 'firewall', 'metering', 'segments',
                      ('neutron_dynamic_routing.'
                       'services.bgp.bgp_plugin.BgpPlugin')],
            'stein': ['router', 'firewall_v2', 'metering', 'segments',
                      ('neutron_dynamic_routing.'
                       'services.bgp.bgp_plugin.BgpPlugin')],
        }
        if cmp_release >= 'rocky':
            # NOTE(review): service_plugins[release] will raise KeyError
            # for releases newer than stein (no such dict key), while the
            # .get() below falls back to 'stein' - confirm whether
            # post-stein releases can reach this branch.
            if ctxt.get('load_balancer_name', None):
                # TODO(fnordahl): Remove when ``neutron_lbaas`` is retired
                service_plugins[release].append('lbaasv2-proxy')
            else:
                # TODO(fnordahl): Remove fall-back in next charm release
                service_plugins[release].append('lbaasv2')
        if cmp_release >= 'stein':
            ctxt['firewall_v2'] = True
        ctxt['service_plugins'] = service_plugins.get(
            release, service_plugins['stein'])
        if is_nsg_logging_enabled():
            ctxt['service_plugins'].append('log')
        if is_qos_requested_and_valid():
            ctxt['service_plugins'].append('qos')
        if is_vlan_trunking_requested_and_valid():
            ctxt['service_plugins'].append('trunk')
        ctxt['service_plugins'] = ','.join(ctxt['service_plugins'])
    return ctxt
def __call__(self):
    """Assemble the context used to render keystone's configuration.

    Pulls together API version, bind ports, backend selection, LDAP
    options, password security compliance, endpoint URLs and the
    install-type specific config file locations.

    :returns: template context
    :rtype: dict
    """
    from keystone_utils import (
        api_port,
        endpoint_url,
        resolve_address,
        PUBLIC,
        ADMIN,
        ADMIN_DOMAIN,
        snap_install_requested,
        get_api_version,
    )
    api_version = get_api_version()
    ctxt = {
        'api_version': api_version,
        'admin_role': config('admin-role'),
    }
    if api_version > 2:
        # v3 deployments rely on ids seeded by the leader unit.
        ctxt['service_tenant_id'] = leader_get(
            attribute='service_tenant_id')
        ctxt['admin_domain_name'] = ADMIN_DOMAIN
        ctxt['admin_domain_id'] = leader_get(attribute='admin_domain_id')
        ctxt['default_domain_id'] = leader_get(
            attribute='default_domain_id')
        # This is required prior to system-scope being implemented
        # (Queens)
        ctxt['transitional_charm_user_id'] = leader_get(
            attribute='transitional_charm_user_id')
    ctxt['admin_port'] = determine_api_port(
        api_port('keystone-admin'), singlenode_mode=True)
    ctxt['public_port'] = determine_api_port(
        api_port('keystone-public'), singlenode_mode=True)
    # Straight config passthroughs.
    for ctxt_key, cfg_key in (
            ('debug', 'debug'),
            ('verbose', 'verbose'),
            ('token_expiration', 'token-expiration'),
            ('identity_backend', 'identity-backend'),
            ('assignment_backend', 'assignment-backend'),
            ('token_provider', 'token-provider'),
            ('fernet_max_active_keys', 'fernet-max-active-keys')):
        ctxt[ctxt_key] = config(cfg_key)
    if config('identity-backend') == 'ldap':
        for opt in ('server', 'user', 'password', 'suffix', 'readonly'):
            ctxt['ldap_{}'.format(opt)] = config('ldap-{}'.format(opt))
        ldap_flags = config('ldap-config-flags')
        if ldap_flags:
            ctxt['ldap_config_flags'] = context.config_flags_parser(
                ldap_flags)
    # Only try to decode it if there is something actually set - this
    # silences a NoneType warning in the logs if it isn't set
    psc = config('password-security-compliance')
    if psc:
        ctxt['password_security_compliance'] = (
            self._decode_password_security_compliance_string(psc))
    # Base endpoint URLs which are used in keystone responses to
    # unauthenticated requests to redirect clients to the correct
    # auth URL.
    ctxt['public_endpoint'] = endpoint_url(
        resolve_address(PUBLIC),
        api_port('keystone-public')).replace('v2.0', '')
    ctxt['admin_endpoint'] = endpoint_url(
        resolve_address(ADMIN),
        api_port('keystone-admin')).replace('v2.0', '')
    if snap_install_requested():
        # Snap installs keep config under /var/snap rather than /etc.
        ctxt['domain_config_dir'] = (
            '/var/snap/keystone/common/etc/keystone/domains')
        ctxt['log_config'] = (
            '/var/snap/keystone/common/etc/keystone/logging.conf')
        ctxt['paste_config_file'] = (
            '/var/snap/keystone/common/etc/keystone/keystone-paste.ini')
    else:
        ctxt['domain_config_dir'] = '/etc/keystone/domains'
        ctxt['log_config'] = '/etc/keystone/logging.conf'
        ctxt['paste_config_file'] = '/etc/keystone/keystone-paste.ini'
    return ctxt
def __call__(self):
    """Assemble the context used to render keystone's configuration.

    Covers the admin token, API bind ports, LDAP backend options,
    optional PKI token signing (including signing cert paths) and the
    endpoint/config-file locations (which differ for snap installs).

    :returns: template context
    :rtype: dict
    """
    from keystone_utils import (
        api_port, set_admin_token, endpoint_url, resolve_address,
        PUBLIC, ADMIN, PKI_CERTS_DIR, ensure_pki_cert_paths,
        ADMIN_DOMAIN, snap_install_requested, get_api_version,
    )
    ctxt = {}
    ctxt['token'] = set_admin_token(config('admin-token'))
    ctxt['api_version'] = get_api_version()
    ctxt['admin_role'] = config('admin-role')
    if ctxt['api_version'] > 2:
        # v3 deployments rely on ids seeded by the leader unit.
        ctxt['service_tenant_id'] = \
            leader_get(attribute='service_tenant_id')
        ctxt['admin_domain_name'] = ADMIN_DOMAIN
        ctxt['admin_domain_id'] = \
            leader_get(attribute='admin_domain_id')
        ctxt['default_domain_id'] = \
            leader_get(attribute='default_domain_id')
    # Ports the keystone processes bind to (behind haproxy/apache
    # where applicable).
    ctxt['admin_port'] = determine_api_port(api_port('keystone-admin'),
                                            singlenode_mode=True)
    ctxt['public_port'] = determine_api_port(api_port('keystone-public'),
                                             singlenode_mode=True)
    ctxt['debug'] = config('debug')
    ctxt['verbose'] = config('verbose')
    ctxt['token_expiration'] = config('token-expiration')
    ctxt['identity_backend'] = config('identity-backend')
    ctxt['assignment_backend'] = config('assignment-backend')
    if config('identity-backend') == 'ldap':
        ctxt['ldap_server'] = config('ldap-server')
        ctxt['ldap_user'] = config('ldap-user')
        ctxt['ldap_password'] = config('ldap-password')
        ctxt['ldap_suffix'] = config('ldap-suffix')
        ctxt['ldap_readonly'] = config('ldap-readonly')
        ldap_flags = config('ldap-config-flags')
        if ldap_flags:
            flags = context.config_flags_parser(ldap_flags)
            ctxt['ldap_config_flags'] = flags
    enable_pki = config('enable-pki')
    if enable_pki and bool_from_string(enable_pki):
        log("Enabling PKI", level=DEBUG)
        ctxt['token_provider'] = 'pki'
        # NOTE(jamespage): Only check PKI configuration if the PKI
        #                  token format is in use, which has been
        #                  removed as of OpenStack Ocata.
        ensure_pki_cert_paths()
        certs = os.path.join(PKI_CERTS_DIR, 'certs')
        privates = os.path.join(PKI_CERTS_DIR, 'privates')
        ctxt['enable_signing'] = True
        ctxt.update({
            'certfile': os.path.join(certs, 'signing_cert.pem'),
            'keyfile': os.path.join(privates, 'signing_key.pem'),
            'ca_certs': os.path.join(certs, 'ca.pem'),
            'ca_key': os.path.join(certs, 'ca_key.pem')})
    else:
        ctxt['enable_signing'] = False

    # Base endpoint URL's which are used in keystone responses
    # to unauthenticated requests to redirect clients to the
    # correct auth URL.
    ctxt['public_endpoint'] = endpoint_url(
        resolve_address(PUBLIC),
        api_port('keystone-public')).replace('v2.0', '')
    ctxt['admin_endpoint'] = endpoint_url(
        resolve_address(ADMIN),
        api_port('keystone-admin')).replace('v2.0', '')

    if snap_install_requested():
        # Snap installs keep config under /var/snap rather than /etc.
        ctxt['domain_config_dir'] = (
            '/var/snap/keystone/common/etc/keystone/domains')
        ctxt['log_config'] = (
            '/var/snap/keystone/common/etc/keystone/logging.conf')
        ctxt['paste_config_file'] = (
            '/var/snap/keystone/common/etc/keystone/keystone-paste.ini')
    else:
        ctxt['domain_config_dir'] = '/etc/keystone/domains'
        ctxt['log_config'] = ('/etc/keystone/logging.conf')
        ctxt['paste_config_file'] = '/etc/keystone/keystone-paste.ini'
    return ctxt
def __call__(self):
    """Assemble the context used to render keystone's configuration.

    Covers the admin token, API bind ports, LDAP backend options and
    optional PKI token signing (creating/fixing the signing cert
    directories as a side effect) plus the public/admin endpoint URLs.

    :returns: template context
    :rtype: dict
    """
    from keystone_utils import (
        api_port, set_admin_token, endpoint_url, resolve_address,
        PUBLIC, ADMIN, PKI_CERTS_DIR, SSH_USER, ensure_permissions,
    )
    ctxt = {}
    ctxt['token'] = set_admin_token(config('admin-token'))
    # Ports the keystone processes bind to (behind haproxy/apache
    # where applicable).
    ctxt['admin_port'] = determine_api_port(api_port('keystone-admin'),
                                            singlenode_mode=True)
    ctxt['public_port'] = determine_api_port(api_port('keystone-public'),
                                             singlenode_mode=True)
    debug = config('debug')
    ctxt['debug'] = debug and bool_from_string(debug)
    verbose = config('verbose')
    ctxt['verbose'] = verbose and bool_from_string(verbose)
    ctxt['token_expiration'] = config('token-expiration')
    ctxt['identity_backend'] = config('identity-backend')
    ctxt['assignment_backend'] = config('assignment-backend')
    if config('identity-backend') == 'ldap':
        ctxt['ldap_server'] = config('ldap-server')
        ctxt['ldap_user'] = config('ldap-user')
        ctxt['ldap_password'] = config('ldap-password')
        ctxt['ldap_suffix'] = config('ldap-suffix')
        ctxt['ldap_readonly'] = config('ldap-readonly')
        ldap_flags = config('ldap-config-flags')
        if ldap_flags:
            flags = context.config_flags_parser(ldap_flags)
            ctxt['ldap_config_flags'] = flags
    enable_pki = config('enable-pki')
    if enable_pki and bool_from_string(enable_pki):
        ctxt['signing'] = True
        ctxt['token_provider'] = 'pki'
    if 'token_provider' in ctxt:
        log("Configuring PKI token cert paths", level=DEBUG)
        certs = os.path.join(PKI_CERTS_DIR, 'certs')
        privates = os.path.join(PKI_CERTS_DIR, 'privates')
        for path in [PKI_CERTS_DIR, certs, privates]:
            perms = 0o755
            if not os.path.isdir(path):
                mkdir(path=path, owner=SSH_USER, group='keystone',
                      perms=perms)
            else:
                # Ensure accessible by ssh user and group (for sync).
                ensure_permissions(path, user=SSH_USER,
                                   group='keystone', perms=perms)
        signing_paths = {'certfile': os.path.join(certs,
                                                  'signing_cert.pem'),
                         'keyfile': os.path.join(privates,
                                                 'signing_key.pem'),
                         'ca_certs': os.path.join(certs, 'ca.pem'),
                         'ca_key': os.path.join(certs, 'ca_key.pem')}
        # BUGFIX: dict.iteritems() is Python 2 only and raises
        # AttributeError under Python 3; items() works on both.
        for key, val in signing_paths.items():
            ctxt[key] = val

    # Base endpoint URL's which are used in keystone responses
    # to unauthenticated requests to redirect clients to the
    # correct auth URL.
    # BUGFIX: rstrip('v2.0') strips any trailing characters from the
    # set {'v', '2', '.', '0'} (it can even eat digits off a port
    # number); use replace() to drop the version suffix, matching the
    # other keystone contexts in this file.
    ctxt['public_endpoint'] = endpoint_url(
        resolve_address(PUBLIC),
        api_port('keystone-public')).replace('v2.0', '')
    ctxt['admin_endpoint'] = endpoint_url(
        resolve_address(ADMIN),
        api_port('keystone-admin')).replace('v2.0', '')
    return ctxt
def __call__(self):
    """Build the neutron-server template context (pre-queens variant).

    Layers plugin specific settings (nsx/plumgrid/midonet/vsp), L3
    HA/DVR options, quotas, ML2 extension and mechanism drivers and
    the per-OpenStack-release ``service_plugins`` list on top of the
    parent context.

    :returns: template context
    :rtype: dict
    :raises ValueError: when l3_ha is enabled and
        max-l3-agents-per-router < min-l3-agents-per-router
    """
    from neutron_api_utils import api_port
    ctxt = super(NeutronCCContext, self).__call__()
    if config('neutron-plugin') == 'nsx':
        ctxt['nsx_username'] = config('nsx-username')
        ctxt['nsx_password'] = config('nsx-password')
        ctxt['nsx_tz_uuid'] = config('nsx-tz-uuid')
        ctxt['nsx_l3_uuid'] = config('nsx-l3-uuid')
        if 'nsx-controllers' in config():
            ctxt['nsx_controllers'] = \
                ','.join(config('nsx-controllers').split())
            ctxt['nsx_controllers_list'] = \
                config('nsx-controllers').split()
    if config('neutron-plugin') == 'plumgrid':
        ctxt['pg_username'] = config('plumgrid-username')
        ctxt['pg_password'] = config('plumgrid-password')
        ctxt['virtual_ip'] = config('plumgrid-virtual-ip')
    elif config('neutron-plugin') == 'midonet':
        ctxt.update(MidonetContext()())
        identity_context = IdentityServiceContext(service='neutron',
                                                  service_user='******')()
        if identity_context is not None:
            ctxt.update(identity_context)
    ctxt['l2_population'] = self.neutron_l2_population
    ctxt['enable_dvr'] = self.neutron_dvr
    ctxt['l3_ha'] = self.neutron_l3ha
    if self.neutron_l3ha:
        # Sanity-check the agent counts before they land in neutron.conf.
        max_agents = config('max-l3-agents-per-router')
        min_agents = config('min-l3-agents-per-router')
        if max_agents < min_agents:
            raise ValueError("max-l3-agents-per-router ({}) must be >= "
                             "min-l3-agents-per-router "
                             "({})".format(max_agents, min_agents))
        ctxt['max_l3_agents_per_router'] = max_agents
        ctxt['min_l3_agents_per_router'] = min_agents
    ctxt['dhcp_agents_per_network'] = config('dhcp-agents-per-network')
    ctxt['tenant_network_types'] = self.neutron_tenant_network_types
    ctxt['overlay_network_type'] = self.neutron_overlay_network_type
    ctxt['external_network'] = config('neutron-external-network')
    release = os_release('neutron-server')
    cmp_release = CompareOpenStackReleases(release)
    if config('neutron-plugin') in ['vsp']:
        # Nuage VSP: copy every vsd* config option into the context and
        # discover the VSD server address over the vsd-rest-api relation.
        _config = config()
        for k, v in _config.items():
            if k.startswith('vsd'):
                ctxt[k.replace('-', '_')] = v
        for rid in relation_ids('vsd-rest-api'):
            for unit in related_units(rid):
                rdata = relation_get(rid=rid, unit=unit)
                vsd_ip = rdata.get('vsd-ip-address')
                if cmp_release >= 'kilo':
                    cms_id_value = rdata.get('nuage-cms-id')
                    log('relation data:cms_id required for'
                        ' nuage plugin: {}'.format(cms_id_value))
                    if cms_id_value is not None:
                        ctxt['vsd_cms_id'] = cms_id_value
                log('relation data:vsd-ip-address: {}'.format(vsd_ip))
                if vsd_ip is not None:
                    ctxt['vsd_server'] = '{}:8443'.format(vsd_ip)
        if 'vsd_server' not in ctxt:
            # Placeholder until the relation provides a real address.
            ctxt['vsd_server'] = '1.1.1.1:8443'
    ctxt['verbose'] = config('verbose')
    ctxt['debug'] = config('debug')
    # Port neutron-server itself binds to (behind haproxy/apache).
    ctxt['neutron_bind_port'] = \
        determine_api_port(api_port('neutron-server'),
                           singlenode_mode=True)
    ctxt['quota_security_group'] = config('quota-security-group')
    ctxt['quota_security_group_rule'] = \
        config('quota-security-group-rule')
    ctxt['quota_network'] = config('quota-network')
    ctxt['quota_subnet'] = config('quota-subnet')
    ctxt['quota_port'] = config('quota-port')
    ctxt['quota_vip'] = config('quota-vip')
    ctxt['quota_pool'] = config('quota-pool')
    ctxt['quota_member'] = config('quota-member')
    ctxt['quota_health_monitors'] = config('quota-health-monitors')
    ctxt['quota_router'] = config('quota-router')
    ctxt['quota_floatingip'] = config('quota-floatingip')
    n_api_settings = self.get_neutron_api_rel_settings()
    if n_api_settings:
        ctxt.update(n_api_settings)
    flat_providers = config('flat-network-providers')
    if flat_providers:
        ctxt['network_providers'] = ','.join(flat_providers.split())
    vlan_ranges = config('vlan-ranges')
    if vlan_ranges:
        ctxt['vlan_ranges'] = ','.join(vlan_ranges.split())
    vni_ranges = config('vni-ranges')
    if vni_ranges:
        ctxt['vni_ranges'] = ','.join(vni_ranges.split())
    enable_dns_extension_driver = False
    dns_domain = get_dns_domain()
    if dns_domain:
        enable_dns_extension_driver = True
        ctxt['dns_domain'] = dns_domain
    if cmp_release >= 'mitaka':
        # A populated external-dns relation also enables the dns driver.
        for rid in relation_ids('external-dns'):
            if related_units(rid):
                enable_dns_extension_driver = True
    extension_drivers = []
    if config('enable-ml2-port-security'):
        extension_drivers.append(EXTENSION_DRIVER_PORT_SECURITY)
    if enable_dns_extension_driver:
        extension_drivers.append(EXTENSION_DRIVER_DNS)
    if is_qos_requested_and_valid():
        extension_drivers.append(EXTENSION_DRIVER_QOS)
    if extension_drivers:
        ctxt['extension_drivers'] = ','.join(extension_drivers)
    ctxt['enable_sriov'] = config('enable-sriov')
    if cmp_release >= 'mitaka':
        if config('global-physnet-mtu'):
            ctxt['global_physnet_mtu'] = config('global-physnet-mtu')
            if config('path-mtu'):
                ctxt['path_mtu'] = config('path-mtu')
            else:
                ctxt['path_mtu'] = config('global-physnet-mtu')
            physical_network_mtus = config('physical-network-mtus')
            if physical_network_mtus:
                ctxt['physical_network_mtus'] = ','.join(
                    physical_network_mtus.split())
    if 'kilo' <= cmp_release <= 'mitaka':
        pci_vendor_devs = config('supported-pci-vendor-devs')
        if pci_vendor_devs:
            ctxt['supported_pci_vendor_devs'] = \
                ','.join(pci_vendor_devs.split())
    ctxt['mechanism_drivers'] = get_ml2_mechanism_drivers()
    if config('neutron-plugin') in ['ovs', 'ml2', 'Calico']:
        ctxt['service_plugins'] = []
        # Per-release service plugin lists; icehouse/juno use full class
        # paths, kilo+ use the short plugin aliases.
        service_plugins = {
            'icehouse': [('neutron.services.l3_router.l3_router_plugin.'
                          'L3RouterPlugin'),
                         'neutron.services.firewall.fwaas_plugin.FirewallPlugin',
                         'neutron.services.loadbalancer.plugin.LoadBalancerPlugin',
                         'neutron.services.vpn.plugin.VPNDriverPlugin',
                         ('neutron.services.metering.metering_plugin.'
                          'MeteringPlugin')],
            'juno': [('neutron.services.l3_router.l3_router_plugin.'
                      'L3RouterPlugin'),
                     'neutron.services.firewall.fwaas_plugin.FirewallPlugin',
                     'neutron.services.loadbalancer.plugin.LoadBalancerPlugin',
                     'neutron.services.vpn.plugin.VPNDriverPlugin',
                     ('neutron.services.metering.metering_plugin.'
                      'MeteringPlugin')],
            'kilo': ['router', 'firewall', 'lbaas', 'vpnaas', 'metering'],
            'liberty': ['router', 'firewall', 'lbaas', 'vpnaas',
                        'metering'],
            'mitaka': ['router', 'firewall', 'lbaas', 'vpnaas',
                       'metering'],
            'newton': [
                'router', 'firewall', 'vpnaas', 'metering',
                ('neutron_lbaas.services.loadbalancer.plugin.'
                 'LoadBalancerPluginv2')
            ],
            'ocata': [
                'router', 'firewall', 'vpnaas', 'metering',
                ('neutron_lbaas.services.loadbalancer.plugin.'
                 'LoadBalancerPluginv2')
            ],
            'pike': [
                'router', 'firewall', 'metering',
                ('neutron_lbaas.services.loadbalancer.plugin.'
                 'LoadBalancerPluginv2')
            ],
        }
        # Unknown (newer) releases fall back to the pike plugin list.
        ctxt['service_plugins'] = service_plugins.get(
            release, service_plugins['pike'])
        if is_qos_requested_and_valid():
            ctxt['service_plugins'].append('qos')
        ctxt['service_plugins'] = ','.join(ctxt['service_plugins'])
    return ctxt
def __call__(self):
    """Build the radosgw template context from the 'mon' relation.

    Returns an empty dict when no mon relation exists or when the
    assembled context is incomplete.

    :returns: template context, or {} when incomplete
    :rtype: dict
    """
    mon_rids = relation_ids('mon')
    if not mon_rids:
        return {}
    host = socket.gethostname()
    systemd_rgw = False
    mon_hosts = []
    auths = []
    for rid in mon_rids:
        for unit in related_units(rid):
            unit_auth = relation_get('auth', rid=rid, unit=unit)
            if unit_auth:
                auths.append(unit_auth)
            # Prefer the mon's declared public address, falling back to
            # the unit's private address; wrap IPv6 addresses as needed.
            addr = (relation_get('ceph-public-address', rid=rid,
                                 unit=unit) or
                    relation_get('private-address', rid=rid, unit=unit))
            addr = format_ipv6_addr(addr) or addr
            if addr:
                mon_hosts.append(addr)
            if relation_get('rgw.{}_key'.format(host), rid=rid,
                            unit=unit):
                systemd_rgw = True
    if len(set(auths)) == 1:
        auth = auths[0]
    else:
        log("Inconsistent or absent auth returned by mon units. Setting "
            "auth_supported to 'none'", level=WARNING)
        auth = 'none'

    # /etc/init.d/radosgw mandates that a dns name is used for this
    # parameter so ensure that address is resolvable
    if config('prefer-ipv6'):
        ensure_host_resolvable_v6(host)

    port = determine_api_port(config('port'), singlenode_mode=True)
    if config('prefer-ipv6'):
        port = "[::]:%s" % (port)

    mon_hosts.sort()
    ctxt = {
        'auth_supported': auth,
        'mon_hosts': ' '.join(mon_hosts),
        'hostname': host,
        'old_auth': cmp_pkgrevno('radosgw', "0.51") < 0,
        'systemd_rgw': systemd_rgw,
        'use_syslog': str(config('use-syslog')).lower(),
        'loglevel': config('loglevel'),
        'port': port,
        'ipv6': config('prefer-ipv6'),
        # The public unit IP is only used in case the authentication is
        # *Not* keystone - in which case it is used to make sure the
        # storage endpoint returned by the built-in auth is the HAproxy
        # (since it defaults to the port the service runs on, and that is
        # not available externally). ~tribaal
        'unit_public_ip': unit_public_ip(),
    }

    # NOTE(dosaboy): these sections must correspond to what is supported
    # in the config template.
    permitted = ['global', 'client.radosgw.gateway']
    extra_conf = CephConfContext(permitted_sections=permitted)()
    ctxt.update({k.replace('.', '_'): v for k, v in extra_conf.items()})

    if not self.context_complete(ctxt):
        return {}
    # Multi-site Zone configuration is optional, so add after assessment
    ctxt['rgw_zone'] = config('zone')
    return ctxt