def __call__(self):
    """Build the identity-service relation context.

    Iterates related keystone units and returns the first complete set
    of endpoint/credential settings, or {} if none is complete yet.
    """
    log('Generating template context for identity-service', level=DEBUG)
    ctxt = {}
    for rid in relation_ids('identity-service'):
        for unit in related_units(rid):
            rdata = relation_get(rid=rid, unit=unit)
            serv_host = rdata.get('service_host')
            # Wrap IPv6 addresses in [] so they can be embedded in URLs.
            serv_host = format_ipv6_addr(serv_host) or serv_host
            auth_host = rdata.get('auth_host')
            auth_host = format_ipv6_addr(auth_host) or auth_host
            svc_protocol = rdata.get('service_protocol') or 'http'
            auth_protocol = rdata.get('auth_protocol') or 'http'
            # NOTE: ctxt is rebuilt (not updated) per unit; only a
            # complete unit's data is ever returned.
            ctxt = {'service_port': rdata.get('service_port'),
                    'service_host': serv_host,
                    'auth_host': auth_host,
                    'auth_port': rdata.get('auth_port'),
                    'admin_tenant_name': rdata.get('service_tenant'),
                    'admin_user': rdata.get('service_username'),
                    'admin_password': rdata.get('service_password'),
                    'service_protocol': svc_protocol,
                    'auth_protocol': auth_protocol}
            if context_complete(ctxt):
                # NOTE(jamespage) this is required for >= icehouse
                # so a missing value just indicates keystone needs
                # upgrading
                ctxt['admin_tenant_id'] = rdata.get('service_tenant_id')
                return ctxt
    return {}
def get_mon_hosts():
    """Return a sorted list of monitor endpoints ('host:6789').

    Always includes this unit's own public address, plus the
    ceph-public-address of every related 'mon' unit that provides one.
    """
    def _endpoint(address):
        # Bracket IPv6 addresses before appending the port.
        return '{}:6789'.format(format_ipv6_addr(address) or address)

    endpoints = [_endpoint(get_public_addr())]
    for rel_id in relation_ids('mon'):
        for peer in related_units(rel_id):
            peer_addr = relation_get('ceph-public-address', peer, rel_id)
            if peer_addr is not None:
                endpoints.append(_endpoint(peer_addr))
    return sorted(endpoints)
def __call__(self):
    """Build the ceph relation context (mon hosts, auth, key).

    Returns {} if the ceph relation is absent or incomplete; otherwise
    ensures /etc/ceph exists and ceph-common is installed.
    """
    if not relation_ids('ceph'):
        return {}
    log('Generating template context for ceph', level=DEBUG)
    mon_hosts = []
    auth = None
    key = None
    use_syslog = str(config('use-syslog')).lower()
    for rid in relation_ids('ceph'):
        for unit in related_units(rid):
            # auth/key keep the value from the last unit visited.
            auth = relation_get('auth', rid=rid, unit=unit)
            key = relation_get('key', rid=rid, unit=unit)
            ceph_pub_addr = relation_get('ceph-public-address', rid=rid,
                                         unit=unit)
            unit_priv_addr = relation_get('private-address', rid=rid,
                                          unit=unit)
            # Prefer the advertised public address over the unit's
            # private address.
            ceph_addr = ceph_pub_addr or unit_priv_addr
            ceph_addr = format_ipv6_addr(ceph_addr) or ceph_addr
            mon_hosts.append(ceph_addr)
    ctxt = {'mon_hosts': ' '.join(sorted(mon_hosts)),
            'auth': auth,
            'key': key,
            'use_syslog': use_syslog}
    if not os.path.isdir('/etc/ceph'):
        os.mkdir('/etc/ceph')
    if not context_complete(ctxt):
        return {}
    ensure_packages(['ceph-common'])
    return ctxt
def __call__(self):
    """Build the identity-service context with multi-region support.

    Returns the first complete unit context; when more than one region
    is advertised, also exposes a sorted 'regions' list of
    endpoint/title dicts.
    """
    log('Generating template context for identity-service')
    ctxt = {}
    regions = set()
    for rid in relation_ids('identity-service'):
        for unit in related_units(rid):
            rdata = relation_get(rid=rid, unit=unit)
            serv_host = rdata.get('service_host')
            # Wrap IPv6 addresses in [] for URL embedding.
            serv_host = format_ipv6_addr(serv_host) or serv_host
            region = rdata.get('region')
            local_ctxt = {
                'service_port': rdata.get('service_port'),
                'service_host': serv_host,
                'service_protocol': rdata.get('service_protocol') or 'http',
                'api_version': rdata.get('api_version', '2')
            }
            # If using keystone v3 the context is incomplete without the
            # admin domain id
            if local_ctxt['api_version'] == '3':
                if not config('default_domain'):
                    local_ctxt['admin_domain_id'] = rdata.get(
                        'admin_domain_id')
            if not context_complete(local_ctxt):
                continue
            # Update the service endpoint and title for each available
            # region in order to support multi-region deployments
            if region is not None:
                endpoint = ("%(service_protocol)s://%(service_host)s"
                            ":%(service_port)s/v2.0") % local_ctxt
                for reg in region.split():
                    regions.add((endpoint, reg))
            # Keep the first complete context seen.
            if len(ctxt) == 0:
                ctxt = local_ctxt
    if len(regions) > 1:
        avail_regions = map(lambda r: {'endpoint': r[0], 'title': r[1]},
                            regions)
        ctxt['regions'] = sorted(avail_regions,
                                 key=lambda k: k['endpoint'])
    # Allow the endpoint types to be specified via a config parameter.
    # The config parameter accepts either:
    # 1. a single endpoint type to be specified, in which case the
    #    primary endpoint is configured
    # 2. a list of endpoint types, in which case the primary endpoint
    #    is taken as the first entry and the secondary endpoint is
    #    taken as the second entry. All subsequent entries are ignored.
    ep_types = config('endpoint-type')
    if ep_types:
        ep_types = [self.normalize(e) for e in ep_types.split(',')]
        ctxt['primary_endpoint'] = ep_types[0]
        if len(ep_types) > 1:
            ctxt['secondary_endpoint'] = ep_types[1]
    return ctxt
def __call__(self):
    """Build memcached server list and optional novncproxy SSL paths.

    'memcached_servers' is always present (possibly empty); ssl_cert /
    ssl_key are only added when apache SSL material exists on disk.
    """
    ctxt = {}
    servers = []
    try:
        for rid in relation_ids('memcache'):
            for rel in relations_for_id(rid):
                priv_addr = rel['private-address']
                # Format it as IPv6 address if needed
                priv_addr = format_ipv6_addr(priv_addr) or priv_addr
                servers.append("%s:%s" % (priv_addr, rel['port']))
    except Exception as ex:
        # Best effort: an unreadable relation yields an empty list.
        log("Could not get memcache servers: %s" % (ex), level='WARNING')
        servers = []
    ctxt['memcached_servers'] = ','.join(servers)
    # Configure nova-novncproxy https if nova-api is using https.
    if https():
        cn = resolve_address(endpoint_type=INTERNAL)
        if cn:
            cert_filename = 'cert_{}'.format(cn)
            key_filename = 'key_{}'.format(cn)
        else:
            cert_filename = 'cert'
            key_filename = 'key'
        ssl_dir = '/etc/apache2/ssl/nova'
        cert = os.path.join(ssl_dir, cert_filename)
        key = os.path.join(ssl_dir, key_filename)
        if os.path.exists(cert) and os.path.exists(key):
            ctxt['ssl_cert'] = cert
            ctxt['ssl_key'] = key
    return ctxt
def __call__(self):
    """Extend the parent context with scheduler/allocation options and
    the unit's internal IP (bracketed when IPv6).
    """
    ctxt = super(NovaConfigContext, self).__call__()
    # Straight copies of charm config into the template context.
    for ctxt_key, cfg_key in (
            ('scheduler_default_filters', 'scheduler-default-filters'),
            ('cpu_allocation_ratio', 'cpu-allocation-ratio'),
            ('ram_allocation_ratio', 'ram-allocation-ratio')):
        ctxt[ctxt_key] = config(cfg_key)
    internal_addr = resolve_address(INTERNAL)
    ctxt['host_ip'] = format_ipv6_addr(internal_addr) or internal_addr
    return ctxt
def canonical_url():
    """Returns the correct HTTP URL to this host given the state of HTTPS
    configuration and hacluster.
    """
    scheme = 'https' if https() else 'http'
    addr = resolve_address(INTERNAL)
    # Bracket IPv6 addresses so the URL is well-formed.
    return '%s://%s' % (scheme, format_ipv6_addr(addr) or addr)
def __call__(self):
    """Render serial-console settings for templates.

    Exposes whether the serial console is enabled (as a lowercase
    string) and the websocket base URL on port 6083.
    """
    public_addr = ch_ip.resolve_address(endpoint_type=ch_ip.PUBLIC)
    public_addr = ch_network_ip.format_ipv6_addr(public_addr) or public_addr
    enabled = str(hookenv.config('enable-serial-console')).lower()
    return {
        'enable_serial_console': enabled,
        'serial_console_base_url': 'ws://{}:6083/'.format(public_addr),
    }
def __call__(self):
    """Build the identity-service relation context.

    Also ensures a per-service signing cache directory exists for PKI
    token signing when service/service_user are configured. Returns the
    first complete unit context, or {}.
    """
    log('Generating template context for ' + self.rel_name, level=DEBUG)
    ctxt = {}
    if self.service and self.service_user:
        # This is required for pki token signing if we don't want /tmp to
        # be used.
        cachedir = '/var/cache/%s' % (self.service)
        if not os.path.isdir(cachedir):
            log("Creating service cache dir %s" % (cachedir), level=DEBUG)
            mkdir(path=cachedir, owner=self.service_user,
                  group=self.service_user, perms=0o700)
        ctxt['signing_dir'] = cachedir
    for rid in relation_ids(self.rel_name):
        self.related = True
        for unit in related_units(rid):
            rdata = relation_get(rid=rid, unit=unit)
            serv_host = rdata.get('service_host')
            # Wrap IPv6 addresses in [] for URL embedding.
            serv_host = format_ipv6_addr(serv_host) or serv_host
            auth_host = rdata.get('auth_host')
            auth_host = format_ipv6_addr(auth_host) or auth_host
            svc_protocol = rdata.get('service_protocol') or 'http'
            auth_protocol = rdata.get('auth_protocol') or 'http'
            ctxt.update({'service_port': rdata.get('service_port'),
                         'service_host': serv_host,
                         'auth_host': auth_host,
                         'auth_port': rdata.get('auth_port'),
                         'admin_tenant_name': rdata.get('service_tenant'),
                         'admin_user': rdata.get('service_username'),
                         'admin_password': rdata.get('service_password'),
                         'service_protocol': svc_protocol,
                         'auth_protocol': auth_protocol})
            if self.context_complete(ctxt):
                # NOTE(jamespage) this is required for >= icehouse
                # so a missing value just indicates keystone needs
                # upgrading
                ctxt['admin_tenant_id'] = rdata.get('service_tenant_id')
                return ctxt
    return {}
def get_host_ip(rid=None, unit=None):
    """Return the address to use for the given relation unit.

    With prefer-ipv6 set, return the bracketed IPv6 form of the unit's
    private-address when possible (logging a warning otherwise);
    fall back to openstack.get_host_ip in all other cases.
    """
    addr = relation_get('private-address', rid=rid, unit=unit)
    if not config('prefer-ipv6'):
        return openstack.get_host_ip(addr)
    formatted = format_ipv6_addr(addr)
    if formatted:
        return formatted
    log("Did not get IPv6 address from storage relation "
        "(got=%s)" % (addr), level=WARNING)
    return openstack.get_host_ip(addr)
def get_mon_hosts():
    """Return sorted 'host:6789' endpoints for all related mon units."""
    endpoints = []
    for rel_id in relation_ids('mon'):
        for mon_unit in related_units(rel_id):
            # Prefer the advertised ceph-public-address; fall back to
            # resolving the unit's private address.
            address = relation_get('ceph-public-address', mon_unit, rel_id)
            if not address:
                address = get_host_ip(
                    relation_get('private-address', mon_unit, rel_id))
            if address:
                endpoints.append(
                    '{}:6789'.format(format_ipv6_addr(address) or address))
    return sorted(endpoints)
def get_ceph_nodes():
    """Return addresses of all related ceph units (IPv6 bracketed)."""
    addresses = []
    for rel_id in utils.relation_ids('ceph'):
        for ceph_unit in utils.relation_list(rel_id):
            # Prefer the public address; fall back to private-address.
            addr = utils.relation_get('ceph-public-address', rid=rel_id,
                                      unit=ceph_unit)
            if not addr:
                addr = utils.relation_get('private-address', rid=rel_id,
                                          unit=ceph_unit)
            # If the host is an ipv6 address we need to wrap it in []
            addresses.append(format_ipv6_addr(addr) or addr)
    return addresses
def transport_url(self):
    """oslo.messaging formatted transport URL

    :returns: oslo.messaging formatted transport URL
    :rtype: string
    """
    entries = []
    for host in self.relation.rabbitmq_hosts():
        # Bracket IPv6 hosts so the URL parses correctly.
        host = ch_ip.format_ipv6_addr(host) or host
        entries.append(
            "{}:{}@{}:5672".format(self.username, self.password, host))
    return "rabbit://{}/{}".format(','.join(entries), self.vhost)
def __call__(self):
    """Build the shared-db (mysql) relation context.

    Raises OSContextError when database/user config is missing. May
    return None to defer hook execution while an access-network
    hostname change propagates. Returns {} until complete.
    """
    self.database = self.database or config('database')
    self.user = self.user or config('database-user')
    if None in [self.database, self.user]:
        log("Could not generate shared_db context. Missing required charm "
            "config options. (database name and user)", level=ERROR)
        raise OSContextError
    ctxt = {}
    # NOTE(jamespage) if mysql charm provides a network upon which
    # access to the database should be made, reconfigure relation
    # with the service units local address and defer execution
    access_network = relation_get('access-network')
    if access_network is not None:
        if self.relation_prefix is not None:
            hostname_key = "{}_hostname".format(self.relation_prefix)
        else:
            hostname_key = "hostname"
        access_hostname = get_address_in_network(
            access_network, unit_get('private-address'))
        set_hostname = relation_get(attribute=hostname_key,
                                    unit=local_unit())
        if set_hostname != access_hostname:
            relation_set(relation_settings={hostname_key: access_hostname})
            return None  # Defer any further hook execution for now....
    password_setting = 'password'
    if self.relation_prefix:
        password_setting = self.relation_prefix + '_password'
    for rid in relation_ids(self.interfaces[0]):
        self.related = True
        for unit in related_units(rid):
            rdata = relation_get(rid=rid, unit=unit)
            host = rdata.get('db_host')
            # Bracket IPv6 addresses for use in connection strings.
            host = format_ipv6_addr(host) or host
            ctxt = {
                'database_host': host,
                'database': self.database,
                'database_user': self.user,
                'database_password': rdata.get(password_setting),
                'database_type': 'mysql'
            }
            if self.context_complete(ctxt):
                db_ssl(rdata, ctxt, self.ssl_dir)
                return ctxt
    return {}
def __call__(self):
    """Build console SSL context.

    When console SSL cert/key and an access protocol are configured,
    writes the decoded cert/key under /etc/nova/ssl/ and exposes the
    proxy base URL for the configured protocol.
    """
    ctxt = {}
    if (hookenv.config('console-ssl-cert') and
            hookenv.config('console-ssl-key') and
            hookenv.config('console-access-protocol')):
        ssl_dir = '/etc/nova/ssl/'
        if not os.path.exists(ssl_dir):
            hookenv.log('Creating %s.' % ssl_dir, level=hookenv.DEBUG)
            os.mkdir(ssl_dir)
        cert_path = os.path.join(ssl_dir, 'nova_cert.pem')
        # Config values hold base64-encoded PEM material.
        decode_ssl_cert = base64.b64decode(
            hookenv.config('console-ssl-cert'))
        key_path = os.path.join(ssl_dir, 'nova_key.pem')
        decode_ssl_key = base64.b64decode(
            hookenv.config('console-ssl-key'))
        with open(cert_path, 'wb') as fh:
            fh.write(decode_ssl_cert)
        with open(key_path, 'wb') as fh:
            fh.write(decode_ssl_key)
        ctxt['ssl_only'] = True
        ctxt['ssl_cert'] = cert_path
        ctxt['ssl_key'] = key_path
        # Use the cluster's public address when clustered; otherwise
        # the unit's own address.
        if ch_cluster.is_clustered():
            ip_addr = ch_ip.resolve_address(endpoint_type=ch_ip.PUBLIC)
        else:
            ip_addr = hookenv.unit_get('private-address')
        ip_addr = ch_network_ip.format_ipv6_addr(ip_addr) or ip_addr
        _proto = hookenv.config('console-access-protocol')
        url = "https://%s:%s%s" % (
            ip_addr,
            common.console_attributes('proxy-port', proto=_proto),
            common.console_attributes('proxy-page', proto=_proto))
        if _proto == 'novnc':
            ctxt['novncproxy_base_url'] = url
        elif _proto == 'spice':
            ctxt['html5proxy_base_url'] = url
    return ctxt
def console_settings():
    """Return relation settings describing console proxy access.

    Computes the proxy base address (local vs configured IP, http vs
    https) and per-protocol address/host/port entries. Returns {} when
    no console protocol is configured.
    """
    rel_settings = {}
    proto = common.console_attributes('protocol')
    if not proto:
        return {}
    rel_settings['console_keymap'] = hookenv.config('console-keymap')
    rel_settings['console_access_protocol'] = proto
    console_ssl = False
    if (hookenv.config('console-ssl-cert') and
            hookenv.config('console-ssl-key')):
        console_ssl = True
    if hookenv.config('console-proxy-ip') == 'local':
        if console_ssl:
            address = ch_ip.resolve_address(endpoint_type=ch_ip.PUBLIC)
            address = ch_network_ip.format_ipv6_addr(address) or address
            proxy_base_addr = 'https://%s' % address
        else:
            # canonical_url will only return 'https:' if API SSL are enabled.
            proxy_base_addr = ch_ip.canonical_url(CONFIGS, ch_ip.PUBLIC)
    else:
        if console_ssl or ch_cluster.https():
            schema = "https"
        else:
            schema = "http"
        proxy_base_addr = ("{}://{}"
                           .format(schema,
                                   hookenv.config('console-proxy-ip')))
    # 'vnc' implies both novnc and xvpvnc proxies.
    if proto == 'vnc':
        protocols = ['novnc', 'xvpvnc']
    else:
        protocols = [proto]
    for _proto in protocols:
        rel_settings['console_proxy_{}_address'.format(_proto)] = \
            "{}:{}{}".format(
                proxy_base_addr,
                common.console_attributes('proxy-port', proto=_proto),
                common.console_attributes('proxy-page', proto=_proto))
        rel_settings['console_proxy_%s_host' % (_proto)] = \
            urlparse(proxy_base_addr).hostname
        rel_settings['console_proxy_%s_port' % (_proto)] = \
            common.console_attributes('proxy-port', proto=_proto)
    return rel_settings
def console_settings():
    """Return relation settings describing console proxy access.

    Computes the proxy base address (local vs configured IP, http vs
    https) and per-protocol address/host/port entries. Returns {} when
    no console protocol is configured.
    """
    rel_settings = {}
    proto = console_attributes('protocol')
    if not proto:
        return {}
    rel_settings['console_keymap'] = config('console-keymap')
    rel_settings['console_access_protocol'] = proto
    console_ssl = False
    if config('console-ssl-cert') and config('console-ssl-key'):
        console_ssl = True
    if config('console-proxy-ip') == 'local':
        if console_ssl:
            address = resolve_address(endpoint_type=PUBLIC)
            address = format_ipv6_addr(address) or address
            proxy_base_addr = 'https://%s' % address
        else:
            # canonical_url will only return 'https:' if API SSL are enabled.
            proxy_base_addr = canonical_url(CONFIGS, PUBLIC)
    else:
        if console_ssl or https():
            schema = "https"
        else:
            schema = "http"
        proxy_base_addr = "%s://%s" % (schema, config('console-proxy-ip'))
    # 'vnc' implies both novnc and xvpvnc proxies.
    if proto == 'vnc':
        protocols = ['novnc', 'xvpvnc']
    else:
        protocols = [proto]
    for _proto in protocols:
        rel_settings['console_proxy_%s_address' % (_proto)] = \
            "%s:%s%s" % (proxy_base_addr,
                         console_attributes('proxy-port', proto=_proto),
                         console_attributes('proxy-page', proto=_proto))
        rel_settings['console_proxy_%s_host' % (_proto)] = \
            urlparse(proxy_base_addr).hostname
        rel_settings['console_proxy_%s_port' % (_proto)] = \
            console_attributes('proxy-port', proto=_proto)
    return rel_settings
def __call__(self):
    """Build a context of related memcache servers ('host:port' CSV).

    Returns {} when no servers are found or the relation cannot be
    read.
    """
    endpoints = []
    try:
        for rid in hookenv.relation_ids(self.interfaces[0]):
            for rel in hookenv.relations_for_id(rid):
                addr = rel['private-address']
                # Format it as IPv6 address if needed
                addr = ch_network_ip.format_ipv6_addr(addr) or addr
                endpoints.append("%s:%s" % (addr, rel['port']))
    except Exception as ex:
        # Best effort: fall back to an empty context on any failure.
        hookenv.log("Could not get memcache servers: %s" % (ex),
                    level='WARNING')
        endpoints = []
    if not endpoints:
        return {}
    return {
        'memcached_servers': ','.join(endpoints)
    }
def notify_peers_builders_available(broker_token, builders_only=False):
    """Notify peer swift-proxy units that they should synchronise ring and
    builder files.

    Note that this should only be called from the leader unit.

    :param broker_token: token identifying the broker for this sync.
    :param builders_only: when True, peers sync only builder files.
    """
    if not is_elected_leader(SWIFT_HA_RES):
        log("Ring availability peer broadcast requested by non-leader - "
            "skipping", level=WARNING)
        return
    hostname = get_hostaddr()
    # Bracket IPv6 addresses for use in the sync request URL.
    hostname = format_ipv6_addr(hostname) or hostname
    # Notify peers that builders are available
    log("Notifying peer(s) that rings are ready for sync.", level=INFO)
    rq = SwiftProxyClusterRPC().sync_rings_request(
        hostname, broker_token, builders_only=builders_only)
    for rid in relation_ids('cluster'):
        log("Notifying rid=%s (%s)" % (rid, rq), level=DEBUG)
        relation_set(relation_id=rid, relation_settings=rq)
def notify_storage_rings_available():
    """Notify peer swift-storage relations that they should synchronise ring
    and builder files.

    Note that this should only be called from the leader unit.
    """
    if not is_elected_leader(SWIFT_HA_RES):
        log("Ring availability storage-relation broadcast requested by "
            "non-leader - skipping", level=WARNING)
        return
    hostname = get_hostaddr()
    # Bracket IPv6 addresses for use in the rings URL.
    hostname = format_ipv6_addr(hostname) or hostname
    path = os.path.basename(get_www_dir())
    rings_url = 'http://%s/%s' % (hostname, path)
    # A fresh UUID acts as a change trigger so peers notice the update.
    trigger = uuid.uuid4()
    # Notify storage nodes that there is a new ring to fetch.
    log("Notifying storage nodes that new ring is ready for sync.",
        level=INFO)
    for relid in relation_ids('swift-storage'):
        relation_set(relation_id=relid, swift_hash=get_swift_hash(),
                     rings_url=rings_url, trigger=trigger)
def __call__(self):
    """Grab cert and key from configuration for SSL config.

    Provides the standard HTTP/HTTPS ports and, when SSL enforcement is
    enabled and certs are configured, the address clients should be
    redirected to.
    """
    # BUG FIX: these were mistyped as 70/433; the well-known HTTP and
    # HTTPS ports are 80 and 443.
    ctxt = {
        'http_port': 80,
        'https_port': 443
    }
    if config('enforce-ssl'):
        # NOTE(dosaboy): if ssl is not configured we shouldn't allow this
        if all(get_cert()):
            # Prefer the VIP, then an IPv6 address, then the resolved
            # private address.
            if config('vip'):
                addr = config('vip')
            elif config('prefer-ipv6'):
                addr = format_ipv6_addr(get_ipv6_addr()[0])
            else:
                addr = get_host_ip(unit_get('private-address'))
            ctxt['ssl_addr'] = addr
        else:
            log("Enforce ssl redirect requested but ssl not configured - "
                "skipping redirect", level=WARNING)
    return ctxt
def __call__(self):
    """Assemble nova.conf options from charm config.

    Extends the parent context with scheduler filters, PCI aliases,
    allocation ratios, quota settings, console access settings and the
    unit's internal IP.
    """
    ctxt = super(NovaConfigContext, self).__call__()
    ctxt['scheduler_default_filters'] = (
        hookenv.config('scheduler-default-filters') or
        ','.join(default_enabled_filters()))
    pci_alias = hookenv.config('pci-alias')
    if pci_alias:
        parsed = json.loads(pci_alias)
        # A JSON list yields one serialized alias per entry; a single
        # object is serialized as-is.
        if isinstance(parsed, list):
            ctxt['pci_aliases'] = [json.dumps(entry, sort_keys=True)
                                   for entry in parsed]
        else:
            ctxt['pci_alias'] = json.dumps(parsed, sort_keys=True)
    # Straight copies of charm config into the template context.
    for ctxt_key, cfg_key in (
            ('disk_allocation_ratio', 'disk-allocation-ratio'),
            ('cpu_allocation_ratio', 'cpu-allocation-ratio'),
            ('ram_allocation_ratio', 'ram-allocation-ratio'),
            ('enable_new_services', 'enable-new-services')):
        ctxt[ctxt_key] = hookenv.config(cfg_key)
    addr = ch_ip.resolve_address(ch_ip.INTERNAL)
    ctxt['host_ip'] = ch_network_ip.format_ipv6_addr(addr) or addr
    for ctxt_key, cfg_key in (
            ('quota_instances', 'quota-instances'),
            ('quota_cores', 'quota-cores'),
            ('quota_ram', 'quota-ram'),
            ('quota_metadata_items', 'quota-metadata-items'),
            ('quota_injected_files', 'quota-injected-files'),
            ('quota_injected_file_content_bytes',
             'quota-injected-file-size'),
            ('quota_injected_file_path_length',
             'quota-injected-path-size'),
            ('quota_key_pairs', 'quota-key-pairs'),
            ('quota_server_groups', 'quota-server-groups'),
            ('quota_server_group_members', 'quota-server-group-members'),
            ('console_access_protocol', 'console-access-protocol'),
            ('console_access_port', 'console-access-port')):
        ctxt[ctxt_key] = hookenv.config(cfg_key)
    return ctxt
def _hostname(self):
    """Return this host's address, bracketed when it is IPv6."""
    addr = get_hostaddr()
    return format_ipv6_addr(addr) or addr
def __call__(self):
    """Build the swift-proxy template context.

    Combines charm config (bind/worker/timeouts), the charm-helpers
    identity context (for signing_dir) and keystone auth settings from
    either user config or the identity-service relation.
    """
    bind_port = config('bind-port')
    workers = config('workers')
    if workers == 0:
        # 0 means "one worker per CPU".
        import multiprocessing
        workers = multiprocessing.cpu_count()
    if config('prefer-ipv6'):
        proxy_ip = '[%s]' % get_ipv6_addr(exc_list=[config('vip')])[0]
        memcached_ip = 'ip6-localhost'
    else:
        proxy_ip = get_host_ip(unit_get('private-address'))
        memcached_ip = get_host_ip(unit_get('private-address'))
    ctxt = {
        'proxy_ip': proxy_ip,
        'memcached_ip': memcached_ip,
        'bind_port': determine_api_port(bind_port, singlenode_mode=True),
        'workers': workers,
        'operator_roles': config('operator-roles'),
        'delay_auth_decision': config('delay-auth-decision'),
        'node_timeout': config('node-timeout'),
        'recoverable_node_timeout': config('recoverable-node-timeout'),
        'log_headers': config('log-headers')
    }
    if config('debug'):
        ctxt['log_level'] = 'DEBUG'
    else:
        ctxt['log_level'] = 'INFO'
    # Instead of duplicating code lets use charm-helpers to set signing_dir
    # TODO(hopem): refactor this context handler to use charm-helpers
    # code.
    _ctxt = IdentityServiceContext(service='swift', service_user='******')()
    signing_dir = _ctxt.get('signing_dir')
    if signing_dir:
        ctxt['signing_dir'] = signing_dir
    ctxt['ssl'] = False
    auth_type = config('auth-type')
    auth_host = config('keystone-auth-host')
    admin_user = config('keystone-admin-user')
    # BUG FIX: this previously read config('keystone-admin-user'), so the
    # user-specified keystone path could never see the real password.
    admin_password = config('keystone-admin-password')
    if (auth_type == 'keystone' and auth_host and
            admin_user and admin_password):
        log('Using user-specified Keystone configuration.')
        ks_auth = {
            'auth_type': 'keystone',
            'auth_protocol': config('keystone-auth-protocol'),
            'keystone_host': auth_host,
            'auth_port': config('keystone-auth-port'),
            'service_user': admin_user,
            'service_password': admin_password,
            'service_tenant': config('keystone-admin-tenant-name')
        }
        ctxt.update(ks_auth)
    # Relation-provided settings take precedence over user config.
    for relid in relation_ids('identity-service'):
        log('Using Keystone configuration from identity-service.')
        for unit in related_units(relid):
            ks_auth = {
                'auth_type': 'keystone',
                'auth_protocol': relation_get('auth_protocol',
                                              unit, relid) or 'http',
                'service_protocol': relation_get('service_protocol',
                                                 unit, relid) or 'http',
                'keystone_host': relation_get('auth_host',
                                              unit, relid),
                'service_host': relation_get('service_host',
                                             unit, relid),
                'auth_port': relation_get('auth_port',
                                          unit, relid),
                'service_user': relation_get('service_username',
                                             unit, relid),
                'service_password': relation_get('service_password',
                                                 unit, relid),
                'service_tenant': relation_get('service_tenant',
                                               unit, relid),
                'service_port': relation_get('service_port',
                                             unit, relid),
                'admin_token': relation_get('admin_token',
                                            unit, relid),
            }
            if context_complete(ks_auth):
                ctxt.update(ks_auth)
    if config('prefer-ipv6'):
        # Bracket IPv6 hosts for URL embedding.
        for key in ['keystone_host', 'service_host']:
            host = ctxt.get(key)
            if host:
                ctxt[key] = format_ipv6_addr(host)
    return ctxt
def __call__(self):
    """Build the identity-service context with multi-region support.

    Returns the first complete unit context; when more than one region
    is advertised, also exposes a sorted 'regions' list of
    endpoint/title dicts.
    """
    log('Generating template context for identity-service')
    ctxt = {}
    regions = set()
    for rid in relation_ids('identity-service'):
        for unit in related_units(rid):
            rdata = relation_get(rid=rid, unit=unit)
            serv_host = rdata.get('service_host')
            # Wrap IPv6 addresses in [] for URL embedding.
            serv_host = format_ipv6_addr(serv_host) or serv_host
            region = rdata.get('region')
            local_ctxt = {
                'service_port': rdata.get('service_port'),
                'service_host': serv_host,
                'service_protocol': rdata.get('service_protocol') or 'http',
                'api_version': rdata.get('api_version', '2')
            }
            # If using keystone v3 the context is incomplete without the
            # admin domain id
            if local_ctxt['api_version'] == '3':
                if not config('default_domain'):
                    local_ctxt['admin_domain_id'] = rdata.get(
                        'admin_domain_id')
            if not context_complete(local_ctxt):
                continue
            # Update the service endpoint and title for each available
            # region in order to support multi-region deployments
            if region is not None:
                endpoint = ("%(service_protocol)s://%(service_host)s"
                            ":%(service_port)s/v2.0") % local_ctxt
                for reg in region.split():
                    regions.add((endpoint, reg))
            if len(ctxt) == 0:
                ctxt = local_ctxt
    if len(regions) > 1:
        avail_regions = map(lambda r: {'endpoint': r[0], 'title': r[1]},
                            regions)
        # BUG FIX: dicts are not orderable on Python 3, so
        # sorted(avail_regions) raised TypeError; sort on a stable key
        # instead.
        ctxt['regions'] = sorted(avail_regions,
                                 key=lambda k: k['endpoint'])
    # Allow the endpoint types to be specified via a config parameter.
    # The config parameter accepts either:
    # 1. a single endpoint type to be specified, in which case the
    #    primary endpoint is configured
    # 2. a list of endpoint types, in which case the primary endpoint
    #    is taken as the first entry and the secondary endpoint is
    #    taken as the second entry. All subsequent entries are ignored.
    ep_types = config('endpoint-type')
    if ep_types:
        ep_types = [self.normalize(e) for e in ep_types.split(',')]
        ctxt['primary_endpoint'] = ep_types[0]
        if len(ep_types) > 1:
            ctxt['secondary_endpoint'] = ep_types[1]
    return ctxt
def __call__(self):
    """Build the amqp (rabbitmq) relation context.

    Raises OSContextError when the rabbit user/vhost charm config is
    missing. Handles clustered (VIP) and active/active deployments and
    optional SSL settings. Returns {} until the context is complete.
    """
    log('Generating template context for amqp', level=DEBUG)
    conf = config()
    if self.relation_prefix:
        user_setting = '%s-rabbit-user' % (self.relation_prefix)
        vhost_setting = '%s-rabbit-vhost' % (self.relation_prefix)
    else:
        user_setting = 'rabbit-user'
        vhost_setting = 'rabbit-vhost'
    try:
        username = conf[user_setting]
        vhost = conf[vhost_setting]
    except KeyError as e:
        log('Could not generate shared_db context. Missing required charm '
            'config options: %s.' % e, level=ERROR)
        raise OSContextError
    ctxt = {}
    for rid in relation_ids(self.rel_name):
        ha_vip_only = False
        self.related = True
        for unit in related_units(rid):
            # Clustered rabbit advertises a VIP to use instead of the
            # unit's private address.
            if relation_get('clustered', rid=rid, unit=unit):
                ctxt['clustered'] = True
                vip = relation_get('vip', rid=rid, unit=unit)
                vip = format_ipv6_addr(vip) or vip
                ctxt['rabbitmq_host'] = vip
            else:
                host = relation_get('private-address', rid=rid, unit=unit)
                host = format_ipv6_addr(host) or host
                ctxt['rabbitmq_host'] = host
            ctxt.update({
                'rabbitmq_user': username,
                'rabbitmq_password': relation_get('password', rid=rid,
                                                  unit=unit),
                'rabbitmq_virtual_host': vhost,
            })
            ssl_port = relation_get('ssl_port', rid=rid, unit=unit)
            if ssl_port:
                ctxt['rabbit_ssl_port'] = ssl_port
            ssl_ca = relation_get('ssl_ca', rid=rid, unit=unit)
            if ssl_ca:
                ctxt['rabbit_ssl_ca'] = ssl_ca
            if relation_get('ha_queues', rid=rid, unit=unit) is not None:
                ctxt['rabbitmq_ha_queues'] = True
            ha_vip_only = relation_get('ha-vip-only',
                                       rid=rid, unit=unit) is not None
            if self.context_complete(ctxt):
                if 'rabbit_ssl_ca' in ctxt:
                    if not self.ssl_dir:
                        log("Charm not setup for ssl support but ssl ca "
                            "found", level=INFO)
                        break
                    # Persist the relation-provided CA so clients can
                    # reference it by path.
                    ca_path = os.path.join(self.ssl_dir,
                                           'rabbit-client-ca.pem')
                    with open(ca_path, 'w') as fh:
                        fh.write(b64decode(ctxt['rabbit_ssl_ca']))
                    ctxt['rabbit_ssl_ca'] = ca_path
                # Sufficient information found = break out!
                break
        # Used for active/active rabbitmq >= grizzly
        if (('clustered' not in ctxt or ha_vip_only) and
                len(related_units(rid)) > 1):
            rabbitmq_hosts = []
            for unit in related_units(rid):
                host = relation_get('private-address', rid=rid, unit=unit)
                host = format_ipv6_addr(host) or host
                rabbitmq_hosts.append(host)
            ctxt['rabbitmq_hosts'] = ','.join(sorted(rabbitmq_hosts))
    oslo_messaging_flags = conf.get('oslo-messaging-flags', None)
    if oslo_messaging_flags:
        ctxt['oslo_messaging_flags'] = config_flags_parser(
            oslo_messaging_flags)
    if not self.complete:
        return {}
    return ctxt
def __call__(self):
    """Build the swift-proxy template context.

    Combines charm config (bind/worker/timeouts/statsd/affinity), the
    charm-helpers identity context (for signing_dir) and keystone auth
    settings from either user config or the identity-service relation
    (including keystone v3 domain data).
    """
    bind_port = config('bind-port')
    workers = config('workers')
    if workers == 0:
        # 0 means "one worker per CPU".
        import multiprocessing
        workers = multiprocessing.cpu_count()
    if config('prefer-ipv6'):
        proxy_ip = ('[{}]'
                    .format(get_ipv6_addr(exc_list=[config('vip')])[0]))
        memcached_ip = 'ip6-localhost'
    else:
        proxy_ip = get_host_ip(unit_get('private-address'))
        memcached_ip = get_host_ip(unit_get('private-address'))
    ctxt = {
        'proxy_ip': proxy_ip,
        'memcached_ip': memcached_ip,
        'bind_port': determine_api_port(bind_port, singlenode_mode=True),
        'workers': workers,
        'operator_roles': config('operator-roles'),
        'delay_auth_decision': config('delay-auth-decision'),
        'node_timeout': config('node-timeout'),
        'recoverable_node_timeout': config('recoverable-node-timeout'),
        'log_headers': config('log-headers'),
        'statsd_host': config('statsd-host'),
        'statsd_port': config('statsd-port'),
        'statsd_sample_rate': config('statsd-sample-rate'),
        'static_large_object_segments': config(
            'static-large-object-segments'),
        'enable_multi_region': config('enable-multi-region'),
        'read_affinity': get_read_affinity(),
        'write_affinity': get_write_affinity(),
        'write_affinity_node_count': get_write_affinity_node_count()
    }
    cmp_openstack = CompareOpenStackReleases(os_release('swift'))
    if cmp_openstack < 'train':
        # swauth is no longer supported for OpenStack Train and later
        admin_key = leader_get('swauth-admin-key')
        if admin_key is not None:
            ctxt['swauth_admin_key'] = admin_key
    if config('debug'):
        ctxt['log_level'] = 'DEBUG'
    else:
        ctxt['log_level'] = 'INFO'
    # Instead of duplicating code lets use charm-helpers to set signing_dir
    # TODO(hopem): refactor this context handler to use charm-helpers
    # code.
    _ctxt = IdentityServiceContext(service='swift', service_user='******')()
    signing_dir = _ctxt.get('signing_dir')
    if signing_dir:
        ctxt['signing_dir'] = signing_dir
    ctxt['ssl'] = False
    auth_type = config('auth-type')
    ctxt['auth_type'] = auth_type
    auth_host = config('keystone-auth-host')
    admin_user = config('keystone-admin-user')
    # BUG FIX: this previously read config('keystone-admin-user'), so the
    # user-specified keystone path could never see the real password.
    admin_password = config('keystone-admin-password')
    if (auth_type == 'keystone' and auth_host and
            admin_user and admin_password):
        log('Using user-specified Keystone configuration.')
        ks_auth = {
            'auth_type': 'keystone',
            'auth_protocol': config('keystone-auth-protocol'),
            'keystone_host': auth_host,
            'auth_port': config('keystone-auth-port'),
            'service_user': admin_user,
            'service_password': admin_password,
            'service_tenant': config('keystone-admin-tenant-name'),
        }
        ctxt.update(ks_auth)
    # Relation-provided settings take precedence over user config.
    for relid in relation_ids('identity-service'):
        log('Using Keystone configuration from identity-service.')
        for unit in related_units(relid):
            ks_auth = {
                'auth_type': 'keystone',
                'auth_protocol': relation_get('auth_protocol',
                                              unit, relid) or 'http',
                'service_protocol': relation_get('service_protocol',
                                                 unit, relid) or 'http',
                'keystone_host': relation_get('auth_host',
                                              unit, relid),
                'service_host': relation_get('service_host',
                                             unit, relid),
                'auth_port': relation_get('auth_port',
                                          unit, relid),
                'service_user': relation_get('service_username',
                                             unit, relid),
                'service_password': relation_get('service_password',
                                                 unit, relid),
                'service_tenant': relation_get('service_tenant',
                                               unit, relid),
                'service_port': relation_get('service_port',
                                             unit, relid),
                'api_version': relation_get('api_version',
                                            unit, relid) or '2',
            }
            # Keystone v3 needs the domain/tenant identifiers as well.
            if ks_auth['api_version'] == '3':
                ks_auth['admin_domain_id'] = relation_get(
                    'admin_domain_id', unit, relid)
                ks_auth['service_tenant_id'] = relation_get(
                    'service_tenant_id', unit, relid)
                ks_auth['admin_domain_name'] = relation_get(
                    'service_domain', unit, relid)
                ks_auth['admin_tenant_name'] = relation_get(
                    'service_tenant', unit, relid)
            ctxt.update(ks_auth)
    if config('prefer-ipv6'):
        # Bracket IPv6 hosts for URL embedding.
        for key in ['keystone_host', 'service_host']:
            host = ctxt.get(key)
            if host:
                ctxt[key] = format_ipv6_addr(host)
    return ctxt
def test_format_ipv6_addr(self):
    """A bare IPv6 address should be wrapped in square brackets."""
    DUMMY_ADDRESS = '2001:db8:1:0:f131:fc84:ea37:7d4'
    # assertEquals is a deprecated alias; use assertEqual.
    self.assertEqual(net_ip.format_ipv6_addr(DUMMY_ADDRESS),
                     '[2001:db8:1:0:f131:fc84:ea37:7d4]')
def __call__(self):
    """Build the amqp (rabbitmq) relation context.

    Raises OSContextError when the rabbit user/vhost charm config is
    missing. Handles clustered (VIP) and active/active deployments and
    optional SSL settings. Returns {} until the context is complete.
    """
    log('Generating template context for amqp', level=DEBUG)
    conf = config()
    if self.relation_prefix:
        user_setting = '%s-rabbit-user' % (self.relation_prefix)
        vhost_setting = '%s-rabbit-vhost' % (self.relation_prefix)
    else:
        user_setting = 'rabbit-user'
        vhost_setting = 'rabbit-vhost'
    try:
        username = conf[user_setting]
        vhost = conf[vhost_setting]
    except KeyError as e:
        log('Could not generate shared_db context. Missing required charm '
            'config options: %s.' % e, level=ERROR)
        raise OSContextError
    ctxt = {}
    for rid in relation_ids(self.rel_name):
        ha_vip_only = False
        for unit in related_units(rid):
            # Clustered rabbit advertises a VIP to use instead of the
            # unit's private address.
            if relation_get('clustered', rid=rid, unit=unit):
                ctxt['clustered'] = True
                vip = relation_get('vip', rid=rid, unit=unit)
                vip = format_ipv6_addr(vip) or vip
                ctxt['rabbitmq_host'] = vip
            else:
                host = relation_get('private-address', rid=rid, unit=unit)
                host = format_ipv6_addr(host) or host
                ctxt['rabbitmq_host'] = host
            ctxt.update({
                'rabbitmq_user': username,
                'rabbitmq_password': relation_get('password', rid=rid,
                                                  unit=unit),
                'rabbitmq_virtual_host': vhost,
            })
            ssl_port = relation_get('ssl_port', rid=rid, unit=unit)
            if ssl_port:
                ctxt['rabbit_ssl_port'] = ssl_port
            ssl_ca = relation_get('ssl_ca', rid=rid, unit=unit)
            if ssl_ca:
                ctxt['rabbit_ssl_ca'] = ssl_ca
            if relation_get('ha_queues', rid=rid, unit=unit) is not None:
                ctxt['rabbitmq_ha_queues'] = True
            ha_vip_only = relation_get('ha-vip-only',
                                       rid=rid, unit=unit) is not None
            if context_complete(ctxt):
                if 'rabbit_ssl_ca' in ctxt:
                    if not self.ssl_dir:
                        log("Charm not setup for ssl support but ssl ca "
                            "found", level=INFO)
                        break
                    # Persist the relation-provided CA so clients can
                    # reference it by path.
                    ca_path = os.path.join(
                        self.ssl_dir, 'rabbit-client-ca.pem')
                    with open(ca_path, 'w') as fh:
                        fh.write(b64decode(ctxt['rabbit_ssl_ca']))
                    ctxt['rabbit_ssl_ca'] = ca_path
                # Sufficient information found = break out!
                break
        # Used for active/active rabbitmq >= grizzly
        if (('clustered' not in ctxt or ha_vip_only) and
                len(related_units(rid)) > 1):
            rabbitmq_hosts = []
            for unit in related_units(rid):
                host = relation_get('private-address', rid=rid, unit=unit)
                host = format_ipv6_addr(host) or host
                rabbitmq_hosts.append(host)
            ctxt['rabbitmq_hosts'] = ','.join(sorted(rabbitmq_hosts))
    if not context_complete(ctxt):
        return {}
    return ctxt
def __call__(self):
    """Build the radosgw template context from the 'mon' relation.

    Collects the monitor addresses, agreed auth scheme and per-host rgw
    key availability from every mon unit, then merges any user-provided
    ceph.conf overrides.  Returns ``{}`` when no mon relation exists or
    the assembled context is incomplete.
    """
    if not relation_ids('mon'):
        return {}

    hostname = socket.gethostname()
    monitors = []
    auth_values = []
    systemd_rgw = False

    # Walk every mon unit: gather auth schemes and monitor addresses,
    # and detect whether a per-host rgw key has been published (which
    # indicates systemd-managed radosgw).
    for rid in relation_ids('mon'):
        for unit in related_units(rid):
            unit_auth = relation_get('auth', rid=rid, unit=unit)
            if unit_auth:
                auth_values.append(unit_auth)

            addr = (relation_get('ceph-public-address', rid=rid,
                                 unit=unit) or
                    relation_get('private-address', rid=rid, unit=unit))
            addr = format_ipv6_addr(addr) or addr
            if addr:
                monitors.append(addr)

            if relation_get('rgw.{}_key'.format(hostname),
                            rid=rid, unit=unit):
                systemd_rgw = True

    # All mons must agree on a single auth scheme; otherwise fall back
    # to 'none' and warn.
    if len(set(auth_values)) == 1:
        auth = auth_values[0]
    else:
        e = ("Inconsistent or absent auth returned by mon units. Setting "
             "auth_supported to 'none'")
        log(e, level=WARNING)
        auth = 'none'

    # /etc/init.d/radosgw mandates that a dns name is used for this
    # parameter so ensure that address is resolvable
    if config('prefer-ipv6'):
        ensure_host_resolvable_v6(hostname)

    port = determine_api_port(config('port'), singlenode_mode=True)
    if config('prefer-ipv6'):
        port = "[::]:%s" % (port)

    monitors.sort()
    ctxt = {
        'auth_supported': auth,
        'mon_hosts': ' '.join(monitors),
        'hostname': hostname,
        'old_auth': cmp_pkgrevno('radosgw', "0.51") < 0,
        'systemd_rgw': systemd_rgw,
        'use_syslog': str(config('use-syslog')).lower(),
        'loglevel': config('loglevel'),
        'port': port,
        'ipv6': config('prefer-ipv6'),
        # The public unit IP is only used in case the authentication is
        # *Not* keystone - in which case it is used to make sure the
        # storage endpoint returned by the built-in auth is the HAproxy
        # (since it defaults to the port the service runs on, and that is
        # not available externally). ~tribaal
        'unit_public_ip': unit_public_ip(),
    }

    # NOTE(dosaboy): these sections must correspond to what is supported
    # in the config template.
    permitted = ['global', 'client.radosgw.gateway']
    overrides = CephConfContext(permitted_sections=permitted)()
    ctxt.update({key.replace('.', '_'): value
                 for key, value in overrides.items()})

    if not self.context_complete(ctxt):
        return {}

    # Multi-site Zone configuration is optional, so add after assessment.
    ctxt['rgw_zone'] = config('zone')
    return ctxt
def __call__(self):
    """Build the (apache-fronted) radosgw template context.

    Gathers monitor endpoints and the agreed auth scheme from the 'mon'
    relation, plus webserver/SSL related flags from charm config.
    Returns ``{}`` when no mon relation exists or the context is
    incomplete.
    """
    if not relation_ids('mon'):
        return {}

    monitors = []
    auth_values = []
    for relid in relation_ids('mon'):
        for unit in related_units(relid):
            public_addr = relation_get('ceph-public-address', unit, relid)
            if public_addr:
                resolved = (format_ipv6_addr(public_addr) or
                            get_host_ip(public_addr))
                monitors.append('{}:6789'.format(resolved))

            unit_auth = relation_get('auth', unit, relid)
            if unit_auth:
                auth_values.append(unit_auth)

    # All mons must agree on a single auth scheme; otherwise fall back
    # to 'none' and warn.
    if len(set(auth_values)) == 1:
        auth = auth_values[0]
    else:
        e = ("Inconsistent or absent auth returned by mon units. Setting "
             "auth_supported to 'none'")
        log(e, level=WARNING)
        auth = 'none'

    # /etc/init.d/radosgw mandates that a dns name is used for this
    # parameter so ensure that address is resolvable
    host = socket.gethostname()
    if config('prefer-ipv6'):
        ensure_host_resolvable_v6(host)

    port = determine_apache_port(config('port'), singlenode_mode=True)
    if config('prefer-ipv6'):
        port = "[::]:%s" % (port)

    monitors.sort()
    ctxt = {
        'auth_supported': auth,
        'mon_hosts': ' '.join(monitors),
        'hostname': host,
        'old_auth': cmp_pkgrevno('radosgw', "0.51") < 0,
        'use_syslog': str(config('use-syslog')).lower(),
        'embedded_webserver': config('use-embedded-webserver'),
        'loglevel': config('loglevel'),
        'port': port,
        'ipv6': config('prefer-ipv6')
    }

    # Keystone CMS is only enabled when both NSS certificates exist.
    nss_dir = '/var/lib/ceph/nss'
    cert_files = [os.path.join(nss_dir, 'ca.pem'),
                  os.path.join(nss_dir, 'signing_certificate.pem')]
    if all(os.path.isfile(p) for p in cert_files):
        ctxt['cms'] = True

    # NOTE: currently only applied if NOT using embedded webserver
    ctxt['disable_100_continue'] = not (
        config('use-ceph-optimised-packages') and
        not config('use-embedded-webserver'))

    if self.context_complete(ctxt):
        return ctxt

    return {}