def ssh_compute_add(public_key, rid=None, unit=None, user=None):
    # If remote compute node hands us a hostname, ensure we have a
    # known hosts entry for its IP, hostname and FQDN.
    private_address = relation_get(rid=rid, unit=unit,
                                   attribute='private-address')
    hosts = [private_address]

    if not is_ipv6(private_address):
        if relation_get('hostname'):
            hosts.append(relation_get('hostname'))

        if not is_ip(private_address):
            hosts.append(get_host_ip(private_address))
            short = private_address.split('.')[0]
            if ns_query(short):
                hosts.append(short)
        else:
            hn = get_hostname(private_address)
            if hn:
                hosts.append(hn)
                short = hn.split('.')[0]
                if ns_query(short):
                    hosts.append(short)

    for host in list(set(hosts)):
        add_known_host(host, unit, user)

    if not ssh_authorized_key_exists(public_key, unit, user):
        log('Saving SSH authorized key for compute host at %s.' %
            private_address)
        add_authorized_key(public_key, unit, user)
def configure_link(bgp_info, remote_addr):
    CONF_ROUTER_BGP = ['conf t',
                       'router bgp {}'.format(quagga.get_asn())]
    EXIT_ROUTER_BGP_WRITE = ['exit', 'exit', 'write']

    vtysh_cmd = copy.deepcopy(CONF_ROUTER_BGP)
    ch_core.hookenv.log('DEBUG: configure neighbour {} '
                        'remote-as {}'.format(remote_addr, bgp_info['asn']))
    vtysh_cmd += [
        'neighbor {} remote-as {}'.format(remote_addr, bgp_info['asn'])
    ]
    if bgp_info['passive']:
        vtysh_cmd += ['neighbor {} passive'.format(remote_addr)]
    if ch_net_ip.is_ipv6(remote_addr):
        vtysh_cmd += [
            'no neighbor {} activate'.format(remote_addr),
            'address-family ipv6',
            # workaround for quagga redistribute connected
            # not working as expected for IPv6
            'network {}'.format(ch_net_ip.resolve_network_cidr(remote_addr)),
            'neighbor {} activate'.format(remote_addr),
            'exit',
        ]
    # Exit and write
    vtysh_cmd += EXIT_ROUTER_BGP_WRITE
    # Execute the command
    quagga.vtysh(vtysh_cmd)
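# A standalone sketch of the vtysh command list configure_link builds for an
# IPv6 peer. The ASNs, addresses and the resolve_network_cidr() result below
# are hypothetical stand-ins for the quagga/charm-helpers calls used above.
example_remote, example_remote_asn, example_local_asn = (
    '2001:db8::2', 64513, 64512)
example_cmds = [
    'conf t',
    'router bgp {}'.format(example_local_asn),
    'neighbor {} remote-as {}'.format(example_remote, example_remote_asn),
    'no neighbor {} activate'.format(example_remote),
    'address-family ipv6',
    'network 2001:db8::/64',  # what resolve_network_cidr() would return
    'neighbor {} activate'.format(example_remote),
    'exit',          # leave address-family ipv6
    'exit', 'exit',  # leave router bgp, then config mode
    'write',
]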
def ssh_compute_add(public_key, rid=None, unit=None, user=None):
    # If remote compute node hands us a hostname, ensure we have a
    # known hosts entry for its IP, hostname and FQDN.
    private_address = relation_get(rid=rid, unit=unit,
                                   attribute='private-address')
    hosts = [private_address]

    if not is_ipv6(private_address):
        if relation_get('hostname'):
            hosts.append(relation_get('hostname'))

        if not is_ip(private_address):
            hosts.append(get_host_ip(private_address))
            hosts.append(private_address.split('.')[0])
        else:
            hn = get_hostname(private_address)
            if hn:  # guard against a failed reverse lookup
                hosts.append(hn)
                hosts.append(hn.split('.')[0])

    for host in list(set(hosts)):
        add_known_host(host, unit, user)

    if not ssh_authorized_key_exists(public_key, unit, user):
        log('Saving SSH authorized key for compute host at %s.' %
            private_address)
        add_authorized_key(public_key, unit, user)
def customer_check_assess_status(configs):
    """Custom check function provided to assess_status() to check the
    current status of the current unit beyond checking that the relevant
    services are running.

    @param configs: An OSConfigRenderer object of the current services
    @returns (status, message): The outcome of the checks.
    """
    # Check for required swift-storage relation
    if len(relation_ids('swift-storage')) < 1:
        return ('blocked', 'Missing relation: storage')

    # Verify allowed_hosts is populated with enough unit IP addresses
    ctxt = SwiftRingContext()()
    if len(ctxt['allowed_hosts']) < config('replicas'):
        return ('blocked', 'Not enough related storage nodes')

    # Verify there are enough storage zones to satisfy minimum replicas
    rings = [r for r in SWIFT_RINGS.itervalues()]
    if not has_minimum_zones(rings):
        return ('blocked', 'Not enough storage zones for minimum replicas')

    if config('prefer-ipv6'):
        for rid in relation_ids('swift-storage'):
            for unit in related_units(rid):
                addr = relation_get(attribute='private-address',
                                    unit=unit, rid=rid)
                if not is_ipv6(addr):
                    return ('blocked', 'Did not get IPv6 address from '
                            'storage relation (got=%s)' % (addr))

    return 'active', 'Unit is ready'
def resolve_hostname_to_ip(hostname):
    """Resolve hostname to IP.

    @param hostname: hostname to be resolved
    @returns IP address or None if resolution was not possible via DNS
    """
    try:
        import dns.resolver
    except ImportError:
        apt_install(filter_installed_packages(['python-dnspython']),
                    fatal=True)
        import dns.resolver

    if config('prefer-ipv6'):
        if is_ipv6(hostname):
            return hostname

        query_type = 'AAAA'
    elif is_ip(hostname):
        return hostname
    else:
        query_type = 'A'

    # This may throw an NXDOMAIN exception; in which case
    # things are badly broken so just let it kill the hook
    answers = dns.resolver.query(hostname, query_type)
    if answers:
        return answers[0].address
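# A simplified, stdlib-only sketch of the query-type selection performed in
# resolve_hostname_to_ip above, using ipaddress in place of the charm-helpers
# is_ip/is_ipv6 helpers. No DNS I/O is done here; unlike the real helper,
# this sketch short-circuits on any IP literal, not just the preferred family.
import ipaddress


def _query_type(name, prefer_ipv6=False):
    try:
        ipaddress.ip_address(name)
        return None  # already an IP literal; nothing to resolve
    except ValueError:
        return 'AAAA' if prefer_ipv6 else 'A'


assert _query_type('2001:db8::1') is None
assert _query_type('db0.example.com', prefer_ipv6=True) == 'AAAA'
assert _query_type('db0.example.com') == 'A'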
def get_ha_nodes():
    ha_units = peer_ips(peer_relation='hanode')
    ha_nodes = {}
    for unit in ha_units:
        corosync_id = get_corosync_id(unit)
        addr = ha_units[unit]
        if config('prefer-ipv6'):
            if not utils.is_ipv6(addr):
                # Not an error since cluster may still be forming/updating
                log("Expected an ipv6 address but got %s" % (addr),
                    level=WARNING)

            ha_nodes[corosync_id] = addr
        else:
            ha_nodes[corosync_id] = get_host_ip(addr)

    corosync_id = get_corosync_id(local_unit())
    if config('prefer-ipv6'):
        addr = get_ipv6_addr()
    else:
        addr = get_host_ip(unit_get('private-address'))

    ha_nodes[corosync_id] = addr

    return ha_nodes
def setup_keystone_certs(unit=None, rid=None):
    """Get CA and signing certs from Keystone used to decrypt the revoked
    token list.

    :param unit: context unit id
    :param rid: context relation id
    :returns: None
    """
    certs_path = '/var/lib/ceph/nss'
    if not os.path.exists(certs_path):
        mkdir(certs_path)

    rdata = relation_get(unit=unit, rid=rid)
    required = ['admin_token', 'auth_host', 'auth_port']
    settings = {key: rdata.get(key) for key in required}
    if not all(settings.values()):
        log("Missing relation settings ({}) - deferring cert setup".format(
            ', '.join([k for k in settings if not settings[k]])),
            level=DEBUG)
        return

    auth_protocol = rdata.get('auth_protocol', 'http')
    if is_ipv6(settings.get('auth_host')):
        settings['auth_host'] = format_ipv6_addr(settings.get('auth_host'))

    auth_endpoint = "{}://{}:{}/v2.0".format(auth_protocol,
                                             settings['auth_host'],
                                             settings['auth_port'])

    try:
        get_ks_ca_cert(settings['admin_token'], auth_endpoint, certs_path)
        get_ks_signing_cert(settings['admin_token'], auth_endpoint,
                            certs_path)
    except KSCertSetupException as e:
        log("Keystone certs setup incomplete - {}".format(e), level=INFO)
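# Worked example of the endpoint string setup_keystone_certs builds once an
# IPv6 auth_host has been bracket-wrapped by format_ipv6_addr (the host and
# port values here are hypothetical).
example_endpoint = "{}://{}:{}/v2.0".format('http', '[2001:db8::5]', 35357)
assert example_endpoint == 'http://[2001:db8::5]:35357/v2.0'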
def ha_relation_joined(relation_id=None):
    # Obtain the config values necessary for the cluster config. These
    # include multicast port and interface to bind to.
    cluster_config = get_hacluster_config()

    # Obtain resources
    resources = {'res_swift_haproxy': 'lsb:haproxy'}
    resource_params = {'res_swift_haproxy': 'op monitor interval="5s"'}

    if config('dns-ha'):
        update_dns_ha_resource_params(relation_id=relation_id,
                                      resources=resources,
                                      resource_params=resource_params)
    else:
        vip_group = []
        for vip in cluster_config['vip'].split():
            if is_ipv6(vip):
                res_swift_vip = 'ocf:heartbeat:IPv6addr'
                vip_params = 'ipv6addr'
            else:
                res_swift_vip = 'ocf:heartbeat:IPaddr2'
                vip_params = 'ip'

            iface = get_iface_for_address(vip)
            if iface is not None:
                vip_key = 'res_swift_{}_vip'.format(iface)
                if vip_key in vip_group:
                    if vip not in resource_params[vip_key]:
                        vip_key = '{}_{}'.format(vip_key, vip_params)
                    else:
                        log("Resource '{}' (vip='{}') already exists in "
                            "vip group - skipping".format(vip_key, vip),
                            WARNING)
                        continue

                resources[vip_key] = res_swift_vip
                resource_params[vip_key] = (
                    'params {ip}="{vip}" cidr_netmask="{netmask}"'
                    ' nic="{iface}"'.format(
                        ip=vip_params, vip=vip, iface=iface,
                        netmask=get_netmask_for_address(vip)))
                vip_group.append(vip_key)

        if len(vip_group) >= 1:
            relation_set(groups={'grp_swift_vips': ' '.join(vip_group)})

    init_services = {'res_swift_haproxy': 'haproxy'}
    clones = {'cl_swift_haproxy': 'res_swift_haproxy'}

    relation_set(relation_id=relation_id,
                 init_services=init_services,
                 corosync_bindiface=cluster_config['ha-bindiface'],
                 corosync_mcastport=cluster_config['ha-mcastport'],
                 resources=resources,
                 resource_params=resource_params,
                 clones=clones)
def ha_joined(relation_id=None):
    cluster_config = get_hacluster_config()

    resources = {
        'res_cinder_haproxy': 'lsb:haproxy'
    }
    resource_params = {
        'res_cinder_haproxy': 'op monitor interval="5s"'
    }

    if config('dns-ha'):
        update_dns_ha_resource_params(relation_id=relation_id,
                                      resources=resources,
                                      resource_params=resource_params)
    else:
        vip_group = []
        for vip in cluster_config['vip'].split():
            if is_ipv6(vip):
                res_cinder_vip = 'ocf:heartbeat:IPv6addr'
                vip_params = 'ipv6addr'
            else:
                res_cinder_vip = 'ocf:heartbeat:IPaddr2'
                vip_params = 'ip'

            iface = (get_iface_for_address(vip) or
                     config('vip_iface'))
            netmask = (get_netmask_for_address(vip) or
                       config('vip_cidr'))

            if iface is not None:
                vip_key = 'res_cinder_{}_vip'.format(iface)
                resources[vip_key] = res_cinder_vip
                resource_params[vip_key] = (
                    'params {ip}="{vip}" cidr_netmask="{netmask}"'
                    ' nic="{iface}"'.format(ip=vip_params,
                                            vip=vip,
                                            iface=iface,
                                            netmask=netmask)
                )
                vip_group.append(vip_key)

        if len(vip_group) >= 1:
            relation_set(relation_id=relation_id,
                         groups={'grp_cinder_vips': ' '.join(vip_group)})

    init_services = {
        'res_cinder_haproxy': 'haproxy'
    }
    clones = {
        'cl_cinder_haproxy': 'res_cinder_haproxy'
    }
    relation_set(relation_id=relation_id,
                 init_services=init_services,
                 corosync_bindiface=cluster_config['ha-bindiface'],
                 corosync_mcastport=cluster_config['ha-mcastport'],
                 resources=resources,
                 resource_params=resource_params,
                 clones=clones)
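# Worked example of the Pacemaker resource parameter string the ha_joined
# variants in this collection assemble for an IPv6 VIP (the vip/iface/netmask
# values are hypothetical). IPv6 VIPs use the ocf:heartbeat:IPv6addr agent
# and its 'ipv6addr' parameter; IPv4 VIPs use ocf:heartbeat:IPaddr2 and 'ip'.
example_params = (
    'params {ip}="{vip}" cidr_netmask="{netmask}"'
    ' nic="{iface}"'.format(ip='ipv6addr', vip='2001:db8::10',
                            iface='eth0', netmask='64'))
assert example_params == ('params ipv6addr="2001:db8::10" '
                          'cidr_netmask="64" nic="eth0"')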
def ha_joined():
    cluster_config = get_hacluster_config()
    resources = {
        'res_nova_haproxy': 'lsb:haproxy',
    }
    resource_params = {
        'res_nova_haproxy': 'op monitor interval="5s"',
    }
    vip_group = []
    for vip in cluster_config['vip'].split():
        if is_ipv6(vip):
            res_nova_vip = 'ocf:heartbeat:IPv6addr'
            vip_params = 'ipv6addr'
        else:
            res_nova_vip = 'ocf:heartbeat:IPaddr2'
            vip_params = 'ip'

        iface = (get_iface_for_address(vip) or
                 config('vip_iface'))
        netmask = (get_netmask_for_address(vip) or
                   config('vip_cidr'))

        if iface is not None:
            vip_key = 'res_nova_{}_vip'.format(iface)
            resources[vip_key] = res_nova_vip
            resource_params[vip_key] = (
                'params {ip}="{vip}" cidr_netmask="{netmask}"'
                ' nic="{iface}"'.format(ip=vip_params,
                                        vip=vip,
                                        iface=iface,
                                        netmask=netmask)
            )
            vip_group.append(vip_key)

    if len(vip_group) >= 1:
        relation_set(groups={'grp_nova_vips': ' '.join(vip_group)})

    init_services = {
        'res_nova_haproxy': 'haproxy'
    }
    clones = {
        'cl_nova_haproxy': 'res_nova_haproxy'
    }
    colocations = {}

    if config('single-nova-consoleauth') and console_attributes('protocol'):
        colocations['vip_consoleauth'] = COLO_CONSOLEAUTH
        init_services['res_nova_consoleauth'] = 'nova-consoleauth'
        resources['res_nova_consoleauth'] = AGENT_CONSOLEAUTH
        resource_params['res_nova_consoleauth'] = AGENT_CA_PARAMS

    relation_set(init_services=init_services,
                 corosync_bindiface=cluster_config['ha-bindiface'],
                 corosync_mcastport=cluster_config['ha-mcastport'],
                 resources=resources,
                 resource_params=resource_params,
                 clones=clones,
                 colocations=colocations)
def ha_joined(relation_id=None):
    cluster_config = get_hacluster_config()
    resources = {
        'res_ks_haproxy': 'lsb:haproxy',
    }
    resource_params = {
        'res_ks_haproxy': 'op monitor interval="5s"'
    }

    vip_group = []
    for vip in cluster_config['vip'].split():
        if is_ipv6(vip):
            res_ks_vip = 'ocf:heartbeat:IPv6addr'
            vip_params = 'ipv6addr'
        else:
            res_ks_vip = 'ocf:heartbeat:IPaddr2'
            vip_params = 'ip'

        iface = (get_iface_for_address(vip) or
                 config('vip_iface'))
        netmask = (get_netmask_for_address(vip) or
                   config('vip_cidr'))

        if iface is not None:
            vip_key = 'res_ks_{}_vip'.format(iface)
            if vip_key in vip_group:
                log("Resource '%s' (vip='%s') already exists in "
                    "vip group - skipping" % (vip_key, vip), WARNING)
                continue

            vip_group.append(vip_key)
            resources[vip_key] = res_ks_vip
            resource_params[vip_key] = (
                'params {ip}="{vip}" cidr_netmask="{netmask}"'
                ' nic="{iface}"'.format(ip=vip_params,
                                        vip=vip,
                                        iface=iface,
                                        netmask=netmask)
            )

    if len(vip_group) >= 1:
        relation_set(relation_id=relation_id,
                     groups={CLUSTER_RES: ' '.join(vip_group)})

    init_services = {
        'res_ks_haproxy': 'haproxy'
    }
    clones = {
        'cl_ks_haproxy': 'res_ks_haproxy'
    }

    relation_set(relation_id=relation_id,
                 init_services=init_services,
                 corosync_bindiface=cluster_config['ha-bindiface'],
                 corosync_mcastport=cluster_config['ha-mcastport'],
                 resources=resources,
                 resource_params=resource_params,
                 clones=clones)
def ha_joined(relation_id=None):
    cluster_config = get_hacluster_config()

    resources = {
        'res_nova_haproxy': 'lsb:haproxy',
    }
    resource_params = {
        'res_nova_haproxy': 'op monitor interval="5s"',
    }
    init_services = {'res_nova_haproxy': 'haproxy'}
    clones = {'cl_nova_haproxy': 'res_nova_haproxy'}
    colocations = {}

    if config('dns-ha'):
        update_dns_ha_resource_params(relation_id=relation_id,
                                      resources=resources,
                                      resource_params=resource_params)
    else:
        vip_group = []
        for vip in cluster_config['vip'].split():
            if is_ipv6(vip):
                res_nova_vip = 'ocf:heartbeat:IPv6addr'
                vip_params = 'ipv6addr'
            else:
                res_nova_vip = 'ocf:heartbeat:IPaddr2'
                vip_params = 'ip'

            iface = (get_iface_for_address(vip) or
                     config('vip_iface'))
            netmask = (get_netmask_for_address(vip) or
                       config('vip_cidr'))

            if iface is not None:
                vip_key = 'res_nova_{}_vip'.format(iface)
                resources[vip_key] = res_nova_vip
                resource_params[vip_key] = (
                    'params {ip}="{vip}" cidr_netmask="{netmask}"'
                    ' nic="{iface}"'.format(ip=vip_params,
                                            vip=vip,
                                            iface=iface,
                                            netmask=netmask))
                vip_group.append(vip_key)

        if len(vip_group) >= 1:
            relation_set(groups={'grp_nova_vips': ' '.join(vip_group)})

    if (config('single-nova-consoleauth') and
            console_attributes('protocol')):
        colocations['vip_consoleauth'] = COLO_CONSOLEAUTH
        init_services['res_nova_consoleauth'] = 'nova-consoleauth'
        resources['res_nova_consoleauth'] = AGENT_CONSOLEAUTH
        resource_params['res_nova_consoleauth'] = AGENT_CA_PARAMS

    relation_set(relation_id=relation_id,
                 init_services=init_services,
                 corosync_bindiface=cluster_config['ha-bindiface'],
                 corosync_mcastport=cluster_config['ha-mcastport'],
                 resources=resources,
                 resource_params=resource_params,
                 clones=clones,
                 colocations=colocations)
def ensure_ipv6_requirements(hanode_rid):
    # hanode relation needs ipv6 private-address
    addr = relation_get(rid=hanode_rid, unit=local_unit(),
                        attribute='private-address')
    log("Current private-address is %s" % (addr))
    if not is_ipv6(addr):
        addr = get_ipv6_addr()
        log("New private-address is %s" % (addr))
        relation_set(relation_id=hanode_rid,
                     **{'private-address': addr})
def ssh_compute_add_host_and_key(public_key, hostname, private_address,
                                 application_name, user=None):
    """Add a compute node's ssh details to the local cache.

    Collect various hostname variations and add the corresponding host keys
    to the local known hosts file. Finally, add the supplied public key to
    the authorized_key file.

    :param public_key: Public key.
    :type public_key: str
    :param hostname: Hostname to collect host keys from.
    :type hostname: str
    :param private_address: Corresponding private address for hostname
    :type private_address: str
    :param application_name: Name of application eg nova-compute-something
    :type application_name: str
    :param user: The user that the ssh assets are for.
    :type user: str
    """
    # If remote compute node hands us a hostname, ensure we have a
    # known hosts entry for its IP, hostname and FQDN.
    hosts = [private_address]

    if not is_ipv6(private_address):
        if hostname:
            hosts.append(hostname)

        if is_ip(private_address):
            hn = get_hostname(private_address)
            if hn:
                hosts.append(hn)
                short = hn.split('.')[0]
                if ns_query(short):
                    hosts.append(short)
        else:
            hosts.append(get_host_ip(private_address))
            short = private_address.split('.')[0]
            if ns_query(short):
                hosts.append(short)

    for host in list(set(hosts)):
        add_known_host(host, application_name, user)

    if not ssh_authorized_key_exists(public_key, application_name, user):
        log('Saving SSH authorized key for compute host at %s.' %
            private_address)
        add_authorized_key(public_key, application_name, user)
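# A dependency-free sketch (stdlib only; hypothetical inputs) of the
# hostname-variant collection done above. The real helper additionally
# consults DNS via get_hostname/get_host_ip/ns_query before trusting a name.
import ipaddress


def _host_variants(private_address, hostname=None):
    hosts = {private_address}
    try:
        ipaddress.ip_address(private_address)
    except ValueError:
        # a name, not an address: also record the short hostname
        hosts.add(private_address.split('.')[0])
    if hostname:
        hosts.add(hostname)
    return sorted(hosts)


assert _host_variants('host1.example.com') == ['host1', 'host1.example.com']
assert _host_variants('10.0.0.5', 'host1') == ['10.0.0.5', 'host1']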
def ha_relation_joined():
    # Obtain the config values necessary for the cluster config. These
    # include multicast port and interface to bind to.
    corosync_bindiface = config('ha-bindiface')
    corosync_mcastport = config('ha-mcastport')
    vip = config('vip')
    if not vip:
        msg = 'Unable to configure hacluster as vip not provided'
        raise SwiftProxyCharmException(msg)

    # Obtain resources
    resources = {'res_swift_haproxy': 'lsb:haproxy'}
    resource_params = {'res_swift_haproxy': 'op monitor interval="5s"'}

    vip_group = []
    for vip in vip.split():
        if is_ipv6(vip):
            res_swift_vip = 'ocf:heartbeat:IPv6addr'
            vip_params = 'ipv6addr'
        else:
            res_swift_vip = 'ocf:heartbeat:IPaddr2'
            vip_params = 'ip'

        iface = get_iface_for_address(vip)
        if iface is not None:
            vip_key = 'res_swift_{}_vip'.format(iface)
            resources[vip_key] = res_swift_vip
            resource_params[vip_key] = (
                'params {ip}="{vip}" cidr_netmask="{netmask}"'
                ' nic="{iface}"'.format(ip=vip_params,
                                        vip=vip,
                                        iface=iface,
                                        netmask=get_netmask_for_address(vip))
            )
            vip_group.append(vip_key)

    if len(vip_group) >= 1:
        relation_set(groups={'grp_swift_vips': ' '.join(vip_group)})

    init_services = {'res_swift_haproxy': 'haproxy'}
    clones = {'cl_swift_haproxy': 'res_swift_haproxy'}

    relation_set(init_services=init_services,
                 corosync_bindiface=corosync_bindiface,
                 corosync_mcastport=corosync_mcastport,
                 resources=resources,
                 resource_params=resource_params,
                 clones=clones)
def canonical_url(endpoint_type=PUBLIC):
    """Returns the correct HTTP URL to this host given the state of HTTPS
    configuration, hacluster and charm configuration.

    :param endpoint_type str: The endpoint type to resolve.
    :returns str: Base URL for services on the current service unit.
    """
    scheme = 'http'
    if charms.reactive.bus.get_state('ssl.enabled'):
        scheme = 'https'

    address = resolve_address(endpoint_type)
    if net_ip.is_ipv6(address):
        address = "[{}]".format(address)

    return "{0}://{1}".format(scheme, address)
def canonical_url(endpoint_type=PUBLIC):
    '''Returns the correct HTTP URL to this host given the state of HTTPS
    configuration, hacluster and charm configuration.

    :endpoint_type str: The endpoint type to resolve.
    :returns str: Base URL for services on the current service unit.
    '''
    scheme = 'http'
    # if 'https' in configs.complete_contexts():
    #     scheme = 'https'
    address = resolve_address(endpoint_type)
    if is_ipv6(address):
        address = "[{}]".format(address)

    return "{0}://{1}".format(scheme, address)
def canonical_url(configs, endpoint_type=PUBLIC):
    """Returns the correct HTTP URL to this host given the state of HTTPS
    configuration, hacluster and charm configuration.

    :param configs: OSTemplateRenderer config templating object to inspect
                    for a complete https context.
    :param endpoint_type: str endpoint type to resolve.
    :returns: str base URL for services on the current service unit.
    """
    scheme = 'http'
    if 'https' in configs.complete_contexts():
        scheme = 'https'

    address = resolve_address(endpoint_type)
    if is_ipv6(address):
        address = "[{}]".format(address)

    return '%s://%s' % (scheme, address)
def canonical_url(configs, endpoint_type=PUBLIC):
    """Returns the correct HTTP URL to this host given the state of HTTPS
    configuration, hacluster and charm configuration.

    :param configs: OSTemplateRenderer config templating object to inspect
                    for a complete https context.
    :param endpoint_type: str endpoint type to resolve.
    :returns: str base URL for services on the current service unit.
    """
    scheme = _get_scheme(configs)

    address = resolve_address(endpoint_type)
    if is_ipv6(address):
        address = "[{}]".format(address)

    return '%s://%s' % (scheme, address)
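# A minimal, stdlib-only sketch of the URL-building rule the canonical_url
# variants above share: IPv6 literals must be wrapped in brackets so the host
# part of a URL can be distinguished from a port. Addresses are hypothetical.
import ipaddress


def _base_url(scheme, address):
    try:
        if ipaddress.ip_address(address).version == 6:
            address = "[{}]".format(address)
    except ValueError:
        pass  # a hostname, not an IP literal
    return "{0}://{1}".format(scheme, address)


assert _base_url('https', '2001:db8::1') == 'https://[2001:db8::1]'
assert _base_url('http', '10.0.0.1') == 'http://10.0.0.1'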
def ha_joined(relation_id=None):
    cluster_config = get_hacluster_config()
    resources = {"res_ks_haproxy": "lsb:haproxy"}
    resource_params = {"res_ks_haproxy": 'op monitor interval="5s"'}

    if config("dns-ha"):
        update_dns_ha_resource_params(relation_id=relation_id,
                                      resources=resources,
                                      resource_params=resource_params)
    else:
        vip_group = []
        for vip in cluster_config["vip"].split():
            if is_ipv6(vip):
                res_ks_vip = "ocf:heartbeat:IPv6addr"
                vip_params = "ipv6addr"
            else:
                res_ks_vip = "ocf:heartbeat:IPaddr2"
                vip_params = "ip"

            iface = get_iface_for_address(vip) or config("vip_iface")
            netmask = get_netmask_for_address(vip) or config("vip_cidr")

            if iface is not None:
                vip_key = "res_ks_{}_vip".format(iface)
                if vip_key in vip_group:
                    log("Resource '%s' (vip='%s') already exists in "
                        "vip group - skipping" % (vip_key, vip), WARNING)
                    continue

                vip_group.append(vip_key)
                resources[vip_key] = res_ks_vip
                resource_params[vip_key] = (
                    'params {ip}="{vip}" cidr_netmask="{netmask}"'
                    ' nic="{iface}"'.format(ip=vip_params,
                                            vip=vip,
                                            iface=iface,
                                            netmask=netmask)
                )

        if len(vip_group) >= 1:
            relation_set(relation_id=relation_id,
                         groups={CLUSTER_RES: " ".join(vip_group)})

    init_services = {"res_ks_haproxy": "haproxy"}
    clones = {"cl_ks_haproxy": "res_ks_haproxy"}

    relation_set(relation_id=relation_id,
                 init_services=init_services,
                 corosync_bindiface=cluster_config["ha-bindiface"],
                 corosync_mcastport=cluster_config["ha-mcastport"],
                 resources=resources,
                 resource_params=resource_params,
                 clones=clones)
def get_ipv6_addr():
    """Exclude any ip addresses configured or managed by corosync."""
    excludes = []
    for rid in relation_ids('ha'):
        for unit in related_units(rid):
            resources = parse_data(rid, unit, 'resources')
            for res in resources.values():
                if 'ocf:heartbeat:IPv6addr' in res:
                    res_params = parse_data(rid, unit, 'resource_params')
                    res_p = res_params.get(res)
                    if res_p:
                        for k, v in res_p.items():
                            if utils.is_ipv6(v):
                                log("Excluding '%s' from address list" % v,
                                    level=DEBUG)
                                excludes.append(v)

    return utils.get_ipv6_addr(exc_list=excludes)[0]
def assess_status(configs):
    """Assess status of current unit."""
    required_interfaces = {}

    if is_paused():
        status_set("maintenance",
                   "Paused. Use 'resume' action to resume normal service.")
        return

    # Check for required swift-storage relation
    if len(relation_ids('swift-storage')) < 1:
        status_set('blocked', 'Missing relation: storage')
        return

    # Verify allowed_hosts is populated with enough unit IP addresses
    ctxt = SwiftRingContext()()
    if len(ctxt['allowed_hosts']) < config('replicas'):
        status_set('blocked', 'Not enough related storage nodes')
        return

    # Verify there are enough storage zones to satisfy minimum replicas
    rings = [r for r in SWIFT_RINGS.itervalues()]
    if not has_minimum_zones(rings):
        status_set('blocked', 'Not enough storage zones for minimum replicas')
        return

    if config('prefer-ipv6'):
        for rid in relation_ids('swift-storage'):
            for unit in related_units(rid):
                addr = relation_get(attribute='private-address',
                                    unit=unit, rid=rid)
                if not is_ipv6(addr):
                    status_set('blocked', 'Did not get IPv6 address from '
                               'storage relation (got=%s)' % (addr))
                    return

    if relation_ids('identity-service'):
        required_interfaces['identity'] = ['identity-service']

    if required_interfaces:
        set_os_workload_status(configs, required_interfaces)
    else:
        status_set('active', 'Unit is ready')
def get_keystone_client_from_relation(relation_type='identity-service'):
    """Get keystone client from relation data.

    :param relation_type: Relation to keystone
    :returns: Keystone client
    """
    required = ['admin_token', 'auth_host', 'auth_port', 'api_version']
    settings = {}

    rdata = {}
    for relid in relation_ids(relation_type):
        for unit in related_units(relid):
            rdata = relation_get(unit=unit, rid=relid) or {}
            if set(required).issubset(set(rdata.keys())):
                settings = {key: rdata.get(key) for key in required}
                break

    if not settings:
        log("Required settings not yet provided by any identity-service "
            "relation units", INFO)
        return None

    auth_protocol = rdata.get('auth_protocol', 'http')
    if is_ipv6(settings.get('auth_host')):
        settings['auth_host'] = format_ipv6_addr(settings.get('auth_host'))

    api_version = rdata.get('api_version')
    auth_endpoint = format_endpoint(auth_protocol, settings['auth_host'],
                                    settings['auth_port'],
                                    settings['api_version'])

    if api_version and '3' in api_version:
        ksclient = client_v3.Client(token=settings['admin_token'],
                                    endpoint=auth_endpoint)
    else:
        ksclient = client.Client(token=settings['admin_token'],
                                 endpoint=auth_endpoint)

    # Add simple way to retrieve keystone auth endpoint
    ksclient.auth_endpoint = auth_endpoint

    return ksclient
def sync_db_with_multi_ipv6_addresses(database, database_user,
                                      relation_prefix=None):
    hosts = get_ipv6_addr(dynamic_only=False)

    if config('vip'):
        vips = config('vip').split()
        for vip in vips:
            if vip and is_ipv6(vip):
                hosts.append(vip)

    kwargs = {'database': database,
              'username': database_user,
              'hostname': json.dumps(hosts)}

    if relation_prefix:
        for key in list(kwargs.keys()):
            kwargs["%s_%s" % (relation_prefix, key)] = kwargs[key]
            del kwargs[key]

    for rid in relation_ids('shared-db'):
        relation_set(relation_id=rid, **kwargs)
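# Worked example of the relation_prefix key rewriting performed in
# sync_db_with_multi_ipv6_addresses (the 'nova' prefix and the host list
# are hypothetical).
example_kwargs = {'database': 'nova', 'username': 'nova',
                  'hostname': '["fd00::1", "fd00::2"]'}
example_kwargs = {'%s_%s' % ('nova', key): val
                  for key, val in example_kwargs.items()}
assert sorted(example_kwargs) == ['nova_database', 'nova_hostname',
                                  'nova_username']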
def ha_joined(relation_id=None):
    cluster_config = get_hacluster_config()
    delete_resources = ['res_ceilometer_polling']

    resources = {
        'res_ceilometer_haproxy': 'lsb:haproxy',
        'res_ceilometer_agent_central': 'lsb:ceilometer-agent-central',
    }
    resource_params = {
        'res_ceilometer_haproxy': 'op monitor interval="5s"',
        'res_ceilometer_agent_central': 'op monitor interval="30s"'
    }

    if config('dns-ha'):
        update_dns_ha_resource_params(relation_id=relation_id,
                                      resources=resources,
                                      resource_params=resource_params)
    else:
        vip_group = []
        for vip in cluster_config['vip'].split():
            if is_ipv6(vip):
                res_ceilometer_vip = 'ocf:heartbeat:IPv6addr'
                vip_params = 'ipv6addr'
            else:
                res_ceilometer_vip = 'ocf:heartbeat:IPaddr2'
                vip_params = 'ip'

            iface = get_iface_for_address(vip)
            if iface is not None:
                vip_key = 'res_ceilometer_{}_vip'.format(iface)
                if vip_key in vip_group:
                    if vip not in resource_params[vip_key]:
                        vip_key = '{}_{}'.format(vip_key, vip_params)
                    else:
                        log("Resource '%s' (vip='%s') already exists in "
                            "vip group - skipping" % (vip_key, vip), WARNING)
                        continue

                resources[vip_key] = res_ceilometer_vip
                resource_params[vip_key] = (
                    'params {ip}="{vip}" cidr_netmask="{netmask}"'
                    ' nic="{iface}"'.format(
                        ip=vip_params, vip=vip, iface=iface,
                        netmask=get_netmask_for_address(vip)))
                vip_group.append(vip_key)

        if len(vip_group) >= 1:
            relation_set(relation_id=relation_id,
                         groups={'grp_ceilometer_vips': ' '.join(vip_group)})

    init_services = {'res_ceilometer_haproxy': 'haproxy'}
    clones = {'cl_ceilometer_haproxy': 'res_ceilometer_haproxy'}
    relation_set(relation_id=relation_id,
                 init_services=init_services,
                 corosync_bindiface=cluster_config['ha-bindiface'],
                 corosync_mcastport=cluster_config['ha-mcastport'],
                 resources=resources,
                 resource_params=resource_params,
                 delete_resources=delete_resources,
                 clones=clones)
def ha_joined(relation_id=None):
    cluster_config = get_hacluster_config()
    resources = {
        'res_neutron_haproxy': 'lsb:haproxy',
    }
    resource_params = {
        'res_neutron_haproxy': 'op monitor interval="5s"'
    }
    if config('dns-ha'):
        update_dns_ha_resource_params(relation_id=relation_id,
                                      resources=resources,
                                      resource_params=resource_params)
    else:
        vip_group = []
        for vip in cluster_config['vip'].split():
            if is_ipv6(vip):
                res_neutron_vip = 'ocf:heartbeat:IPv6addr'
                vip_params = 'ipv6addr'
            else:
                res_neutron_vip = 'ocf:heartbeat:IPaddr2'
                vip_params = 'ip'

            iface = (get_iface_for_address(vip) or
                     config('vip_iface'))
            netmask = (get_netmask_for_address(vip) or
                       config('vip_cidr'))

            if iface is not None:
                vip_key = 'res_neutron_{}_vip'.format(iface)
                if vip_key in vip_group:
                    if vip not in resource_params[vip_key]:
                        vip_key = '{}_{}'.format(vip_key, vip_params)
                    else:
                        log("Resource '%s' (vip='%s') already exists in "
                            "vip group - skipping" % (vip_key, vip), WARNING)
                        continue

                resources[vip_key] = res_neutron_vip
                resource_params[vip_key] = (
                    'params {ip}="{vip}" cidr_netmask="{netmask}" '
                    'nic="{iface}"'.format(ip=vip_params,
                                           vip=vip,
                                           iface=iface,
                                           netmask=netmask)
                )
                vip_group.append(vip_key)

        if len(vip_group) >= 1:
            relation_set(
                relation_id=relation_id,
                json_groups=json.dumps({
                    'grp_neutron_vips': ' '.join(vip_group)
                }, sort_keys=True)
            )

    init_services = {
        'res_neutron_haproxy': 'haproxy'
    }
    clones = {
        'cl_nova_haproxy': 'res_neutron_haproxy'
    }
    relation_set(relation_id=relation_id,
                 corosync_bindiface=cluster_config['ha-bindiface'],
                 corosync_mcastport=cluster_config['ha-mcastport'],
                 json_init_services=json.dumps(init_services,
                                               sort_keys=True),
                 json_resources=json.dumps(resources, sort_keys=True),
                 json_resource_params=json.dumps(resource_params,
                                                 sort_keys=True),
                 json_clones=json.dumps(clones, sort_keys=True))

    # NOTE(jamespage): Clear any non-json based keys
    relation_set(relation_id=relation_id,
                 groups=None, init_services=None, resources=None,
                 resource_params=None, clones=None)
def ha_joined(relation_id=None):
    if config('controller-app-mode') in ('msm', 'doctl'):
        cluster_config = get_hacluster_config()
        resources = {
            'res_msm_haproxy': 'lsb:haproxy',
        }
        resource_params = {
            'res_msm_haproxy': 'op monitor interval="5s"'
        }

        if config('dns-ha'):
            update_dns_ha_resource_params(relation_id=relation_id,
                                          resources=resources,
                                          resource_params=resource_params)
        else:
            vip_group = []
            for vip in cluster_config['vip'].split():
                if is_ipv6(vip):
                    res_msm_vip = 'ocf:heartbeat:IPv6addr'
                    vip_params = 'ipv6addr'
                else:
                    res_msm_vip = 'ocf:heartbeat:IPaddr2'
                    vip_params = 'ip'

                iface = (get_iface_for_address(vip) or
                         config('vip_iface'))
                netmask = (get_netmask_for_address(vip) or
                           config('vip_cidr'))

                if iface is not None:
                    vip_key = 'res_msm_{}_vip'.format(iface)
                    if vip_key in vip_group:
                        if vip not in resource_params[vip_key]:
                            vip_key = '{}_{}'.format(vip_key, vip_params)
                        else:
                            log("Resource '%s' (vip='%s') already exists "
                                "in vip group - skipping" % (vip_key, vip),
                                WARNING)
                            continue

                    resources[vip_key] = res_msm_vip
                    resource_params[vip_key] = (
                        'params {ip}="{vip}" cidr_netmask="{netmask}" '
                        'nic="{iface}"'.format(ip=vip_params,
                                               vip=vip,
                                               iface=iface,
                                               netmask=netmask)
                    )
                    vip_group.append(vip_key)

            if len(vip_group) >= 1:
                relation_set(
                    relation_id=relation_id,
                    json_groups=json.dumps({
                        'grp_msm_vips': ' '.join(vip_group)
                    }, sort_keys=True)
                )

        init_services = {
            'res_msm_haproxy': 'haproxy'
        }
        clones = {
            'cl_msm_haproxy': 'res_msm_haproxy'
        }
        relation_set(relation_id=relation_id,
                     corosync_bindiface=cluster_config['ha-bindiface'],
                     corosync_mcastport=cluster_config['ha-mcastport'],
                     json_init_services=json.dumps(init_services,
                                                   sort_keys=True),
                     json_resources=json.dumps(resources, sort_keys=True),
                     json_resource_params=json.dumps(resource_params,
                                                     sort_keys=True),
                     json_clones=json.dumps(clones, sort_keys=True))

        # NOTE(jamespage): Clear any non-json based keys
        relation_set(relation_id=relation_id,
                     groups=None, init_services=None, resources=None,
                     resource_params=None, clones=None)
def test_is_ipv6(self):
    self.assertFalse(net_ip.is_ipv6('myhost'))
    self.assertFalse(net_ip.is_ipv6('172.4.5.5'))
    self.assertTrue(net_ip.is_ipv6('2a01:348:2f4:0:685e:5748:ae62:209f'))