def test_get_ipv6_addr_w_iface(self, mock_get_iface_addr):
    """An explicitly supplied iface is forwarded verbatim to get_iface_addr."""
    mock_get_iface_addr.return_value = []
    net_ip.get_ipv6_addr(iface='testif', fatal=False)
    mock_get_iface_addr.assert_called_once_with(iface='testif',
                                                inet_type='AF_INET6',
                                                inc_aliases=False,
                                                fatal=False,
                                                exc_list=None)
def test_get_ipv6_global_dynamic_address_invalid_address(
        self, mock_get_iface_addr, mock_check_out):
    """get_ipv6_addr() raises when no usable global dynamic address exists."""
    # Case 1: interface has no addresses at all.
    mock_get_iface_addr.return_value = []
    with nose.tools.assert_raises(Exception):
        net_ip.get_ipv6_addr()

    # Case 2: an address exists but `ip` output contains no valid entry.
    mock_get_iface_addr.return_value = ['2001:db8:1:0:2918:3444:852:5b8a']
    mock_check_out.return_value = IP_OUTPUT_NO_VALID
    with nose.tools.assert_raises(Exception):
        net_ip.get_ipv6_addr()
def test_get_ipv6_addr_no_iface(self, mock_get_iface_addr,
                                mock_get_iface_from_addr, mock_unit_get):
    """Without an iface, the iface is derived from the unit's own address."""
    mock_unit_get.return_value = '1.2.3.4'
    mock_get_iface_addr.return_value = []
    mock_get_iface_from_addr.return_value = "testif"

    net_ip.get_ipv6_addr(fatal=False)

    mock_get_iface_from_addr.assert_called_once_with('1.2.3.4')
    mock_get_iface_addr.assert_called_once_with(iface='testif',
                                                inet_type='AF_INET6',
                                                inc_aliases=False,
                                                fatal=False,
                                                exc_list=None)
def ensure_host_resolvable_v6(hostname):
    """Ensure that we can resolve our hostname to an IPv6 address by adding it
    to /etc/hosts if it is not already resolvable.

    :param hostname: hostname to make resolvable over IPv6.
    """
    try:
        socket.getaddrinfo(hostname, None, socket.AF_INET6)
    except socket.gaierror:
        log("Host '%s' is not ipv6 resolvable - adding to /etc/hosts" %
            hostname, level=DEBUG)
    else:
        log("Host '%s' appears to be ipv6 resolvable" % (hostname),
            level=DEBUG)
        return

    # This must be the backend address used by haproxy
    host_addr = get_ipv6_addr(exc_list=[config('vip')])[0]
    dtmp = tempfile.mkdtemp()
    try:
        tmp_hosts = os.path.join(dtmp, 'hosts')
        shutil.copy('/etc/hosts', tmp_hosts)
        with open(tmp_hosts, 'a+') as fd:
            # BUG FIX: 'a+' positions the stream at EOF, so without a seek
            # readlines() returned nothing and the duplicate check below
            # never matched, appending a new entry on every call.
            fd.seek(0)
            lines = fd.readlines()
            # Raw string: avoid the invalid "\s" escape in a plain literal.
            key = r"^%s\s+" % (host_addr)
            for line in lines:
                if re.search(key, line):
                    break
            else:
                fd.write("%s\t%s\n" % (host_addr, hostname))

            os.rename(tmp_hosts, '/etc/hosts')
    finally:
        shutil.rmtree(dtmp)
def get_unit_ip(config_override=AMQP_OVERRIDE_CONFIG,
                interface=AMQP_INTERFACE):
    """Return this unit's IP.

    Future proof to allow for network spaces or other more complex address
    selection.

    @param config_override: string name of the config option for network
                            override. Default to amqp-network
    @param interface: string name of the relation. Default to amqp.
    @raises Exception if prefer-ipv6 is configured but IPv6 unsupported.
    @returns IPv6 or IPv4 address
    """
    fallback = get_host_ip(unit_get('private-address'))

    if config('prefer-ipv6'):
        assert_charm_supports_ipv6()
        return get_ipv6_addr()[0]

    if config(config_override):
        # NOTE(jamespage)
        # override private-address settings if access-network is
        # configured and an appropriate network interface is
        # configured.
        return get_address_in_network(config(config_override), fallback)

    # NOTE(jamespage)
    # Try using network spaces if access-network is not
    # configured, fallback to private address if not
    # supported
    try:
        return network_get_primary_address(interface)
    except NotImplementedError:
        return fallback
def cluster_joined():
    """Publish our addresses on the freshly joined cluster relation."""
    relation_settings = {}

    if config('prefer-ipv6'):
        addr = get_ipv6_addr(exc_list=[config('vip')])[0]
        relation_settings = {
            'private-address': addr,
            'hostname': socket.gethostname()
        }

    cluster_network = config('cluster-network')
    if cluster_network:
        # Explicit cluster network configured - resolve an address on it.
        relation_settings['cluster-address'] = \
            get_address_in_network(cluster_network, fatal=True)
    else:
        try:
            relation_settings['cluster-address'] = \
                network_get_primary_address('cluster')
        except NotImplementedError:
            # NOTE(jamespage): skip - fallback to previous behaviour
            pass

    log("Setting cluster relation: '%s'" % (relation_settings), level=INFO)
    relation_set(relation_settings=relation_settings)

    # Ensure all new peers are aware
    cluster_state_uuid = relation_get('bootstrap-uuid', unit=local_unit())
    if cluster_state_uuid:
        notify_bootstrapped(cluster_rid=relation_id(),
                            cluster_uuid=cluster_state_uuid)
def get_cluster_hosts():
    """Collect the hostnames/addresses of all percona cluster peers.

    Returns the list of hosts, and (in ipv6 mode) writes the mapping of
    addresses to hostnames to /etc/hosts for consistent resolution.
    """
    hosts_map = {}
    hostname = get_host_ip()
    hosts = [hostname]
    # We need to add this localhost dns name to /etc/hosts along with peer
    # hosts to ensure percona gets consistently resolved addresses.
    if config('prefer-ipv6'):
        addr = get_ipv6_addr(exc_list=[config('vip')], fatal=True)[0]
        hosts_map = {addr: hostname}

    for relid in relation_ids('cluster'):
        for unit in related_units(relid):
            rdata = relation_get(unit=unit, rid=relid)
            private_address = rdata.get('private-address')

            if not config('prefer-ipv6'):
                hosts.append(get_host_ip(private_address))
                continue

            hostname = rdata.get('hostname')
            if not hostname or hostname in hosts:
                log("(unit=%s) Ignoring hostname '%s' provided by cluster "
                    "relation for addr %s" %
                    (unit, hostname, private_address), level=DEBUG)
                continue
            else:
                log("(unit=%s) hostname '%s' provided by cluster relation "
                    "for addr %s" %
                    (unit, hostname, private_address), level=DEBUG)

            hosts_map[private_address] = hostname
            hosts.append(hostname)

    if hosts_map:
        update_hosts_file(hosts_map)

    return hosts
def __call__(self):
    '''
    Horizon specific HAProxy context; haproxy is used all the time
    in the openstack dashboard charm so a single instance just
    self refers
    '''
    cluster_hosts = {}
    l_unit = local_unit().replace('/', '-')

    # Our own address first - ipv6 aware.
    if config('prefer-ipv6'):
        cluster_hosts[l_unit] = get_ipv6_addr(exc_list=[config('vip')])[0]
    else:
        cluster_hosts[l_unit] = unit_get('private-address')

    # Then every peer on the cluster relation.
    for rid in relation_ids('cluster'):
        for unit in related_units(rid):
            cluster_hosts[unit.replace('/', '-')] = \
                relation_get('private-address', rid=rid, unit=unit)

    log('Ensuring haproxy enabled in /etc/default/haproxy.')
    with open('/etc/default/haproxy', 'w') as out:
        out.write('ENABLED=1\n')

    return {
        'units': cluster_hosts,
        'service_ports': {
            'dash_insecure': [80, 70],
            'dash_secure': [443, 433]
        },
        'prefer_ipv6': config('prefer-ipv6')
    }
def get_db_host(client_hostname):
    """Get address of local database host.

    If an access-network has been configured, expect selected address to be
    on that network. If none can be found, revert to primary address.

    If vip(s) are configured, chooses first available.
    """
    vips = config('vip').split() if config('vip') else []

    access_network = config('access-network')
    if access_network:
        client_ip = get_host_ip(client_hostname)
        if is_address_in_network(access_network, client_ip):
            if is_clustered():
                # Prefer a vip that sits on the access network.
                for vip in vips:
                    if is_address_in_network(access_network, vip):
                        return vip

                log("Unable to identify a VIP in the access-network '%s'" %
                    (access_network), level=WARNING)
            else:
                return get_address_in_network(access_network)
        else:
            log("Client address '%s' not in access-network '%s'" %
                (client_ip, access_network), level=WARNING)

    if is_clustered() and vips:
        return vips[0]  # NOTE on private network

    if config('prefer-ipv6'):
        return get_ipv6_addr(exc_list=vips)[0]

    return unit_get('private-address')
def db_changed(relation_id=None, unit=None, admin=None):
    """Configure DB access for a remote unit and publish credentials."""
    if not is_elected_leader(DC_RESOURCE_NAME):
        log('Service is peered, clearing db relation'
            ' as this service unit is not the leader')
        relation_clear(relation_id)
        return

    # Address clients should use to reach the database.
    if is_clustered():
        db_host = config('vip')
    elif config('prefer-ipv6'):
        db_host = get_ipv6_addr(exc_list=[config('vip')])[0]
    else:
        db_host = unit_get('private-address')

    if admin not in [True, False]:
        # Infer admin access from the relation type when not given.
        admin = relation_type() == 'db-admin'

    db_name, _ = remote_unit().split("/")
    username = db_name
    db_helper = get_db_helper()
    addr = relation_get('private-address', unit=unit, rid=relation_id)
    password = db_helper.configure_db(addr, db_name, username, admin=admin)

    relation_set(relation_id=relation_id,
                 relation_settings={
                     'user': username,
                     'password': password,
                     'host': db_host,
                     'database': db_name,
                 })
def emit_cephconf():
    """Render ceph.conf and register it via the alternatives system."""
    cephcontext = {
        'auth_supported': config('auth-supported'),
        'mon_hosts': ' '.join(get_mon_hosts()),
        'fsid': leader_get('fsid'),
        'old_auth': cmp_pkgrevno('ceph', "0.51") < 0,
        'osd_journal_size': config('osd-journal-size'),
        'use_syslog': str(config('use-syslog')).lower(),
        'ceph_public_network': config('ceph-public-network'),
        'ceph_cluster_network': config('ceph-cluster-network'),
    }

    if config('prefer-ipv6'):
        # Fall back to our dynamic ipv6 address for any network not set.
        dynamic_ipv6_address = get_ipv6_addr()[0]
        if not config('ceph-public-network'):
            cephcontext['public_addr'] = dynamic_ipv6_address
        if not config('ceph-cluster-network'):
            cephcontext['cluster_addr'] = dynamic_ipv6_address

    # Install ceph.conf as an alternative to support
    # co-existence with other charms that write this file
    charm_ceph_conf = "/var/lib/charm/{}/ceph.conf".format(service_name())
    mkdir(os.path.dirname(charm_ceph_conf))
    with open(charm_ceph_conf, 'w') as cephconf:
        cephconf.write(render_template('ceph.conf', cephcontext))

    install_alternative('ceph.conf', '/etc/ceph/ceph.conf',
                        charm_ceph_conf, 100)
def resolve_address(endpoint_type=PUBLIC):
    """Resolve the address to publish for the given endpoint type.

    Clustered: return the configured vip (or the vip that falls inside the
    endpoint's configured network). Not clustered: return the unit's own
    address on the configured network, preferring IPv6 when requested.

    :param endpoint_type: key into _address_map (e.g. PUBLIC)
    :raises ValueError: if no suitable address can be determined
    """
    resolved_address = None
    if is_clustered():
        if config(_address_map[endpoint_type]['config']) is None:
            # Assume vip is simple and pass back directly
            resolved_address = config('vip')
        else:
            for vip in config('vip').split():
                if is_address_in_network(
                        config(_address_map[endpoint_type]['config']),
                        vip):
                    resolved_address = vip
                    # BUG FIX: stop at the first matching vip rather than
                    # letting a later iteration overwrite it.
                    break
    else:
        if config('prefer-ipv6'):
            # BUG FIX: get_ipv6_addr() returns a *list* of addresses;
            # the fallback must be a single address, so take the first.
            fallback_addr = get_ipv6_addr()[0]
        else:
            fallback_addr = unit_get(_address_map[endpoint_type]['fallback'])

        resolved_address = get_address_in_network(
            config(_address_map[endpoint_type]['config']), fallback_addr)

    if resolved_address is None:
        raise ValueError('Unable to resolve a suitable IP address'
                         ' based on charm state and configuration')
    else:
        return resolved_address
def ensure_host_resolvable_v6(hostname):
    """Ensure that we can resolve our hostname to an IPv6 address by adding it
    to /etc/hosts if it is not already resolvable.

    :param hostname: hostname to make resolvable over IPv6.
    """
    try:
        socket.getaddrinfo(hostname, None, socket.AF_INET6)
    except socket.gaierror:
        log("Host '%s' is not ipv6 resolvable - adding to /etc/hosts" %
            hostname, level=DEBUG)
    else:
        log("Host '%s' appears to be ipv6 resolvable" % (hostname),
            level=DEBUG)
        return

    # This must be the backend address used by haproxy
    host_addr = get_ipv6_addr(exc_list=[config('vip')])[0]
    dtmp = tempfile.mkdtemp()
    try:
        tmp_hosts = os.path.join(dtmp, 'hosts')
        shutil.copy('/etc/hosts', tmp_hosts)
        with open(tmp_hosts, 'a+') as fd:
            # BUG FIX: 'a+' positions the stream at EOF, so without a seek
            # readlines() returned nothing and the duplicate check below
            # never matched, appending a new entry on every call.
            fd.seek(0)
            lines = fd.readlines()
            # Hoisted: the pattern does not depend on the loop variable.
            key = r"^%s\s+" % (host_addr)
            for line in lines:
                if re.search(key, line):
                    break
            else:
                fd.write("%s\t%s\n" % (host_addr, hostname))

            os.rename(tmp_hosts, '/etc/hosts')
    finally:
        shutil.rmtree(dtmp)
def get_ceph_context():
    """Build the template context used to render ceph.conf."""
    public_network = ', '.join(get_networks('ceph-public-network'))
    cluster_network = ', '.join(get_networks('ceph-cluster-network'))

    cephcontext = {
        'auth_supported': config('auth-supported'),
        'mon_hosts': ' '.join(get_mon_hosts()),
        'fsid': leader_get('fsid'),
        'old_auth': cmp_pkgrevno('ceph', "0.51") < 0,
        'use_syslog': str(config('use-syslog')).lower(),
        'ceph_public_network': public_network,
        'ceph_cluster_network': cluster_network,
        'loglevel': config('loglevel'),
        'dio': str(config('use-direct-io')).lower(),
    }

    if config('prefer-ipv6'):
        # Use our dynamic ipv6 address for any network left unconfigured.
        dynamic_ipv6_address = get_ipv6_addr()[0]
        if not public_network:
            cephcontext['public_addr'] = dynamic_ipv6_address
        if not cluster_network:
            cephcontext['cluster_addr'] = dynamic_ipv6_address
    else:
        cephcontext['public_addr'] = get_public_addr()
        cephcontext['cluster_addr'] = get_cluster_addr()

    # NOTE(dosaboy): these sections must correspond to what is supported in
    #                the config template.
    sections = ['global', 'mds', 'mon']
    cephcontext.update(CephConfContext(permitted_sections=sections)())
    return cephcontext
def emit_cephconf():
    """Render ceph.conf and register it via the alternatives system."""
    public_network = ', '.join(get_networks('ceph-public-network'))
    cluster_network = ', '.join(get_networks('ceph-cluster-network'))

    cephcontext = {
        'auth_supported': config('auth-supported'),
        'mon_hosts': ' '.join(get_mon_hosts()),
        'fsid': leader_get('fsid'),
        'old_auth': cmp_pkgrevno('ceph', "0.51") < 0,
        'osd_journal_size': config('osd-journal-size'),
        'use_syslog': str(config('use-syslog')).lower(),
        'ceph_public_network': public_network,
        'ceph_cluster_network': cluster_network,
        'loglevel': config('loglevel'),
    }

    if config('prefer-ipv6'):
        # Use our dynamic ipv6 address for any network left unconfigured.
        dynamic_ipv6_address = get_ipv6_addr()[0]
        if not public_network:
            cephcontext['public_addr'] = dynamic_ipv6_address
        if not cluster_network:
            cephcontext['cluster_addr'] = dynamic_ipv6_address

    # Install ceph.conf as an alternative to support
    # co-existence with other charms that write this file
    charm_ceph_conf = "/var/lib/charm/{}/ceph.conf".format(service_name())
    mkdir(os.path.dirname(charm_ceph_conf), owner=ceph.ceph_user(),
          group=ceph.ceph_user())
    render('ceph.conf', charm_ceph_conf, cephcontext, perms=0o644)
    install_alternative('ceph.conf', '/etc/ceph/ceph.conf',
                        charm_ceph_conf, 100)
def cluster_joined(relation_id=None):
    """Join the rabbitmq cluster relation and propagate the erlang cookie."""
    if config('prefer-ipv6'):
        relation_settings = {'hostname': socket.gethostname(),
                             'private-address': get_ipv6_addr()[0]}
        relation_set(relation_id=relation_id,
                     relation_settings=relation_settings)

    if is_relation_made('ha') and \
            config('ha-vip-only') is False:
        log('hacluster relation is present, skipping native '
            'rabbitmq cluster config.')
        return

    configure_nodename()

    try:
        if not is_leader():
            log('Not the leader, deferring cookie propagation to leader')
            return
    except NotImplementedError:
        # Fall back to relation-id comparison on older juju.
        if is_newer():
            log('cluster_joined: Relation greater.')
            return

    if not os.path.isfile(rabbit.COOKIE_PATH):
        log('erlang cookie missing from %s' % rabbit.COOKIE_PATH,
            level=ERROR)
        return

    if not is_sufficient_peers():
        return

    if is_elected_leader('res_rabbitmq_vip'):
        cookie = open(rabbit.COOKIE_PATH, 'r').read().strip()
        peer_store('cookie', cookie)
def get_ceph_context(upgrading=False):
    """Returns the current context dictionary for generating ceph.conf

    :param upgrading: bool - determines if the context is invoked as
                      part of an upgrade proedure Setting this to true
                      causes settings useful during an upgrade to be
                      defined in the ceph.conf file
    """
    mon_hosts = get_mon_hosts()
    log('Monitor hosts are ' + repr(mon_hosts))

    networks = get_networks('ceph-public-network')
    public_network = ', '.join(networks)

    networks = get_networks('ceph-cluster-network')
    cluster_network = ', '.join(networks)

    cephcontext = {
        'auth_supported': get_auth(),
        'mon_hosts': ' '.join(mon_hosts),
        'fsid': get_fsid(),
        'old_auth': cmp_pkgrevno('ceph', "0.51") < 0,
        'osd_journal_size': config('osd-journal-size'),
        'use_syslog': str(config('use-syslog')).lower(),
        'ceph_public_network': public_network,
        'ceph_cluster_network': cluster_network,
        'loglevel': config('loglevel'),
        'dio': str(config('use-direct-io')).lower(),
        'short_object_len': use_short_objects(),
        'upgrade_in_progress': upgrading,
        'bluestore': config('bluestore'),
    }

    if config('prefer-ipv6'):
        dynamic_ipv6_address = get_ipv6_addr()[0]
        if not public_network:
            cephcontext['public_addr'] = dynamic_ipv6_address
        if not cluster_network:
            cephcontext['cluster_addr'] = dynamic_ipv6_address
    else:
        cephcontext['public_addr'] = get_public_addr()
        cephcontext['cluster_addr'] = get_cluster_addr()

    if config('customize-failure-domain'):
        az = az_info()
        if az:
            cephcontext['crush_location'] = "root=default {} host={}" \
                .format(az, socket.gethostname())
        else:
            # BUG FIX: the adjacent string literals previously concatenated
            # to "doesn'thave support" - add the missing space.
            log(
                "Your Juju environment doesn't "
                "have support for Availability Zones"
            )

    # NOTE(dosaboy): these sections must correspond to what is supported in
    #                the config template.
    sections = ['global', 'osd']
    cephcontext.update(CephConfContext(permitted_sections=sections)())
    return cephcontext
def get_db_host(client_hostname, interface='shared-db'):
    """Get address of local database host.

    If an access-network has been configured, expect selected address to be
    on that network. If none can be found, revert to primary address.

    If network spaces are supported (Juju >= 2.0), use network-get to
    retrieve the network binding for the interface.

    If vip(s) are configured, chooses first available.
    """
    vips = config('vip').split() if config('vip') else []
    dns_ha = config('dns-ha')
    access_network = config('access-network')
    client_ip = get_host_ip(client_hostname)

    if is_clustered() and dns_ha:
        log("Using DNS HA hostname: {}".format(config('os-access-hostname')))
        return config('os-access-hostname')
    elif access_network:
        if is_address_in_network(access_network, client_ip):
            if is_clustered():
                # Prefer a vip on the access network.
                for vip in vips:
                    if is_address_in_network(access_network, vip):
                        return vip

                log("Unable to identify a VIP in the access-network '%s'" %
                    (access_network), level=WARNING)
            else:
                return get_address_in_network(access_network)
        else:
            log("Client address '%s' not in access-network '%s'" %
                (client_ip, access_network), level=WARNING)
    else:
        try:
            # NOTE(jamespage)
            # Try to use network spaces to resolve binding for
            # interface, and to resolve the VIP associated with
            # the binding if provided.
            interface_binding = network_get_primary_address(interface)
            if is_clustered() and vips:
                interface_cidr = resolve_network_cidr(interface_binding)
                for vip in vips:
                    if is_address_in_network(interface_cidr, vip):
                        return vip

            return interface_binding
        except NotImplementedError:
            # NOTE(jamespage): skip - fallback to previous behaviour
            pass

    if is_clustered() and vips:
        return vips[0]  # NOTE on private network

    if config('prefer-ipv6'):
        return get_ipv6_addr(exc_list=vips)[0]

    return unit_get('private-address')
def test_get_ipv6_addr(self, _interfaces, _ifaddresses, mock_check_out,
                       mock_get_iface_from_addr):
    """A global scope address is returned when dynamic_only is False."""
    mock_get_iface_from_addr.return_value = 'eth0'
    mock_check_out.return_value = \
        b"inet6 2a01:348:2f4:0:685e:5748:ae62:209f/64 scope global dynamic"
    _interfaces.return_value = DUMMY_ADDRESSES.keys()
    _ifaddresses.side_effect = DUMMY_ADDRESSES.__getitem__

    result = net_ip.get_ipv6_addr(dynamic_only=False)
    self.assertEqual(['2a01:348:2f4:0:685e:5748:ae62:209f'], result)
def local_address(self):
    """Return remotely accessible address of charm (not localhost)

    @return IPv6 address if ipv6 support is requested, otherwise the
            resolved IPv4 host address.
    """
    if not self.ipv6_mode:
        return ch_utils.get_host_ip(hookenv.unit_get('private-address'))

    return ch_ip.get_ipv6_addr(exc_list=[self.vip])[0]
def local_address(self):
    """Return remotely accessible address of charm (not localhost)

    @return IPv6 address if ipv6 support is requested, otherwise the
            resolved IPv4 host address.
    """
    if not self.ipv6_mode:
        return ch_utils.get_host_ip(
            hookenv.unit_get('private-address'))

    return ch_ip.get_ipv6_addr(exc_list=[self.vip])[0]
def cluster_joined(relation_id=None):
    """Advertise per-network-type addresses on the cluster relation."""
    for addr_type in ADDRESS_TYPES:
        address = get_address_in_network(
            config('os-{}-network'.format(addr_type)))
        if address:
            relation_set(
                relation_id=relation_id,
                relation_settings={'{}-address'.format(addr_type): address})

    if config('prefer-ipv6'):
        # Replace the default private-address with our ipv6 address.
        private_addr = get_ipv6_addr(exc_list=[config('vip')])[0]
        relation_set(relation_id=relation_id,
                     relation_settings={'private-address': private_addr})
def __call__(self):
    """Provide the unit's host address to the template context."""
    ctxt = {}
    if config('prefer-ipv6'):
        host_ip = get_ipv6_addr()[0]
    else:
        host_ip = get_host_ip(unit_get('private-address'))

    if host_ip:
        # NOTE: do not format this even for ipv6 (see bug 1499656)
        ctxt['host_ip'] = host_ip

    return ctxt
def test_get_ipv6_global_dynamic_address_ip2(self, mock_get_iface_addr,
                                             mock_check_out,
                                             mock_get_iface_from_addr):
    """Only the global *dynamic* address is selected from the candidates."""
    mock_get_iface_from_addr.return_value = 'eth0'
    mock_check_out.return_value = IP2_OUTPUT

    scope_global_addr = '2001:db8:1:0:d0cf:528c:23eb:6000'
    scope_global_dyn_addr = '2001:db8:1:0:f816:3eff:fe2a:ccce'
    mock_get_iface_addr.return_value = [
        scope_global_addr,
        scope_global_dyn_addr,
        '2001:db8:1:0:2918:3444:852:5b8a',
        'fe80::f816:3eff:fe2a:ccce%eth0',
    ]

    self.assertEqual([scope_global_dyn_addr], net_ip.get_ipv6_addr())
def get_db_host(client_hostname, interface='shared-db'):
    """Get address of local database host.

    If an access-network has been configured, expect selected address to be
    on that network. If none can be found, revert to primary address.

    If network spaces are supported (Juju >= 2.0), use network-get to
    retrieve the network binding for the interface.

    If vip(s) are configured, chooses first available.
    """
    vips = config('vip').split() if config('vip') else []
    access_network = config('access-network')
    client_ip = get_host_ip(client_hostname)

    if access_network:
        if is_address_in_network(access_network, client_ip):
            if is_clustered():
                # Prefer a vip on the access network.
                for vip in vips:
                    if is_address_in_network(access_network, vip):
                        return vip

                log("Unable to identify a VIP in the access-network '%s'" %
                    (access_network), level=WARNING)
            else:
                return get_address_in_network(access_network)
        else:
            log("Client address '%s' not in access-network '%s'" %
                (client_ip, access_network), level=WARNING)
    else:
        try:
            # NOTE(jamespage)
            # Try to use network spaces to resolve binding for
            # interface, and to resolve the VIP associated with
            # the binding if provided.
            interface_binding = network_get_primary_address(interface)
            if is_clustered() and vips:
                interface_cidr = resolve_network_cidr(interface_binding)
                for vip in vips:
                    if is_address_in_network(interface_cidr, vip):
                        return vip

            return interface_binding
        except NotImplementedError:
            # NOTE(jamespage): skip - fallback to previous behaviour
            pass

    if is_clustered() and vips:
        return vips[0]  # NOTE on private network

    if config('prefer-ipv6'):
        return get_ipv6_addr(exc_list=vips)[0]

    return unit_get('private-address')
def test_get_ipv6_addr_exc_list(self, _interfaces, _ifaddresses,
                                mock_get_iface_from_addr):
    """Addresses on the exclusion list are filtered from the result."""
    _interfaces.return_value = ['eth0', 'eth1']
    _ifaddresses.side_effect = lambda iface: DUMMY_ADDRESSES[iface]

    result = net_ip.get_ipv6_addr(
        exc_list='2a01:348:2f4:0:685e:5748:ae62:209f',
        inc_aliases=True,
        fatal=False)
    self.assertEqual([], result)
def cluster_joined():
    """Publish our cluster address (and ipv6 details) on the relation."""
    relation_settings = {}
    if config('prefer-ipv6'):
        addr = get_ipv6_addr(exc_list=[config('vip')])[0]
        relation_settings = {'private-address': addr,
                             'hostname': socket.gethostname()}

    relation_settings['cluster-address'] = get_cluster_host_ip()

    log("Setting cluster relation: '%s'" % (relation_settings), level=INFO)
    relation_set(relation_settings=relation_settings)
def cluster_joined():
    """Publish ipv6 address details and notify peers of bootstrap state."""
    if config('prefer-ipv6'):
        addr = get_ipv6_addr(exc_list=[config('vip')])[0]
        relation_settings = {'private-address': addr,
                             'hostname': socket.gethostname()}
        log("Setting cluster relation: '%s'" % (relation_settings),
            level=INFO)
        relation_set(relation_settings=relation_settings)

    # Ensure all new peers are aware
    cluster_state_uuid = relation_get('bootstrap-uuid', unit=local_unit())
    if cluster_state_uuid:
        notify_bootstrapped(cluster_rid=relation_id(),
                            cluster_uuid=cluster_state_uuid)
def cluster_joined(relation_id=None):
    """Advertise per-network-type addresses on the cluster relation.

    :param relation_id: optional relation id to operate on.
    """
    for addr_type in ADDRESS_TYPES:
        netaddr_cfg = 'os-{}-network'.format(addr_type)
        address = get_address_in_network(config(netaddr_cfg))
        if address:
            settings = {'{}-address'.format(addr_type): address}
            relation_set(relation_id=relation_id,
                         relation_settings=settings)

    if config('prefer-ipv6'):
        private_addr = get_ipv6_addr(exc_list=[config('vip')])[0]
        relation_set(relation_id=relation_id,
                     relation_settings={'private-address': private_addr})
    # CLEANUP: the previous else-branch assigned unit_get('private-address')
    # to a local that was never used - removed as dead code.
def cluster_joined(rid=None):
    """Advertise our addresses on the cluster relation in one pass."""
    settings = {}

    for addr_type in ADDRESS_TYPES:
        address = get_address_in_network(
            config('os-{}-network'.format(addr_type)))
        if address:
            settings['{}-address'.format(addr_type)] = address

    if config('prefer-ipv6'):
        # Replace the default private-address with our ipv6 address.
        settings['private-address'] = \
            get_ipv6_addr(exc_list=[config('vip')])[0]

    relation_set(relation_id=rid, **settings)
def __call__(self):
    """Build rsync context listing the storage units allowed to connect."""
    allowed_hosts = []
    for relid in relation_ids('swift-storage'):
        for unit in related_units(relid):
            host = relation_get('private-address', unit, relid)
            if config('prefer-ipv6'):
                host_ip = get_ipv6_addr(exc_list=[config('vip')])[0]
            else:
                host_ip = get_host_ip(host)

            allowed_hosts.append(host_ip)

    return {'www_dir': WWW_DIR, 'allowed_hosts': allowed_hosts}
def resolve_ports(self, ports):
    """Resolve NICs not yet bound to bridge(s)

    If hwaddress provided then returns resolved hwaddress otherwise NIC.
    """
    if not ports:
        return None

    hwaddr_to_nic = {}
    hwaddr_to_ip = {}
    for nic in list_nics():
        # Ignore virtual interfaces (bond masters will be identified from
        # their slaves)
        if not is_phy_iface(nic):
            continue

        master = get_bond_master(nic)
        if master:
            log("Replacing iface '%s' with bond master '%s'" %
                (nic, master), level=DEBUG)
            nic = master

        hwaddr = get_nic_hwaddr(nic)
        hwaddr_to_nic[hwaddr] = nic
        hwaddr_to_ip[hwaddr] = (get_ipv4_addr(nic, fatal=False) +
                                get_ipv6_addr(iface=nic, fatal=False))

    mac_regex = re.compile(r'([0-9A-F]{2}[:-]){5}([0-9A-F]{2})', re.I)
    resolved = []
    for entry in ports:
        if re.match(mac_regex, entry):
            # NIC is in known NICs and does NOT have an IP address
            if entry in hwaddr_to_nic and not hwaddr_to_ip[entry]:
                # If the nic is part of a bridge then don't use it
                if is_bridge_member(hwaddr_to_nic[entry]):
                    continue

                # Entry is a MAC address for a valid interface that doesn't
                # have an IP address assigned yet.
                resolved.append(hwaddr_to_nic[entry])
        else:
            # If the passed entry is not a MAC address, assume it's a valid
            # interface, and that the user put it there on purpose (we can
            # trust it to be the real external network).
            resolved.append(entry)

    # Ensure no duplicates
    return list(set(resolved))
def sync_db_with_multi_ipv6_addresses(database, database_user,
                                      relation_prefix=None):
    """Publish all of our ipv6 addresses on every shared-db relation."""
    hosts = get_ipv6_addr(dynamic_only=False)

    kwargs = {'database': database,
              'username': database_user,
              'hostname': json.dumps(hosts)}

    if relation_prefix:
        # Re-key each setting with the requested prefix.
        for key in list(kwargs.keys()):
            kwargs["%s_%s" % (relation_prefix, key)] = kwargs.pop(key)

    for rid in relation_ids('shared-db'):
        relation_set(relation_id=rid, **kwargs)
def get_host_ip(hostname=None):
    """Resolve hostname (default: our private-address) to an IP address."""
    if config('prefer-ipv6'):
        return get_ipv6_addr()[0]

    hostname = hostname or unit_get('private-address')
    try:
        # Test to see if already an IPv4 address
        socket.inet_aton(hostname)
        return hostname
    except socket.error:
        # This may throw an NXDOMAIN exception; in which case
        # things are badly broken so just let it kill the hook
        answers = dns.resolver.query(hostname, 'A')
        if answers:
            return answers[0].address
def get_host_ip(hostname=None):
    """Resolve hostname (default: our private-address) to an IP address."""
    try:
        import dns.resolver
    except ImportError:
        # dnspython is not guaranteed to be present - install on demand.
        apt_install(filter_installed_packages(['python-dnspython']),
                    fatal=True)
        import dns.resolver

    if config('prefer-ipv6'):
        # Ensure we have a valid ipv6 address configured
        get_ipv6_addr(exc_list=[config('vip')], fatal=True)[0]
        return socket.gethostname()

    hostname = hostname or unit_get('private-address')
    try:
        # Test to see if already an IPv4 address
        socket.inet_aton(hostname)
        return hostname
    except socket.error:
        # This may throw an NXDOMAIN exception; in which case
        # things are badly broken so just let it kill the hook
        answers = dns.resolver.query(hostname, 'A')
        if answers:
            return answers[0].address
def get_cluster_hosts():
    """Collect the hostnames/addresses of all percona cluster peers.

    Returns the list of hosts, and (in ipv6 mode) writes the mapping of
    addresses to hostnames to /etc/hosts for consistent resolution.
    """
    hosts_map = {}

    if config('cluster-network'):
        hostname = get_address_in_network(config('cluster-network'),
                                          fatal=True)
    else:
        try:
            hostname = network_get_primary_address('cluster')
        except NotImplementedError:
            # NOTE(jamespage): skip - fallback to previous behaviour
            hostname = get_host_ip()

    # We need to add this localhost dns name to /etc/hosts along with peer
    # hosts to ensure percona gets consistently resolved addresses.
    if config('prefer-ipv6'):
        addr = get_ipv6_addr(exc_list=[config('vip')], fatal=True)[0]
        hosts_map = {addr: hostname}

    hosts = [hostname]
    for relid in relation_ids('cluster'):
        for unit in related_units(relid):
            rdata = relation_get(unit=unit, rid=relid)
            # NOTE(dosaboy): see LP: #1599447
            cluster_address = rdata.get('cluster-address',
                                        rdata.get('private-address'))

            if not config('prefer-ipv6'):
                hosts.append(get_host_ip(cluster_address))
                continue

            hostname = rdata.get('hostname')
            if not hostname or hostname in hosts:
                log("(unit=%s) Ignoring hostname '%s' provided by cluster "
                    "relation for addr %s" %
                    (unit, hostname, cluster_address), level=DEBUG)
                continue
            else:
                log("(unit=%s) hostname '%s' provided by cluster relation "
                    "for addr %s" %
                    (unit, hostname, cluster_address), level=DEBUG)

            hosts_map[cluster_address] = hostname
            hosts.append(hostname)

    if hosts_map:
        update_hosts_file(hosts_map)

    return hosts
def compute_joined(rid=None):
    """Advertise hostname/migration settings on the compute relation."""
    # NOTE(james-page) in MAAS environments the actual hostname is a CNAME
    # record so won't get scanned based on private-address which is an IP
    # add the hostname configured locally to the relation.
    settings = {'hostname': gethostname()}

    if config('prefer-ipv6'):
        settings = {'private-address': get_ipv6_addr()[0]}

    if migration_enabled():
        auth_type = config('migration-auth-type')
        settings['migration_auth_type'] = auth_type
        if auth_type == 'ssh':
            settings['ssh_public_key'] = public_ssh_key()

        relation_set(relation_id=rid, **settings)

    if config('enable-resize'):
        settings['nova_ssh_public_key'] = public_ssh_key(user='******')
        relation_set(relation_id=rid, **settings)
def cluster_joined():
    """Authorize peers for unison sync and advertise our addresses."""
    unison.ssh_authorized_peers(user=SSH_USER,
                                group="juju_keystone",
                                peer_interface="cluster",
                                ensure_local_user=True)

    settings = {}
    for addr_type in ADDRESS_TYPES:
        address = get_address_in_network(
            config("os-{}-network".format(addr_type)))
        if address:
            settings["{}-address".format(addr_type)] = address

    if config("prefer-ipv6"):
        # Replace the default private-address with our ipv6 address.
        settings["private-address"] = \
            get_ipv6_addr(exc_list=[config("vip")])[0]

    relation_set(relation_settings=settings)
    send_ssl_sync_request()
def __call__(self):
    """Build rsync context listing the storage units allowed to connect."""
    allowed_hosts = []
    for relid in relation_ids('swift-storage'):
        for unit in related_units(relid):
            host = relation_get('private-address', unit, relid)
            if config('prefer-ipv6'):
                host_ip = get_ipv6_addr(exc_list=[config('vip')])[0]
            else:
                host_ip = get_host_ip(host)

            allowed_hosts.append(host_ip)

    return {
        'www_dir': WWW_DIR,
        'allowed_hosts': allowed_hosts
    }
def resolve_address(endpoint_type=PUBLIC): """Return unit address depending on net config. If unit is clustered with vip(s) and has net splits defined, return vip on correct network. If clustered with no nets defined, return primary vip. If not clustered, return unit address ensuring address is on configured net split if one is configured. :param endpoint_type: Network endpoing type """ resolved_address = _get_address_override(endpoint_type) if resolved_address: return resolved_address vips = config('vip') if vips: vips = vips.split() net_type = ADDRESS_MAP[endpoint_type]['config'] net_addr = config(net_type) net_fallback = ADDRESS_MAP[endpoint_type]['fallback'] clustered = is_clustered() if clustered: if not net_addr: # If no net-splits defined, we expect a single vip resolved_address = vips[0] else: for vip in vips: if is_address_in_network(net_addr, vip): resolved_address = vip break else: if config('prefer-ipv6'): fallback_addr = get_ipv6_addr(exc_list=vips)[0] else: fallback_addr = unit_get(net_fallback) resolved_address = get_address_in_network(net_addr, fallback_addr) if resolved_address is None: raise ValueError("Unable to resolve a suitable IP address based on " "charm state and configuration. (net_type=%s, " "clustered=%s)" % (net_type, clustered)) return resolved_address
def cluster_joined(relation_id=None):
    """Advertise addresses on the cluster relation; check DB init state."""
    for addr_type in ADDRESS_TYPES:
        address = get_address_in_network(
            config('os-{}-network'.format(addr_type)))
        if address:
            relation_set(
                relation_id=relation_id,
                relation_settings={'{}-address'.format(addr_type): address})

    # Only do if this is fired by cluster rel
    if not relation_id:
        check_db_initialised()

    if config('prefer-ipv6'):
        private_addr = get_ipv6_addr(exc_list=[config('vip')])[0]
        relation_set(relation_id=relation_id,
                     relation_settings={'private-address': private_addr})
def __call__(self):
    '''
    Builds half a context for the haproxy template, which describes
    all peers to be included in the cluster.  Each charm needs to include
    its own context generator that describes the port mapping.
    '''
    if not relation_ids('cluster'):
        return {}

    cluster_hosts = {}
    l_unit = local_unit().replace('/', '-')

    if config('prefer-ipv6'):
        # BUG FIX: get_ipv6_addr() returns a *list*; the fallback passed
        # to get_address_in_network() must be a single address.
        addr = get_ipv6_addr()[0]
    else:
        addr = unit_get('private-address')

    cluster_hosts[l_unit] = get_address_in_network(
        config('os-internal-network'), addr)

    for rid in relation_ids('cluster'):
        for unit in related_units(rid):
            _unit = unit.replace('/', '-')
            addr = relation_get('private-address', rid=rid, unit=unit)
            cluster_hosts[_unit] = addr

    ctxt = {
        'units': cluster_hosts,
    }

    if config('prefer-ipv6'):
        ctxt['local_host'] = 'ip6-localhost'
        ctxt['haproxy_host'] = '::'
        ctxt['stat_port'] = ':::8888'
    else:
        ctxt['local_host'] = '127.0.0.1'
        ctxt['haproxy_host'] = '0.0.0.0'
        ctxt['stat_port'] = ':8888'

    if len(cluster_hosts.keys()) > 1:
        # Enable haproxy when we have enough peers.
        log('Ensuring haproxy enabled in /etc/default/haproxy.')
        with open('/etc/default/haproxy', 'w') as out:
            out.write('ENABLED=1\n')

        return ctxt

    log('HAProxy context is incomplete, this unit has no peers.')
    return {}
def compute_joined(rid=None):
    """Advertise hostname/migration settings on the compute relation."""
    # NOTE(james-page) in MAAS environments the actual hostname is a CNAME
    # record so won't get scanned based on private-address which is an IP
    # add the hostname configured locally to the relation.
    settings = {
        'hostname': gethostname()
    }

    if config('prefer-ipv6'):
        settings = {'private-address': get_ipv6_addr()[0]}

    if migration_enabled():
        auth_type = config('migration-auth-type')
        settings['migration_auth_type'] = auth_type
        if auth_type == 'ssh':
            settings['ssh_public_key'] = public_ssh_key()

        relation_set(relation_id=rid, **settings)

    if config('enable-resize'):
        settings['nova_ssh_public_key'] = public_ssh_key(user='******')
        relation_set(relation_id=rid, **settings)
def get_ipv6_addr(): """Exclude any ip addresses configured or managed by corosync.""" excludes = [] for rid in relation_ids('ha'): for unit in related_units(rid): resources = parse_data(rid, unit, 'resources') for res in resources.values(): if 'ocf:heartbeat:IPv6addr' in res: res_params = parse_data(rid, unit, 'resource_params') res_p = res_params.get(res) if res_p: for k, v in res_p.values(): if utils.is_ipv6(v): log("Excluding '%s' from address list" % v, level=DEBUG) excludes.append(v) return utils.get_ipv6_addr(exc_list=excludes)[0]
def cluster_joined(relation_id=None):
    """Advertise addresses on the cluster relation; check DB init state."""
    for addr_type in ADDRESS_TYPES:
        address = get_address_in_network(
            config('os-{}-network'.format(addr_type))
        )
        if address:
            relation_set(
                relation_id=relation_id,
                relation_settings={'{}-address'.format(addr_type): address}
            )

    # Only do if this is fired by cluster rel
    if not relation_id:
        check_db_initialised()

    if config('prefer-ipv6'):
        private_addr = get_ipv6_addr(exc_list=[config('vip')])[0]
        relation_set(relation_id=relation_id,
                     relation_settings={'private-address': private_addr})
def sync_db_with_multi_ipv6_addresses(database, database_user,
                                      relation_prefix=None):
    """Publish all ipv6 addresses (plus any ipv6 vips) on shared-db."""
    hosts = get_ipv6_addr(dynamic_only=False)

    if config('vip'):
        # Include any configured ipv6 vips as well.
        for vip in config('vip').split():
            if vip and is_ipv6(vip):
                hosts.append(vip)

    kwargs = {'database': database,
              'username': database_user,
              'hostname': json.dumps(hosts)}

    if relation_prefix:
        # Re-key each setting with the requested prefix.
        for key in list(kwargs.keys()):
            kwargs["%s_%s" % (relation_prefix, key)] = kwargs.pop(key)

    for rid in relation_ids('shared-db'):
        relation_set(relation_id=rid, **kwargs)
def __call__(self):
    """Build rsync context from swift-storage relation data."""
    ctxt = {}
    if config('prefer-ipv6'):
        ctxt['local_ip'] = '%s' % get_ipv6_addr()[0]
    else:
        ctxt['local_ip'] = unit_private_ip()

    timestamps = []
    for rid in relation_ids('swift-storage'):
        for unit in related_units(rid):
            settings = relation_get(unit=unit, rid=rid)
            ts = settings.get('timestamp')
            allowed_hosts = settings.get('rsync_allowed_hosts')
            # Keep the allowed_hosts from the most recent timestamp seen.
            if allowed_hosts and ts:
                if not timestamps or ts > max(timestamps):
                    ctxt['allowed_hosts'] = allowed_hosts

                timestamps.append(ts)

    self.enable_rsyncd()
    return ctxt
def get_ipv6_network_address(iface):
    """Return the IPv6 network address for iface's primary ipv6 address.

    Behaves in the same way as the ipv4 get_network_address() above when
    iface is None.
    """
    if not iface:
        return None

    try:
        ipv6_addr = utils.get_ipv6_addr(iface=iface)[0]
        all_addrs = netifaces.ifaddresses(iface)

        # Find the entry matching our selected address and derive the
        # network address from its netmask.
        for addr in all_addrs[netifaces.AF_INET6]:
            if ipv6_addr == addr['addr']:
                network = "{}/{}".format(addr['addr'], addr['netmask'])
                return str(IPNetwork(network).network)
    except ValueError:
        msg = "Invalid interface '%s'" % iface
        status_set('blocked', msg)
        raise Exception(msg)

    msg = "No valid network found for interface '%s'" % iface
    status_set('blocked', msg)
    raise Exception(msg)