def get_ha_nodes():
    """Build the corosync node map for this cluster.

    Returns a dict keyed by corosync node id, mapping to the address each
    unit should be reached at, covering every 'hanode' peer plus the local
    unit itself.
    """
    peers = peer_ips(peer_relation='hanode')
    nodes = {}
    for peer_unit, peer_addr in peers.items():
        node_id = get_corosync_id(peer_unit)
        if config('prefer-ipv6'):
            if not utils.is_ipv6(peer_addr):
                # Not an error since cluster may still be forming/updating
                log("Expected an ipv6 address but got %s" % (peer_addr),
                    level=WARNING)
            nodes[node_id] = peer_addr
        else:
            nodes[node_id] = get_host_ip(peer_addr)

    # Include this unit's own entry alongside its peers.
    local_id = get_corosync_id(local_unit())
    if config('prefer-ipv6'):
        nodes[local_id] = get_ipv6_addr()
    else:
        nodes[local_id] = get_host_ip(unit_get('private-address'))

    return nodes
def test_peer_ips(self):
    """peer_ips() returns a dict mapping each peer unit to its IP.

    Mocks relation_ids/relation_list/relation_get so that the 'cluster'
    relation reports two peers, then checks peer_ips() reproduces the
    unit -> address mapping exactly.
    """
    peers = {
        'peer_node/1': '10.0.0.1',
        'peer_node/2': '10.0.0.2',
    }

    def _relation_get(attr, rid, unit):
        # Ignore attr/rid; return the canned address for the unit.
        return peers[unit]

    self.relation_ids.return_value = ['cluster:0']
    # Materialize the keys: dict views are lazy and a plain list matches
    # what relation_list() really returns.
    self.relation_list.return_value = list(peers.keys())
    self.relation_get.side_effect = _relation_get
    # assertEquals is a deprecated alias (removed in Python 3.12);
    # use assertEqual.
    self.assertEqual(peers, cluster_utils.peer_ips())
def update_reverseproxy_config():
    '''Configure a reverse proxy.

    The lead unit is responsible for setting appropriate proxy config for
    all known registry peers. The oldest known peer will be configured as
    the primary proxied server. Other peers will be configured as backup
    servers which can take over if the primary fails.
    '''
    website = endpoint_from_flag('website.available')
    port = hookenv.config().get('registry-port')

    # Collect all registry units, including this one, keyed by unit name.
    peers = peer_ips(peer_relation="peer")
    peers[hookenv.local_unit()] = hookenv.unit_private_ip()

    # NB: use oldest peer (the first unit name in our sorted peers list)
    # versus juju leadership to determine primary vs backup servers:
    # https://bugs.launchpad.net/layer-docker-registry/+bug/1815459
    common_opts = "check inter 2000 rise 2 fall 5 maxconn 4096"

    # TLS options apply to every server stanza when both blobs are set.
    tls_opts = ""
    if (is_flag_set('config.set.tls-cert-blob') and
            is_flag_set('config.set.tls-key-blob')):
        tls_ca_config = hookenv.config().get('tls-ca-path')
        tls_opts = ("ssl check-ssl crt /var/lib/haproxy/default.pem "
                    "ca-file %s verify required" % tls_ca_config)

    # First sorted unit is the primary; everyone else is a backup.
    servers = []
    is_primary = True
    for unit in sorted(peers):
        if is_primary:
            server_opts = common_opts
            is_primary = False
        else:
            server_opts = common_opts + ' backup'
        server_opts = "{} {}".format(server_opts, tls_opts)
        servers.append('        - [{name}, {ip}, {port}, {opts}]'.format(
            name=unit.replace('/', '-'),
            ip=peers[unit],
            port=port,
            opts=server_opts))

    services_yaml = """
- service_name: %(app)s
  service_host: 0.0.0.0
  service_port: %(port)s
  service_options:
   - mode %(mode)s
   - balance leastconn
   - option httpchk GET / HTTP/1.0
  servers:
%(servers)s
""" % {
        'mode': 'tcp' if tls_opts != '' else 'http',
        'app': hookenv.application_name(),
        'port': port,
        'servers': "\n".join(servers),
    }

    # Send yaml to the proxy on initial relation and when it changes.
    if data_changed('proxy_stanza', services_yaml):
        # NB: interface needs configure() to set ip/host/port data and
        # set_remote for the blob of services.
        website.configure(port=port)
        website.set_remote(services=services_yaml)

    # A proxy may change our netloc; if we have clients, tell them.
    netloc = layer.docker_registry.get_netloc()
    if (is_flag_set('charm.docker-registry.client-configured') and
            data_changed('proxy_netloc', netloc)):
        configure_client()

    # Early versions of this charm incorrectly set an 'all_services'
    # key on the website relation. Kill it.
    if not is_flag_set('charm.docker-registry.proxy-data.validated'):
        website.set_remote(all_services=None)
        set_flag('charm.docker-registry.proxy-data.validated')

    # Ensure we'll validate website relation data from a follower
    # perspective if we ever lose leadership.
    clear_flag('charm.docker-registry.proxy-follower.validated')