def get_template_data():
    rels = hookenv.relations()
    config = hookenv.config()
    version = config['version']
    template_data = {}
    template_data['etcd_servers'] = ",".join([
        "http://%s:%s" % (s[0], s[1]) for s in
        sorted(get_rel_hosts('etcd', rels, ('hostname', 'port')))])
    template_data['minions'] = ",".join(get_rel_hosts('minions-api', rels))
    template_data['api_bind_address'] = _bind_addr(hookenv.unit_private_ip())
    template_data['bind_address'] = "127.0.0.1"
    template_data['api_server_address'] = "http://%s:%s" % (
        hookenv.unit_private_ip(), 8080)
    arch = subprocess.check_output(['dpkg', '--print-architecture']).strip()
    template_data['web_uri'] = "/kubernetes/%s/local/bin/linux/%s/" % (
        version, arch)
    if version == 'local':
        template_data['alias'] = hookenv.charm_dir() + '/files/output/'
    else:
        directory = '/opt/kubernetes/_output/local/bin/linux/%s/' % arch
        template_data['alias'] = directory
    _encode(template_data)
    return template_data

def add_new_peer_nodes_to_cluster():
    """Add new peers to the cluster."""
    if is_flag_set('endpoint.cluster.peer.joined'):
        endpoint = 'endpoint.cluster.peer.joined'
    elif is_flag_set('endpoint.cluster.peer.changed'):
        endpoint = 'endpoint.cluster.peer.changed'
    else:
        status.blocked('No peer endpoint set')
        return

    # Get the known application peer ip addresses from juju's perspective.
    peers = endpoint_from_flag(endpoint).all_units
    peer_ips = [peer._data['private-address']
                for peer in peers if peer._data is not None]

    # Get the known cluster node ips from redis's point of view.
    cluster_node_ips = [node['node_ip'] for node in get_cluster_nodes_info()]

    # Compare the nodes in the cluster to the peer nodes that juju is aware
    # of; register nodes that are juju peers but not yet part of the cluster.
    node_added = False
    for ip in peer_ips:
        if ip not in cluster_node_ips:
            node_added = True
            cmd = "{} --cluster add-node {}:6379 {}:6379".format(
                REDIS_CLI, ip, unit_private_ip())
            out = check_output(cmd, shell=True)
            log(out)
            # Give the cluster a second to recognize the new node.
            sleep(1)

    if node_added:
        cluster_nodes = get_cluster_nodes_info()
        cluster_node_ips = [node['node_ip'] for node in cluster_nodes]
        cluster_node_ids = [node['node_id'] for node in cluster_nodes]
        charms.leadership.leader_set(
            cluster_node_ips=",".join(cluster_node_ips))
        charms.leadership.leader_set(
            cluster_nodes_json=json.dumps(cluster_nodes))
        # Generate the weights string for the rebalance command.
        node_weights = " ".join(
            ["{}=1".format(node_id) for node_id in cluster_node_ids])
        cmd = ("{} --cluster rebalance --cluster-weight {} "
               "--cluster-timeout 3600 --cluster-use-empty-masters "
               "{}:6379").format(REDIS_CLI, node_weights, unit_private_ip())
        out = check_output(cmd, shell=True)
        log(out)

    clear_flag('endpoint.cluster.peer.joined')
    clear_flag('endpoint.cluster.peer.changed')

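# For reference, the redis-cli invocations built above expand roughly as
# follows (IPs and node ids hypothetical; REDIS_CLI is assumed to resolve
# to the redis-cli binary). A sketch of the wire commands, not charm code:
#
#   redis-cli --cluster add-node 10.0.0.7:6379 10.0.0.4:6379
#   redis-cli --cluster rebalance \
#       --cluster-weight <id-1>=1 <id-2>=1 <id-3>=1 \
#       --cluster-timeout 3600 --cluster-use-empty-masters 10.0.0.4:6379
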
def get_ip():
    """Get internal IP and relation IP."""
    rel_ip = None
    main_ip = unit_private_ip() if not config.get('host') or (
        config.get('host') == "none") else config.get('host')
    if not main_ip or (main_ip == '0.0.0.0'):
        rel_ip = unit_private_ip()
    if not rel_ip:
        rel_ip = main_ip
    return main_ip, rel_ip

def publish_node_private_ip(self):
    relation = self.relations[0]
    relation.to_publish['node-ip'] = hookenv.unit_private_ip()
    log('unit private ip: ' + hookenv.unit_private_ip(), INFO)
    relation.to_publish['node-id'] = hookenv.local_unit()
    log('unit id: ' + hookenv.local_unit(), INFO)
    relation.to_publish['node-type'] = hookenv.config()['node-type']
    log('unit type: ' + hookenv.config()['node-type'], INFO)
    status_set('active', hookenv.local_unit().split('/')[0] + '.joined')
    set_flag(self.expand_name('endpoint.{endpoint_name}.ip-published'))

def get_ingress_addresses(endpoint_name):
    """Returns all ingress-addresses belonging to the named endpoint, if
    available. Falls back to private-address if necessary."""
    try:
        data = network_get(endpoint_name)
    except NotImplementedError:
        # Juju is too old to support network_get.
        return [unit_private_ip()]

    if "ingress-addresses" in data:
        return data["ingress-addresses"]
    else:
        return [unit_private_ip()]

def get_ingress_address(endpoint_name):
    '''Returns ingress-address belonging to the named endpoint, if
    available. Falls back to private-address if necessary.
    '''
    try:
        data = network_get(endpoint_name)
    except NotImplementedError:
        # Juju is too old to support network_get.
        return unit_private_ip()

    if 'ingress-addresses' in data:
        return data['ingress-addresses'][0]
    else:
        return unit_private_ip()

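# Usage sketch for the two ingress helpers above (endpoint name 'db' is
# hypothetical; assumes a Juju hook context where network_get and
# unit_private_ip are available):
#
#   get_ingress_addresses('db')  # e.g. ['10.0.0.5'], or the private
#                                # address wrapped in a list on old Juju
#   get_ingress_address('db')    # e.g. '10.0.0.5', the first ingress
#                                # address, or the private address
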
def trigger_puppet(self):
    # If we can't reverse resolve the hostname (like on azure), support DN
    # registration by IP address.
    # NB: determine this *before* updating /etc/hosts below since
    # gethostbyaddr will not fail if we have an /etc/hosts entry.
    reverse_dns_bad = False
    try:
        socket.gethostbyaddr(
            utils.resolve_private_address(hookenv.unit_private_ip()))
    except socket.herror:
        reverse_dns_bad = True

    # We know java7 has MAXHOSTNAMELEN of 64 chars, so we cannot rely on
    # java to do a hostname lookup on clouds that have >64 char fqdns
    # (gce). Force a short hostname (< 64 chars) into /etc/hosts as a
    # workaround. A better fix may be to move to java8. See
    # http://paste.ubuntu.com/16230171/
    # NB: do this before the puppet apply, which may call java stuff
    # like format namenode, which will fail if we don't get this fix
    # down early.
    short_host = subprocess.check_output(
        ['facter', 'hostname']).strip().decode()
    private_ip = utils.resolve_private_address(hookenv.unit_private_ip())
    if short_host and private_ip:
        utils.update_kv_host(private_ip, short_host)
        utils.manage_etc_hosts()

    charm_dir = hookenv.charm_dir()
    # TODO JIRA KWM: rm does not need Hdfs_init and will fail
    rm_patch = Path(charm_dir) / 'resources/patch1_rm_init_hdfs.patch'
    # TODO JIRA KWM: nm should not *need* mapred role. we could patch it
    # with nm_patch, or adjust nm charm to include mapred role. for now,
    # we're doing the latter. todo rfc from dev@bigtop list.
    # nm_patch = Path(charm_dir) / 'resources/patch2_nm_core-site.patch'
    # TODO JIRA KWM: client role needs common_yarn for yarn-site.xml
    client_patch = Path(charm_dir) / \
        'resources/patch3_client_role_use_common_yarn.patch'
    with chdir("{}".format(self.bigtop_base)):
        # rm patch goes first
        utils.run_as('root', 'patch', '-p1', '-s', '-i', rm_patch)
        # skip nm_patch for now since nm charm is including mapred role
        # utils.run_as('root', 'patch', '-p1', '-s', '-i', nm_patch)
        # client patch goes last
        utils.run_as('root', 'patch', '-p1', '-s', '-i', client_patch)
    # TODO FIX ABOVE KWM

    # puppet apply needs to be run where the recipes were unpacked.
    with chdir("{}".format(self.bigtop_base)):
        utils.run_as('root', 'puppet', 'apply', '-d',
                     '--modulepath="bigtop-deploy/puppet/modules:/etc/puppet/modules"',
                     'bigtop-deploy/puppet/manifests/site.pp')

    # Do any post-puppet config on the generated config files.
    if reverse_dns_bad:
        hdfs_site = Path('/etc/hadoop/conf/hdfs-site.xml')
        with utils.xmlpropmap_edit_in_place(hdfs_site) as props:
            props['dfs.namenode.datanode.registration.ip-hostname-check'] = 'false'

def get_template_data():
    rels = hookenv.relations()
    template_data = {}
    template_data['etcd_servers'] = ",".join([
        "http://%s:%s" % (s[0], s[1]) for s in
        sorted(get_rel_hosts('etcd', rels, ('hostname', 'port')))])
    template_data['minions'] = ",".join(get_rel_hosts('minions-api', rels))
    template_data['api_bind_address'] = _bind_addr(hookenv.unit_private_ip())
    template_data['bind_address'] = "127.0.0.1"
    template_data['api_server_address'] = "http://%s:%s" % (
        hookenv.unit_private_ip(), 8080)
    _encode(template_data)
    return template_data

def ingress_address(endpoint, relid):
    # Work around https://github.com/juju/charm-helpers/issues/112
    if not hookenv.has_juju_version("2.3"):
        return hookenv.unit_private_ip()

    try:
        d = hookenv.network_get(endpoint, relid)
        return d["ingress-addresses"][0]
    except NotImplementedError:
        # Warn, although this is normal with older Juju.
        hookenv.log(
            "Unable to determine ingress address, "
            "falling back to private ip",
            hookenv.WARNING,
        )
        return hookenv.unit_private_ip()

def rebalance_and_remove():
    """Rebalance and remove.

    Rebalance the node slots before removal.
    """
    if is_flag_set('redis.cluster.joined') and \
            not is_flag_set('redis.cluster.stopped'):
        nodes_info_json = charms.leadership.leader_get("cluster_nodes_json")
        nodes_info = json.loads(nodes_info_json)
        for node in nodes_info:
            if node['node_ip'] == unit_private_ip():
                # Rebalance slots away from the node to remove.
                cmd = ("{} --cluster rebalance {}:6379 "
                       "--cluster-weight {}=0").format(
                    REDIS_CLI, node['node_ip'], node['node_id'])
                out = check_output(cmd, shell=True)
                log(out)

                # TODO: Need to figure out a way to poll here.
                sleep(5)

                try:
                    # Remove node from cluster.
                    cmd = "{} --cluster del-node {}:6379 {}".format(
                        REDIS_CLI, node['node_ip'], node['node_id'])
                    out = check_output(cmd, shell=True)
                    log(out)
                except CalledProcessError as e:
                    log(e)
        set_flag('redis.cluster.stopped')

def ensure_sufficient_masters():
    """Redis requires a minimum of 3 master nodes.

    Set a leader flag indicating that we have met the minimum number of
    nodes.
    """
    if is_flag_set('endpoint.cluster.peer.joined'):
        endpoint = 'endpoint.cluster.peer.joined'
    elif is_flag_set('endpoint.cluster.peer.changed'):
        endpoint = 'endpoint.cluster.peer.changed'
    else:
        status.blocked('No peer endpoint set')
        return

    # Get the peers, check for min length.
    peers = endpoint_from_flag(endpoint).all_units
    peer_ips = [peer._data['private-address']
                for peer in peers if peer._data is not None]
    if len(peer_ips) > 1:
        status.active(
            "Minimum # masters available, got {}.".format(len(peer_ips) + 1))
        init_masters = ",".join(peer_ips + [unit_private_ip()])
        charms.leadership.leader_set(init_masters=init_masters)

    clear_flag('endpoint.cluster.peer.joined')
    clear_flag('endpoint.cluster.peer.changed')

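# Note on the check above (added observation): len(peer_ips) > 1 means at
# least two remote peers, which together with this unit's own
# unit_private_ip() yields the three masters a redis cluster requires, so
# init_masters always carries at least three addresses when set.
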
def register_machine(apiserver, retry=False):
    parsed = urlparse.urlparse(apiserver)
    # identity = hookenv.local_unit().replace('/', '-')
    private_address = hookenv.unit_private_ip()

    with open('/proc/meminfo') as fh:
        info = fh.readline()
        mem = info.strip().split(':')[1].strip().split()[0]
    cpus = os.sysconf('SC_NPROCESSORS_ONLN')

    registration_request = Registrator()
    registration_request.data['Kind'] = 'Minion'
    registration_request.data['id'] = private_address
    registration_request.data['name'] = private_address
    registration_request.data['metadata']['name'] = private_address
    registration_request.data['spec']['capacity']['mem'] = mem + ' K'
    registration_request.data['spec']['capacity']['cpu'] = cpus
    registration_request.data['spec']['externalID'] = private_address
    registration_request.data['status']['hostIP'] = private_address

    response, result = registration_request.register(
        parsed.hostname, parsed.port, '/api/v1beta3/nodes')
    print(response)

    try:
        registration_request.command_succeeded(response, result)
    except ValueError:
        # This happens when we have already registered;
        # for now this is OK.
        pass

def config_leader():
    leader_set(hostname=hookenv.unit_private_ip())
    leader_set(public_ip=hookenv.unit_public_ip())
    leader_set(username='******')
    leader_set(password=hookenv.config('carte_password'))
    leader_set(port=hookenv.config('carte_port'))
    render_master_config()

def config_with_reverseproxy(reverseproxy):
    services = reverseproxy.services()
    cfg = hookenv.config()
    for service in services:
        service_dir = '/var/lib/tor/%s' % (service['service_name'])
        if not os.path.isdir(service_dir):
            check_call(['install', '-d', service_dir,
                        '-o', 'debian-tor', '-m', '700'])
    bridges = []
    for bridge in cfg.get('bridges', '').split(','):
        fields = bridge.split()
        if len(fields) > 1:
            addr, fp = fields[:2]
            bridges.append({'addr': addr, 'fingerprint': fp})
    render(source='torrc',
           target='/etc/tor/torrc',
           owner='root',
           perms=0o644,
           context={
               'cfg': cfg,
               'services': services,
               'bridges': bridges,
               'public_address': hookenv.unit_public_ip(),
               'private_address': hookenv.unit_private_ip(),
           })
    remove_state('reverseproxy.available')
    set_state('tor.start')

def set_secrets():
    password = config()['admin_password']
    if password == '':
        password = b64encode(os.urandom(18)).decode('utf-8')
    leader_set({'password': password, 'leader_ip': unit_private_ip()})
    kv.set('password', password)
    set_flag('secrets.configured')

def request_certificates():
    '''Request new certificate data.'''
    cert_provider = endpoint_from_flag('cert-provider.available')

    # Set the private ip of this unit as the Common Name for the cert.
    # NB: Any 'http-host' config will be added to the SANs list; we
    # want to ensure we always have a consistent CN regardless of config.
    cert_cn = hookenv.unit_private_ip()

    # Create a path safe name by removing path characters from the unit name.
    cert_name = hookenv.local_unit().replace('/', '_')

    # Gather up all the alt names we want for our cert.
    proxy_ep = endpoint_from_flag('proxy.available')
    sans = layer.docker_registry.get_tls_sans(
        proxy_ep.relation if proxy_ep else None)

    # If our alt names have changed, request a new cert.
    if data_changed('tls_sans', sans):
        hookenv.log('Requesting new cert for CN: {} with SANs: {}'.format(
            cert_cn, sans))
        cert_provider.request_server_cert(cert_cn, sans, cert_name)
    else:
        hookenv.log(
            'Not requesting new tls data; SANs did not change: {}'.format(
                sans))

def register_machine(apiserver, retry=False): parsed = urlparse.urlparse(apiserver) # identity = hookenv.local_unit().replace('/', '-') private_address = hookenv.unit_private_ip() with open("/proc/meminfo") as fh: info = fh.readline() mem = info.strip().split(":")[1].strip().split()[0] cpus = os.sysconf("SC_NPROCESSORS_ONLN") registration_request = Registrator() registration_request.data["Kind"] = "Minion" registration_request.data["id"] = private_address registration_request.data["name"] = private_address registration_request.data["metadata"]["name"] = private_address registration_request.data["spec"]["capacity"]["mem"] = mem + " K" registration_request.data["spec"]["capacity"]["cpu"] = cpus registration_request.data["spec"]["externalID"] = private_address registration_request.data["status"]["hostIP"] = private_address response, result = registration_request.register(parsed.hostname, parsed.port, "/api/v1beta3/nodes") print(response) try: registration_request.command_succeeded(response, result) except ValueError: # This happens when we have already registered # for now this is OK pass
def get_sans(address_list=None):
    """Return a string suitable for the easy-rsa subjectAltNames.

    This method will build a valid SANs string with the public IP,
    private IP, and hostname of THIS system."""
    # The unit_public_ip could be a FQDN or IP address depending on provider.
    public = hookenv.unit_public_ip()
    # unitdata returns None if not found. Handle the case where no
    # addresses were passed by initializing to an empty list.
    if not address_list:
        address_list = []
    if public not in address_list:
        address_list.append(public)
    # The unit_private_ip could be a FQDN or IP address depending on provider.
    private = hookenv.unit_private_ip()
    if private not in address_list:
        address_list.append(private)
    # The hostname is usually a string, not an IP address.
    hostname = socket.gethostname()
    if hostname not in address_list:
        address_list.append(hostname)
    sans = []
    for address in address_list:
        if _is_ip(address):
            sans.append("IP:{0}".format(address))
        else:
            sans.append("DNS:{0}".format(address))
    return ",".join(sans)

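# Hedged example of get_sans() output (all addresses hypothetical). With
# public=203.0.113.7, private=10.0.0.7, and hostname=myhost:
#
#   get_sans()                    # 'IP:203.0.113.7,IP:10.0.0.7,DNS:myhost'
#   get_sans(['db.example.com'])  # 'DNS:db.example.com,IP:203.0.113.7,'
#                                 # 'IP:10.0.0.7,DNS:myhost'
#
# Entries keep their input order; the public, private, and hostname values
# are only appended when not already present in the list.
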
def cluster_connected(hacluster):
    """Configure HA resources in corosync."""
    dns_record = config('dns-ha-access-record')
    vips = config('vip') or None
    if vips and dns_record:
        set_flag('config.dns_vip.invalid')
        log("Unsupported configuration. vip and dns-ha cannot both be set",
            level=ERROR)
        return
    else:
        clear_flag('config.dns_vip.invalid')
    if vips:
        vips = vips.split()
        for vip in vips:
            if vip == vault.get_vip(binding='external'):
                hacluster.add_vip('vault-ext', vip)
            else:
                hacluster.add_vip('vault', vip)
    elif dns_record:
        try:
            ip = network_get_primary_address('access')
        except NotImplementedError:
            ip = unit_private_ip()
        hacluster.add_dnsha('vault', ip, dns_record, 'access')
    hacluster.bind_resources()

def notify_minions(): print("Notify minions.") for r in hookenv.relation_ids('minions-api'): hookenv.relation_set( r, hostname=hookenv.unit_private_ip(), port=8080)
def save_ip_tables(host, source_port, dest_port):
    iface = get_iface_from_addr(unit_private_ip())
    host_port = {'host': host, 'source_port': source_port,
                 'dest_port': dest_port, 'iface': iface}
    call("iptables -t nat -A PREROUTING -i {iface} -p tcp "
         "--dport {source_port} -j DNAT "
         "--to {host}:{dest_port}".format(**host_port).split())
    # The output redirection needs a shell; splitting the string would
    # pass '>' to iptables-save as a literal argument.
    call("iptables-save > /etc/iptables/rules.v4", shell=True)

def emit_natsconf():
    natscontext = {
        'nats_ip': hookenv.unit_private_ip(),
        'nats_port': config_data['nats_port'],
    }
    with open(NATS_CONFIG_FILE, 'w') as natsconf:
        natsconf.write(render_template('nats.yml', natscontext))

def write_certs():
    '''Write cert data to our configured location.'''
    cert_provider = endpoint_from_flag('cert-provider.server.certs.changed')
    cert_cn = hookenv.unit_private_ip()
    ca = cert_provider.root_ca_cert
    cert = cert_provider.server_certs_map[cert_cn]

    # Configure when we have everything we need.
    if not (cert and ca and cert.cert and cert.key):
        layer.status.maint('Incomplete TLS data. Retrying.')
        clear_flag('charm.docker-registry.tls-enabled')
    else:
        layer.status.maint('Reconfiguring registry with TLS.')
        # Only configure/restart if cert data was written.
        if layer.docker_registry.write_tls(ca, cert.cert, cert.key):
            # NB: set the tls flag prior to calling configure.
            set_flag('charm.docker-registry.tls-enabled')

            layer.docker_registry.stop_registry()
            layer.docker_registry.configure_registry()
            layer.docker_registry.start_registry()

            # If we have clients, let them know our tls data has changed.
            if is_flag_set('charm.docker-registry.client-configured'):
                configure_client()
            clear_flag('cert-provider.server.certs.changed')
            report_status()
        else:
            layer.status.maint('Could not write TLS data. Retrying.')
            clear_flag('charm.docker-registry.tls-enabled')

def test_set_active_seed(self, is_leader, status_get, status_set, seed_ips):
    is_leader.return_value = False
    status_get.return_value = 'waiting'
    seed_ips.return_value = set([hookenv.unit_private_ip()])
    actions.set_active('')
    status_set.assert_called_once_with('active', 'Live seed')

def update_zoo_cfg(self, zkid=getid(local_unit()), ip=unit_private_ip(),
                   remove=False):
    """Add or remove Zookeeper units from zoo.cfg.

    Configuration for a Zookeeper quorum requires listing all unique
    servers (server.X=<ip>:2888:3888) in the zoo.cfg. This function
    manages the server.X entries.
    """
    zookeeper_cfg = "{}/zoo.cfg".format(
        self.dist_config.path('zookeeper_conf'))
    key = "server.{}".format(zkid)
    value = "={}:2888:3888".format(ip)
    found = False
    if remove:
        with open(zookeeper_cfg, 'r', encoding='utf-8') as f:
            contents = f.readlines()
            for l in range(0, len(contents)):
                if contents[l].startswith(key):
                    contents.pop(l)
                    found = True
                    break
        if found:
            with open(zookeeper_cfg, 'w', encoding='utf-8') as f:
                f.writelines(contents)
    else:
        with open(zookeeper_cfg, 'r', encoding='utf-8') as f:
            contents = f.readlines()
            for l in range(0, len(contents)):
                if contents[l].startswith(key):
                    contents[l] = key + value + "\n"
                    found = True
            if not found:
                contents.append(key + value + "\n")
        with open(zookeeper_cfg, 'w', encoding='utf-8') as f:
            f.writelines(contents)

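# NB (added observation, not from the source): the zkid/ip defaults above
# are evaluated once, when the method is defined, rather than per call.
# Charm hooks re-import the module on every invocation so this usually
# works out, but a per-call default would look like this sketch:
#
#   def update_zoo_cfg(self, zkid=None, ip=None, remove=False):
#       zkid = getid(local_unit()) if zkid is None else zkid
#       ip = unit_private_ip() if ip is None else ip
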
def __call__(self):
    ctxt = {
        'local_ip': unit_private_ip(),
        'account_server_port': config('account-server-port'),
        'container_server_port': config('container-server-port'),
        'object_server_port': config('object-server-port'),
        'object_server_threads_per_disk': config(
            'object-server-threads-per-disk'),
        'account_max_connections': config('account-max-connections'),
        'container_max_connections': config('container-max-connections'),
        'object_max_connections': config('object-max-connections'),
        'object_replicator_concurrency': config(
            'object-replicator-concurrency'),
        'object_rsync_timeout': config('object-rsync-timeout'),
        'statsd_host': config('statsd-host'),
        'statsd_port': config('statsd-port'),
        'statsd_sample_rate': config('statsd-sample-rate'),
    }
    return ctxt

def maintain_seeds():
    '''The leader needs to maintain the list of seed nodes.'''
    seed_ips = helpers.get_seed_ips()
    hookenv.log('Current seeds == {!r}'.format(seed_ips), DEBUG)

    bootstrapped_ips = helpers.get_bootstrapped_ips()
    hookenv.log('Bootstrapped == {!r}'.format(bootstrapped_ips), DEBUG)

    # Remove any seeds that are no longer bootstrapped, such as dropped
    # units.
    seed_ips.intersection_update(bootstrapped_ips)

    # Add more bootstrapped nodes, if necessary, to get to our maximum
    # of 3 seeds.
    potential_seed_ips = list(reversed(sorted(bootstrapped_ips)))
    while len(seed_ips) < 3 and potential_seed_ips:
        seed_ips.add(potential_seed_ips.pop())

    # If there are no seeds or bootstrapped nodes, start with the leader. Us.
    if len(seed_ips) == 0:
        seed_ips.add(hookenv.unit_private_ip())

    hookenv.log('Updated seeds == {!r}'.format(seed_ips), DEBUG)

    hookenv.leader_set(seeds=','.join(sorted(seed_ips)))

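# A self-contained sketch (hypothetical IPs, no Juju environment needed)
# of the seed selection in maintain_seeds() above: reversed(sorted(...))
# combined with pop() from the list tail means the lexicographically
# smallest addresses end up chosen as seeds.
def _demo_seed_selection():
    seed_ips = set()
    bootstrapped_ips = {'10.0.0.2', '10.0.0.3', '10.0.0.4', '10.0.0.5'}
    potential_seed_ips = list(reversed(sorted(bootstrapped_ips)))
    while len(seed_ips) < 3 and potential_seed_ips:
        seed_ips.add(potential_seed_ips.pop())
    assert sorted(seed_ips) == ['10.0.0.2', '10.0.0.3', '10.0.0.4']
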
def send_data(tls):
    '''Send the data that is required to create a server certificate for
    this server.'''
    # Use the public ip of this unit as the Common Name for the certificate.
    common_name = hookenv.unit_public_ip()

    # Get the SDN gateway based on the cidr address.
    kubernetes_service_ip = get_kubernetes_service_ip()

    domain = hookenv.config('dns_domain')
    # Create SANs that the tls layer will add to the server cert.
    sans = [
        hookenv.unit_public_ip(),
        hookenv.unit_private_ip(),
        socket.gethostname(),
        kubernetes_service_ip,
        'kubernetes',
        'kubernetes.{0}'.format(domain),
        'kubernetes.default',
        'kubernetes.default.svc',
        'kubernetes.default.svc.{0}'.format(domain)
    ]

    # Maybe they have extra names they want as SANs.
    extra_sans = hookenv.config('extra_sans')
    if extra_sans and not extra_sans == "":
        sans.extend(extra_sans.split())

    # Create a path safe name by removing path characters from the unit name.
    certificate_name = hookenv.local_unit().replace('/', '_')

    # Request a server cert with this information.
    tls.request_server_cert(common_name, sans, certificate_name)

def install():
    install_packages()
    hookenv.log('Installing go')
    download_go()

    hookenv.log('Adding kubernetes and go to the path')
    address = hookenv.unit_private_ip()
    strings = [
        'export GOROOT=/usr/local/go\n',
        'export PATH=$PATH:$GOROOT/bin\n',
        'export KUBERNETES_MASTER=http://{0}:8080\n'.format(address),
    ]
    update_rc_files(strings)

    hookenv.log('Downloading kubernetes code')
    clone_repository()

    # Create the directory to store the keys and auth files.
    srv = Path('/srv/kubernetes')
    if not srv.isdir():
        srv.makedirs_p()

    hookenv.open_port(8080)
    hookenv.open_port(6443)
    hookenv.open_port(443)

    hookenv.log('Install complete')

def __call__(self):
    ctxt = {
        'local_ip': unit_private_ip(),
        'account_server_port': config('account-server-port'),
        'account_server_port_rep': config('account-server-port-rep'),
        'container_server_port': config('container-server-port'),
        'container_server_port_rep': config('container-server-port-rep'),
        'object_server_port': config('object-server-port'),
        'object_server_port_rep': config('object-server-port-rep'),
        'object_server_threads_per_disk': config(
            'object-server-threads-per-disk'),
        'account_max_connections': config('account-max-connections'),
        'container_max_connections': config('container-max-connections'),
        'object_max_connections': config('object-max-connections'),
        'object_replicator_concurrency': config(
            'object-replicator-concurrency'),
        'object_rsync_timeout': config('object-rsync-timeout'),
        'statsd_host': config('statsd-host'),
        'statsd_port': config('statsd-port'),
        'statsd_sample_rate': config('statsd-sample-rate'),
    }
    ubuntu_release = lsb_release()['DISTRIB_CODENAME'].lower()
    if CompareHostReleases(ubuntu_release) > "trusty":
        ctxt['standalone_replicator'] = True
    else:
        ctxt['standalone_replicator'] = False
    return ctxt

def provide_data(self, port):
    '''
    Consumers invoke this method to ship this unit's port and address
    to the remote side of each conversation.
    '''
    for conv in self.conversations():
        self.set_remote(scope=conv.scope,
                        data={'port': port,
                              'address': unit_private_ip()})  # noqa

def write_upstart(config):
    upstart = textwrap.dedent("""\
        description "Meteor app '{app_name}'"
        start on (net-device-up and local-filesystems and runlevel [2345])
        stop on runlevel [!2345]
        respawn
        setuid {app_user}
        env PORT={port}
        env MONGO_URL={mongo_url}
        env ROOT_URL={root_url}
        exec node {app_dir}/bundle/main.js
        """)
    with open('/etc/init/{}.conf'.format(SERVICE), 'w') as f:
        f.write(upstart.format(
            app_user=USER,
            app_dir=BASE_DIR,
            app_name=config['app-name'],
            mongo_url=config.get('mongo_url', ''),
            port=config['port'],
            root_url='http://{}:{}/'.format(
                hookenv.unit_private_ip(), config['port']),
        ))

def provide_core(solr):
    for service in solr.requested_cores():
        # NB: the generated name is currently unused; cores are keyed by
        # the service name.
        database = generate_dbname(service)
        solr.provide_core(service=service,
                          host=hookenv.unit_private_ip(),
                          port="8983",
                          core=service)

def client_present(client):
    if is_state('leadership.is_leader'):
        client.set_spark_started()
        spark = Spark()
        master_ip = utils.resolve_private_address(hookenv.unit_private_ip())
        master_url = spark.get_master_url(master_ip)
        client.send_master_info(master_url, master_ip)

def needs_restart():
    '''Return True if Cassandra is not running or needs to be restarted.'''
    if helpers.is_decommissioned():
        # Decommissioned nodes are never restarted. They remain up,
        # telling everyone they are decommissioned.
        helpers.status_set('blocked', 'Decommissioned node')
        return False

    if not helpers.is_cassandra_running():
        if helpers.is_bootstrapped():
            helpers.status_set('waiting', 'Waiting for permission to start')
        else:
            helpers.status_set('waiting',
                               'Waiting for permission to bootstrap')
        return True

    config = hookenv.config()

    # If our IP address has changed, we need to restart.
    if config.changed('unit_private_ip'):
        helpers.status_set('waiting', 'IP address changed. '
                                      'Waiting for restart permission.')
        return True

    # If the directory paths have changed, we need to migrate data
    # during a restart.
    storage = relations.StorageRelation()
    if storage.needs_remount():
        helpers.status_set(hookenv.status_get(),
                           'New mounts. Waiting for restart permission')
        return True

    # If any of these config items changed, a restart is required.
    for key in RESTART_REQUIRED_KEYS:
        if config.changed(key):
            hookenv.log('{} changed. Restart required.'.format(key))
    for key in RESTART_REQUIRED_KEYS:
        if config.changed(key):
            helpers.status_set(hookenv.status_get(),
                               'Config changes. '
                               'Waiting for restart permission.')
            return True

    # If we have new seeds, we should restart.
    new_seeds = helpers.get_seed_ips()
    if config.get('configured_seeds') != sorted(new_seeds):
        old_seeds = set(config.previous('configured_seeds') or [])
        changed = old_seeds.symmetric_difference(new_seeds)
        # We don't care about the local node in the changes.
        changed.discard(hookenv.unit_private_ip())
        if changed:
            helpers.status_set(hookenv.status_get(),
                               'Updated seeds {!r}. '
                               'Waiting for restart permission.'
                               ''.format(new_seeds))
            return True

    hookenv.log('Restart not required')
    return False

def configure(self, database, username, hostname=None, prefix=None):
    """Called by a charm layer that uses this interface to configure
    a database.
    """
    if not hostname:
        conversation = self.conversation()
        try:
            hostname = hookenv.network_get_primary_address(
                conversation.relation_name)
        except NotImplementedError:
            hostname = hookenv.unit_private_ip()
    if prefix:
        relation_info = {
            prefix + '_database': database,
            prefix + '_username': username,
            prefix + '_hostname': hostname,
        }
        self.set_prefix(prefix)
    else:
        relation_info = {
            'database': database,
            'username': username,
            'hostname': hostname,
        }
    self.set_remote(**relation_info)
    self.set_local(**relation_info)

def fetch_envxml():
    relation_id = hookenv.relation_id()
    if not relation_id:
        return True
    status_set('maintenance', 'fetch-envxml')
    log('relation_id: ' + relation_id, INFO)
    dali_ip = hookenv.relation_get('dali-hostname',
                                   hookenv.local_unit(), relation_id)
    if not dali_ip:
        # Wait until dali_ip is retrieved.
        return True
    log('dali_ip: ' + dali_ip, INFO)
    os.system("su hpcc -c \"scp -o StrictHostKeyChecking=no " + dali_ip +
              ":/etc/HPCCSystems/environment.xml /etc/HPCCSystems/\"")

    # Open ports.
    hpcc_config = HPCCConfig()
    config = hookenv.config()
    if config['node-type'] == "esp":
        hpcc_config.open_ports()
    if config['node-type'] in ("node", "support"):
        if has_component('esp', hookenv.unit_private_ip()):
            hpcc_config.open_ports()

    clear_flag('endpoint.hpcc-node.fetch-envxml')
    clear_flag('endpoint.hpcc-node.node-wait')
    set_flag('endpoint.hpcc-node.envxml-fetched')
    status_set('maintenance', 'envxml-fetched')
    return True

def _get_service_options(config, slave_relation=None):
    """Return a dict containing the redis service configuration options.

    Receive the hook environment config object and, optionally, the slave
    relation context.
    """
    hookutils.log('Retrieving service options.')
    # To introduce more redis configuration options in the charm, add them
    # to the config.yaml file and to the dictionary returned by this
    # function. If the new options are relevant while establishing
    # relations, also update the "provide_data" methods in the relation
    # contexts defined in relations.py.
    options = {
        'bind': hookenv.unit_private_ip(),
        'databases': config['databases'],
        'logfile': config['logfile'],
        'loglevel': config['loglevel'],
        'port': config['port'],
        'tcp-keepalive': config['tcp-keepalive'],
        'timeout': config['timeout'],
    }
    password = config['password'].strip()
    if password:
        options['requirepass'] = password
    if slave_relation is not None:
        hookutils.log('Setting up slave relation.')
        # If slave_relation is defined, it is assumed that the relation is
        # ready, i.e. that the slave_relation dict evaluates to True.
        data = slave_relation[slave_relation.name][0]
        options['slaveof'] = '{hostname} {port}'.format(**data)
        password = data.get('password')
        if password:
            options['masterauth'] = password
    return options

def write_config_file():
    cc = hookenv.config()
    lc = leadership.leader_get()

    config = {
        "api-macaroon-timeout": cc["api-macaroon-timeout"],
        "discharge-macaroon-timeout": cc["discharge-macaroon-timeout"],
        "discharge-token-timeout": cc["discharge-token-timeout"],
        "enable-email-login": cc["enable-email-login"],
        "logging-config": cc["logging-config"],
        "private-addr": hookenv.unit_private_ip(),
        "rendezvous-timeout": cc["rendezvous-timeout"],
        "skip-location-for-cookie-paths":
            cc["skip-location-for-cookie-paths"],
    }
    if cc["admin-agent-public-key"]:
        config["admin-agent-public-key"] = cc["admin-agent-public-key"]
    if cc["http-proxy"]:
        config["http-proxy"] = cc["http-proxy"]
        # Extend no-proxy to include all candid units.
        no_proxy = [cc["no-proxy"]]
        if not no_proxy[0]:
            no_proxy = no_proxy[1:]
        ep = endpoint_from_flag('candid.connected')
        if ep:
            no_proxy.extend(ep.addresses)
        config["no-proxy"] = ",".join(no_proxy)
    if cc["identity-providers"]:
        try:
            config["identity-providers"] = \
                candid.parse_identity_providers(cc["identity-providers"])
        except candid.IdentityProvidersParseError as e:
            hookenv.log("invalid identity providers: {}".format(e),
                        level="error")
    if cc["location"]:
        config["location"] = cc["location"]
    if cc["private-key"]:
        config["private-key"] = cc["private-key"]
    elif lc.get("private-key"):
        config["private-key"] = lc["private-key"]
    if cc["public-key"]:
        config["public-key"] = cc["public-key"]
    elif lc.get("public-key"):
        config["public-key"] = lc["public-key"]
    if cc["redirect-login-trusted-urls"]:
        config["redirect-login-trusted-urls"] = \
            _parse_list(cc["redirect-login-trusted-urls"])
    if cc["redirect-login-trusted-domains"]:
        config["redirect-login-trusted-domains"] = \
            _parse_list(cc["redirect-login-trusted-domains"])
    pg = endpoint_from_flag('postgres.master.available')
    if pg:
        config["storage"] = {
            "type": "postgres",
            "connection-string": str(pg.master),
        }
    else:
        config["storage"] = {"type": "memory"}

    candid.update_config(CONFIG_FILE, config)
    set_flag('candid.configured')
    set_flag('candid.restart')

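# Illustrative shape of the dict handed to candid.update_config above
# (values hypothetical; keys mirror the assignments in the function):
#
#   {'private-addr': '10.0.0.4',
#    'logging-config': 'INFO',
#    'api-macaroon-timeout': '48h',
#    'storage': {'type': 'postgres',
#                'connection-string': 'host=... dbname=...'},
#    ...}
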
def get_tls_sans(relation_name=None):
    '''Get all sans for our TLS certificate.

    Return all IP/DNS data that should be included as alt names when we
    request a TLS cert. This includes our public/private address, local
    DNS name, any configured hostname, and the address of any related
    proxy.

    :return: sorted list of sans
    '''
    charm_config = hookenv.config()
    sans = [
        hookenv.unit_private_ip(),
        hookenv.unit_public_ip(),
        socket.gethostname(),
    ]
    if charm_config.get('http-host'):
        http_host = urlparse(charm_config['http-host']).hostname
        sans.append(http_host)

    if relation_name:
        proxy_sans = [hookenv.ingress_address(rid=u.rid, unit=u.unit)
                      for u in hookenv.iter_units_for_relation_name(
                          relation_name)]
        sans.extend(proxy_sans)

    return sorted(sans)

def update_peers(self, node_list):
    '''
    This method will return True if the master peer was updated,
    False otherwise.
    '''
    old_master = unitdata.kv().get('spark_master.ip', 'not_set')
    master_ip = ''
    if not node_list:
        hookenv.log("No peers yet. Acting as master.")
        master_ip = utils.resolve_private_address(hookenv.unit_private_ip())
        nodes = [(hookenv.local_unit(), master_ip)]
        unitdata.kv().set('spark_all_master.ips', nodes)
        unitdata.kv().set('spark_master.ip', master_ip)
    else:
        # Use as master the node with the minimum id.
        # Any ordering is fine here; lexicographical ordering works too.
        node_list.sort()
        master_ip = utils.resolve_private_address(node_list[0][1])
        unitdata.kv().set('spark_master.ip', master_ip)
        unitdata.kv().set('spark_all_master.ips', node_list)

    hookenv.log("Updating master ip to {}.".format(master_ip))

    unitdata.kv().set('spark_master.is_set', True)
    unitdata.kv().flush(True)
    # In case of an HA setup, adding peers must be treated as a potential
    # master change.
    if (old_master != master_ip) or unitdata.kv().get(
            'zookeepers.available', False):
        return True
    else:
        return False

def get_remote_unit_name():
    for rel_type in hookenv.metadata()['requires'].keys():
        rels = hookenv.relations_of_type(rel_type)
        if rels and len(rels) >= 1:
            rel = rels[0]
            if rel['private-address'] == hookenv.unit_private_ip():
                return rel['__unit__']

def get_netloc():
    '''Get the network location (host:port) for this registry.

    If http-host config is present, return the netloc for that config.
    If related to a proxy, return the proxy netloc. Otherwise, return
    our private_address:port.
    '''
    charm_config = hookenv.config()
    netloc = None
    if charm_config.get('http-host'):
        netloc = urlparse(charm_config['http-host']).netloc
    else:
        # Use the proxy address for our netloc (if available).
        proxy = endpoint_from_flag('website.available')
        if proxy:
            proxy_addrs = [
                hookenv.ingress_address(rid=u.rid, unit=u.unit)
                for u in hookenv.iter_units_for_relation_name(
                    proxy.endpoint_name)
            ]
            # NB: get the first addr; presumably, the first will work just
            # as well as any other.
            try:
                netloc = proxy_addrs[0]
            except IndexError:
                # If we fail here, the proxy is probably departing; fall
                # out to the default netloc.
                pass
    if not netloc:
        netloc = '{}:{}'.format(hookenv.unit_private_ip(),
                                charm_config.get('registry-port', '5000'))
    return netloc

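# Precedence sketch for get_netloc() (all values hypothetical):
#
#   http-host='https://registry.example.com:443' -> 'registry.example.com:443'
#   no http-host, proxy unit at 10.0.0.9         -> '10.0.0.9'
#   neither                                      -> '10.0.0.4:5000'
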
def get_template_data():
    rels = hookenv.relations()
    template_data = hookenv.Config()
    template_data.CONFIG_FILE_NAME = '.unit-state'

    overlay_type = get_scoped_rel_attr('network', rels, 'overlay_type')
    etcd_servers = get_rel_hosts('etcd', rels, ('hostname', 'port'))
    api_servers = get_rel_hosts('api', rels, ('hostname', 'port'))

    # The kubernetes master isn't HA yet.
    if api_servers:
        api_info = api_servers.pop()
        api_servers = 'http://%s:%s' % (api_info[0], api_info[1])

    template_data['overlay_type'] = overlay_type
    template_data['kubelet_bind_addr'] = _bind_addr(
        hookenv.unit_private_ip())
    template_data['proxy_bind_addr'] = _bind_addr(
        hookenv.unit_get('public-address'))
    template_data['kubeapi_server'] = api_servers
    template_data['etcd_servers'] = ','.join([
        'http://%s:%s' % (s[0], s[1]) for s in sorted(etcd_servers)])
    template_data['identifier'] = os.environ['JUJU_UNIT_NAME'].replace(
        '/', '-')
    return _encode(template_data)

def test_maintain_seeds_start(self, is_leader, leader_set, seed_ips,
                              bootstrapped_ips):
    seed_ips.return_value = set()
    bootstrapped_ips.return_value = set()
    actions.maintain_seeds('')
    # The first seed is the first leader, which lets us get everything
    # started.
    leader_set.assert_called_once_with(seeds=hookenv.unit_private_ip())

def unit_to_ip(unit):
    if unit is None or unit == hookenv.local_unit():
        return hookenv.unit_private_ip()
    elif coordinator.relid:
        return hookenv.relation_get(rid=coordinator.relid,
                                    unit=unit).get('private-address')
    else:
        return None

def get_scoped_rel_attr(rel_name, rels, attr):
    private_ip = hookenv.unit_private_ip()
    for r, data in rels.get(rel_name, {}).items():
        for unit_id, unit_data in data.items():
            if unit_data.get('private-address') != private_ip:
                continue
            if unit_data.get(attr):
                return unit_data.get(attr)

def __call__(self):
    ctxt = {
        'local_ip': unit_private_ip(),
        'account_server_port': config('account-server-port'),
        'container_server_port': config('container-server-port'),
        'object_server_port': config('object-server-port'),
    }
    return ctxt

def setup_database(database):
    """On receiving database credentials, configure the database on the
    interface.
    """
    database.configure(hookenv.config('database'),
                       hookenv.config('database-user'),
                       hookenv.unit_private_ip())
    barbican.assess_status()

def render_slave_config():
    render('carte-config/slave.xml.j2', '/home/etl/carte-config.xml', {
        'carteslaveport': leader_get('port'),
        'carteslavehostname': hookenv.unit_private_ip(),
        'cartemasterhostname': leader_get('hostname'),
        'carteslavepassword': leader_get('password'),
        'cartemasterpassword': leader_get('password'),
        'cartemasterport': leader_get('port'),
    })

def notify_minions(): print("Notify minions.") config = hookenv.config() for r in hookenv.relation_ids('minions-api'): hookenv.relation_set( r, hostname=hookenv.unit_private_ip(), port=8080, version=config['version'])