def run_with_external_services(postgres, redis):
    set_state('block_standalone')

    # Grab redis data
    hosts = []
    # iterate over all the connected redis hosts
    for unit in redis.redis_data():
        hosts.append(unit['private_address'])
    redis_host = ','.join(hosts)

    # Grab postgres data
    pgdata = {'pg_host': postgres.host(),
              'pg_user': postgres.user(),
              'pg_pass': postgres.password(),
              'pg_db': postgres.database()}

    # Create a merged dict with the config values we expect
    context = {}
    context.update(config())
    context.update({'redis_host': redis_host})
    context.update(pgdata)

    render('docker-compose.yml', 'files/voting-app/docker-compose.yml',
           context)
    start_application()
    status_set('active', 'Ready to vote!')
def configure_flume(self, template_data=None):
    ''' Handle configuration of Flume and set up the environment. '''
    render(
        source='flume.conf.j2',
        target=self.config_file,
        context=dict({
            'dist_config': self.dist_config,
        }, **(template_data or {})),
        filters={
            'agent_list': lambda agents, prefix='': ','.join(
                ['%s%s' % (prefix, a['name']) for a in agents]),
        },
    )

    flume_bin = self.dist_config.path('flume') / 'bin'
    java_symlink = check_output(
        ["readlink", "-f", "/usr/bin/java"]).decode('utf8')
    java_home = re.sub('/bin/java', '', java_symlink).rstrip()
    with utils.environment_edit_in_place('/etc/environment') as env:
        if flume_bin not in env['PATH']:
            env['PATH'] = ':'.join([env['PATH'], flume_bin])
        env['FLUME_CONF_DIR'] = self.dist_config.path('flume_conf')
        env['FLUME_CLASSPATH'] = self.dist_config.path('flume') / 'lib'
        env['FLUME_HOME'] = self.dist_config.path('flume')
        env['JAVA_HOME'] = java_home
def initialize_new_leader():
    ''' Create an initial cluster string to bring up a single member cluster
    of etcd, and set the leadership data so the followers can join this one.
    '''
    bag = EtcdDatabag()
    # The self-assignment appears deliberate: reading bag.token generates a
    # token when one is not yet set, and assigning it back persists it.
    bag.token = bag.token
    bag.cluster_state = 'new'
    cluster_connection_string = get_connection_string([bag.private_address],
                                                      bag.management_port)
    bag.cluster = "{}={}".format(bag.unit_name, cluster_connection_string)
    render('defaults', '/etc/default/etcd', bag.__dict__,
           owner='root', group='root')
    host.service_restart('etcd')

    # sorry, some hosts need this. The charm races with systemd and wins.
    time.sleep(2)

    # Check health status before we say we are good
    etcdctl = EtcdCtl()
    status = etcdctl.cluster_health()
    if 'unhealthy' in status:
        status_set('blocked', 'Cluster not healthy')
        return

    # We have a healthy leader, broadcast initial data-points for followers
    open_port(bag.port)
    leader_connection_string = get_connection_string([bag.private_address],
                                                     bag.port)
    leader_set({'token': bag.token,
                'leader_address': leader_connection_string,
                'cluster': bag.cluster})

    # finish bootstrap delta and set configured state
    set_state('etcd.leader.configured')
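# A minimal sketch of the get_connection_string() helper used above and by
# several handlers below. This is an assumption inferred from its call sites
# (the helper itself is not part of this listing): it joins one or more
# addresses into the comma-separated "<scheme>://<host>:<port>" list that
# etcd expects for cluster and client URLs. The https default is a guess.
def get_connection_string(members, port, scheme='https'):
    ''' Return a comma-separated connection string for the given members. '''
    return ','.join('{}://{}:{}'.format(scheme, m, port) for m in members)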
def test_filters(self):
    filters = {'test': 'test({})'.format}
    output = render('filters.j2', filters=filters)
    self.assertEqual(output, 'test(value)\n'
                             'fm-value1,fm-value2')
    self.assertEqual(render(template='{{"value"|test}}', filters=filters),
                     'test(value)')
def render_config(bag=None):
    """Render the etcd configuration template for the given version"""
    if not bag:
        bag = EtcdDatabag()

    move_etcd_data_to_standard_location()

    v2_conf_path = "{}/etcd.conf".format(bag.etcd_conf_dir)
    v3_conf_path = "{}/etcd.conf.yml".format(bag.etcd_conf_dir)
    # probe for 2.x compatibility
    if etcd_version().startswith("2."):
        render("etcd2.conf", v2_conf_path, bag.__dict__,
               owner="root", group="root")
    # default to 3.x template behavior
    else:
        render("etcd3.conf", v3_conf_path, bag.__dict__,
               owner="root", group="root")
        if os.path.exists(v2_conf_path):
            # v3 will fail if the v2 config is left in place
            os.remove(v2_conf_path)

    # Close the previous client port and open the new one.
    close_open_ports()
    remove_state("etcd.rerender-config")
def setup_init_scripts(self):
    if host.init_is_systemd():
        template_path = '/etc/systemd/system/zeppelin.service'
        template_name = 'systemd.conf'
    else:
        template_path = '/etc/init/zeppelin.conf'
        template_name = 'upstart.conf'
    if os.path.exists(template_path):
        template_path_backup = "{}.backup".format(template_path)
        if os.path.exists(template_path_backup):
            os.remove(template_path_backup)
        os.rename(template_path, template_path_backup)

    render(
        template_name,
        template_path,
        context={
            'zeppelin_home': self.dist_config.path('zeppelin'),
            'zeppelin_conf': self.dist_config.path('zeppelin_conf')
        },
    )

    if host.init_is_systemd():
        utils.run_as('root', 'systemctl', 'enable', 'zeppelin.service')
        utils.run_as('root', 'systemctl', 'daemon-reload')
def configure_cni():
    ''' Configure Calico CNI. '''
    status.maintenance('Configuring Calico CNI')
    try:
        subnet = get_flannel_subnet()
    except FlannelSubnetNotFound:
        hookenv.log(traceback.format_exc())
        status.waiting('Waiting for Flannel')
        return
    os.makedirs('/etc/cni/net.d', exist_ok=True)
    cni = endpoint_from_flag('cni.connected')
    etcd = endpoint_from_flag('etcd.available')
    cni_config = cni.get_config()
    context = {
        'connection_string': etcd.get_connection_string(),
        'etcd_key_path': ETCD_KEY_PATH,
        'etcd_cert_path': ETCD_CERT_PATH,
        'etcd_ca_path': ETCD_CA_PATH,
        'kubeconfig_path': cni_config.get('kubeconfig_path',
                                          '/root/cdk/kubeconfig'),
        'subnet': subnet
    }
    render('10-canal.conflist', '/etc/cni/net.d/10-canal.conflist', context)
    cni.set_config(cidr=config('cidr'), cni_conf_file='10-canal.conflist')
    set_state('canal.cni.configured')
def launch_default_ingress_controller():
    ''' Launch the Kubernetes ingress controller & default backend (404) '''
    context = {}
    context['arch'] = arch()
    addon_path = '/root/cdk/addons/{}'

    # Render the default http backend (404) replicationcontroller manifest
    manifest = addon_path.format('default-http-backend.yaml')
    render('default-http-backend.yaml', manifest, context)
    hookenv.log('Creating the default http backend.')
    try:
        kubectl('apply', '-f', manifest)
    except CalledProcessError as e:
        hookenv.log(e)
        hookenv.log('Failed to create default-http-backend. Will attempt again next update.')  # noqa
        hookenv.close_port(80)
        hookenv.close_port(443)
        return

    # Render the ingress replication controller manifest
    manifest = addon_path.format('ingress-replication-controller.yaml')
    render('ingress-replication-controller.yaml', manifest, context)
    hookenv.log('Creating the ingress replication controller.')
    try:
        kubectl('apply', '-f', manifest)
    except CalledProcessError as e:
        hookenv.log(e)
        hookenv.log('Failed to create ingress controller. Will attempt again next update.')  # noqa
        hookenv.close_port(80)
        hookenv.close_port(443)
        return

    set_state('kubernetes-worker.ingress.available')
    hookenv.open_port(80)
    hookenv.open_port(443)
def render_without_context(source, target):
    ''' Render beat template from global state context '''
    cache = kv()
    context = dict(config())
    connected = False

    logstash_hosts = cache.get('beat.logstash')
    elasticsearch_hosts = cache.get('beat.elasticsearch')
    kafka_hosts = cache.get('beat.kafka')
    context['principal_unit'] = cache.get('principal_name')

    if logstash_hosts:
        connected = True
        context.update({'logstash': logstash_hosts})
    if elasticsearch_hosts:
        connected = True
        context.update({'elasticsearch': elasticsearch_hosts})
    if kafka_hosts:
        connected = True
        context.update({'kafka': kafka_hosts})

    if 'protocols' in context.keys():
        context.update({'protocols': parse_protocols()})

    # Split the log paths
    if 'logpath' in context.keys() and not isinstance(context['logpath'], list):  # noqa
        context['logpath'] = context['logpath'].split(' ')

    render(source, target, context)
    return connected
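# A plausible sketch of the parse_protocols() helper the
# render_without_context() variants in this listing rely on -- an assumption
# based purely on usage, since the helper itself is not included here. It is
# presumed to turn a space-separated `protocols` config string such as
# "tcp:9995 udp:514" into a mapping the beat template can iterate over.
def parse_protocols():
    ''' Parse the `protocols` config option into {protocol: [ports]}. '''
    protocols = {}
    for entry in config().get('protocols', '').split():
        proto, _, port = entry.partition(':')
        protocols.setdefault(proto, []).append(int(port))
    return protocols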
def initialize_networking_configuration(etcd):
    ''' Use an ephemeral instance of the configured etcd container to
    initialize the CIDR range flannel can pull from. This becomes a
    single-use tool. '''
    # Due to how subprocess mangles the JSON string, turn the hack script
    # formerly known as scripts/bootstrap.sh into this single-command
    # wrapper, under template control.
    status_set('maintenance', 'Configuring etcd keystore for flannel CIDR.')
    context = {}

    if is_state('etcd.tls.available'):
        cert_path = '/etc/ssl/flannel'
        etcd.save_client_credentials('{}/client-key.pem'.format(cert_path),
                                     '{}/client-cert.pem'.format(cert_path),
                                     '{}/client-ca.pem'.format(cert_path))
    else:
        cert_path = None

    context.update(config())
    context.update({'connection_string': etcd.get_connection_string(),
                    'socket': 'unix:///var/run/bootstrap-docker.sock',
                    'cert_path': cert_path})

    render('subnet-runner.sh', 'files/flannel/subnet.sh', context,
           perms=0o755)
    check_call(split('files/flannel/subnet.sh'))
    set_state('flannel.subnet.configured')
def nfs_storage(mount):
    '''NFS on kubernetes requires nfs config rendered into a deployment of
    the nfs client provisioner. That will handle the persistent volume claims
    with no persistent volume to back them.'''
    mount_data = get_first_mount(mount)
    if not mount_data:
        return

    # If present, use the configured registry to define the nfs image
    # location.
    registry_location = get_registry_location()
    if registry_location:
        mount_data['registry'] = registry_location

    addon_path = '/root/cdk/addons/{}'

    # Render the NFS deployment
    manifest = addon_path.format('nfs-provisioner.yaml')
    render('nfs-provisioner.yaml', manifest, mount_data)
    hookenv.log('Creating the nfs provisioner.')
    try:
        kubectl('apply', '-f', manifest)
    except CalledProcessError as e:
        hookenv.log(e)
        hookenv.log(
            'Failed to create nfs provisioner. Will attempt again next update.'
        )  # noqa
        return

    set_state('nfs.configured')
def run_flannel(etcd):
    ''' Render the docker-compose template, and run the flannel daemon '''
    status_set('maintenance', 'Starting flannel network container.')
    context = {}
    if is_state('etcd.tls.available'):
        cert_path = '/etc/ssl/flannel'
    else:
        cert_path = None
    context.update(config())
    context.update({'charm_dir': os.getenv('CHARM_DIR'),
                    'connection_string': etcd.get_connection_string(),
                    'cert_path': cert_path})
    render('flannel-compose.yml', 'files/flannel/docker-compose.yml', context)

    compose = Compose('files/flannel',
                      socket='unix:///var/run/bootstrap-docker.sock')
    compose.up()
    # Give the flannel daemon a moment to actually generate the interface
    # configuration seed. Otherwise we enter a time/wait scenario which
    # may cause this to be called out of order and break the expectation
    # of the deployment.
    time.sleep(3)
    ingest_network_config()
def write_cert_secret():
    ''' Write returned certificate into a secret for the webhook.
    This data is also shared across the leadership data to other units. '''
    hookenv.status_set('maintenance', 'Writing certificates')
    cert = leader_get('cert').encode('utf-8')
    key = leader_get('key').encode('utf-8')
    context = {}
    context['namespace'] = namespace
    context['cert'] = base64.b64encode(cert).decode('utf-8')
    context['key'] = base64.b64encode(key).decode('utf-8')
    render('certs.yaml', secret_yaml, context)
    hookenv.log('Updating AWS-IAM secret.')
    try:
        _kubectl('apply', '-f', secret_yaml)
    except CalledProcessError as e:
        hookenv.log(e)
        hookenv.log(
            'Failed to create AWS_IAM secret. Will attempt again next update.'
        )  # noqa
        return
    set_flag('charm.aws-iam.certificate-written')
def rerender_service_template():
    ''' If we change proxy settings, re-render the bootstrap service
    definition and attempt to resume where we left off. '''
    # Note: at this point, if we hijack the workload daemon, heavy-fisted
    # repercussions will occur, like disruption of services.
    codename = host.lsb_release()['DISTRIB_CODENAME']

    # By default, don't restart the daemon unless we have previously
    # rendered system files. Deterministic method to probe if we actually
    # need to restart the daemon.
    reboot = (os.path.exists('/lib/systemd/system/bootstrap-docker.service')
              or os.path.exists('/etc/init/bootstrap-docker.conf'))

    if codename != "trusty":
        # Handle systemd
        render('bootstrap-docker.service',
               '/lib/systemd/system/bootstrap-docker.service',
               config(), owner='root', group='root')
        cmd = ["systemctl", "daemon-reload"]
        check_call(cmd)
    else:
        # Handle upstart
        render('bootstrap-docker.upstart',
               '/etc/init/bootstrap-docker.conf',
               config(), owner='root', group='root')

    if reboot:
        service_restart('bootstrap-docker')
def configure_flume(self, template_data=None):
    ''' Handle configuration of Flume and set up the environment. '''
    render(
        source='flume.conf.j2',
        target=self.config_file,
        context=dict({
            'dist_config': self.dist_config,
        }, **(template_data or {})),
        filters={
            'agent_list': lambda agents, prefix='': ','.join([
                '%s%s' % (prefix, a['name']) for a in agents
            ]),
        },
    )

    flume_bin = self.dist_config.path('flume') / 'bin'
    java_symlink = check_output(
        ["readlink", "-f", "/usr/bin/java"]).decode('utf8')
    java_home = re.sub('/bin/java', '', java_symlink).rstrip()
    with utils.environment_edit_in_place('/etc/environment') as env:
        if flume_bin not in env['PATH']:
            env['PATH'] = ':'.join([env['PATH'], flume_bin])
        env['FLUME_CONF_DIR'] = self.dist_config.path('flume_conf')
        env['FLUME_CLASSPATH'] = self.dist_config.path('flume') / 'lib'
        env['FLUME_HOME'] = self.dist_config.path('flume')
        env['JAVA_HOME'] = java_home
def leader_config_changed():
    ''' The leader executes the runtime configuration update for the cluster,
    as it is the controlling unit. Will render config, close and open ports
    and restart the etcd service. '''
    configuration = hookenv.config()
    previous_port = configuration.previous('port')
    log('Previous port: {0}'.format(previous_port))
    previous_mgmt_port = configuration.previous('management_port')
    log('Previous management port: {0}'.format(previous_mgmt_port))
    if previous_port and previous_mgmt_port:
        bag = EtcdDatabag()
        etcdctl = EtcdCtl()
        members = etcdctl.member_list()
        # Iterate over all the members in the list.
        for unit_name in members:
            # Grab the previous peer url and replace the management port.
            peer_urls = members[unit_name]['peer_urls']
            log('Previous peer url: {0}'.format(peer_urls))
            old_port = ':{0}'.format(previous_mgmt_port)
            new_port = ':{0}'.format(configuration.get('management_port'))
            url = peer_urls.replace(old_port, new_port)
            # Update the member's peer_urls with the new ports.
            log(etcdctl.member_update(members[unit_name]['unit_id'], url))
        # Render just the leader's configuration with the new values.
        render('defaults', '/etc/default/etcd', bag.__dict__,
               owner='root', group='root')
        # Close the previous client port and open the new one.
        close_open_ports()
        leader_set({'leader_address':
                    get_connection_string([bag.private_address],
                                          bag.management_port)})
        host.service_restart('etcd')
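# For reference, the structure EtcdCtl.member_list() is assumed to return,
# inferred from how leader_config_changed() above and
# register_node_with_leader() below index into it (the class itself is not
# part of this listing): a dict keyed by unit name, each value carrying at
# least the member id and its peer/client URLs. Values are illustrative.
#
# members = {
#     'etcd/0': {
#         'unit_id': '8e9e05c52164694d',
#         'peer_urls': 'https://10.0.0.5:2380',
#         'client_urls': 'https://10.0.0.5:2379',
#     },
# }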
def manage_filebeat_logstash_ssl():
    """Manage the ssl cert/key that filebeat uses to connect to logstash.

    Create the cert/key files when both logstash_ssl options have been set;
    update when either config option changes; remove if either gets unset.
    """
    logstash_ssl_cert = config().get('logstash_ssl_cert')
    logstash_ssl_key = config().get('logstash_ssl_key')
    if logstash_ssl_cert and logstash_ssl_key:
        cert = base64.b64decode(logstash_ssl_cert).decode('utf8')
        key = base64.b64decode(logstash_ssl_key).decode('utf8')

        if data_changed('logstash_cert', cert):
            render(template='{{ data }}',
                   context={'data': cert},
                   target=LOGSTASH_SSL_CERT,
                   perms=0o444)
        if data_changed('logstash_key', key):
            render(template='{{ data }}',
                   context={'data': key},
                   target=LOGSTASH_SSL_KEY,
                   perms=0o400)
    else:
        if not logstash_ssl_cert and os.path.exists(LOGSTASH_SSL_CERT):
            os.remove(LOGSTASH_SSL_CERT)
        if not logstash_ssl_key and os.path.exists(LOGSTASH_SSL_KEY):
            os.remove(LOGSTASH_SSL_KEY)
def test_tests(self):
    tests = {'test': lambda s: s == 'foo'}
    self.assertEqual(render('tests.j2', context={'foo': 'foo'}, tests=tests),
                     'Yep')
    self.assertEqual(render('tests.j2', context={'foo': 'bar'}, tests=tests),
                     'Nope')
    tmpl = '{% if foo is test %}Yep{% else %}Nope{% endif %}'
    self.assertEqual(render(template=tmpl, context={'foo': 'foo'},
                            tests=tests),
                     'Yep')
    self.assertEqual(render(template=tmpl, context={'foo': 'bar'},
                            tests=tests),
                     'Nope')
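# Judging by the expected 'Yep'/'Nope' output, the tests.j2 fixture
# presumably contains the same expression the inline template above
# exercises -- an inference, since the fixture file is not shown here:
#
#     {% if foo is test %}Yep{% else %}Nope{% endif %}
#
# i.e. a custom jinja2 "test" supplied through the tests= keyword.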
def initialize_default_policy():
    """ Configure CFSSL with default signing policies. This initializes a
    few different configurations for generating certificates """
    db = kv()
    apikey = db.get('cfssl.apikey')
    # render the default 5 year policies
    render(config('default_policy'), '/etc/cfssl/policy.json',
           {'apikey': apikey})
    set_state('certificate-authority.policy.placed')
def test_basic(self):
    self.config = {'cfg-name': 'cfg-value'}
    output = render('basic.j2', context={'name': 'value'})
    self.assertEqual(output, 'name=value\n'
                             'cfg-name=cfg-value')
    output = render(template='{{ config["cfg-name"] }}')
    self.assertEqual(output, 'cfg-value')
    output = render(template=Template('{{ config["cfg-name"] }}'))
    self.assertEqual(output, 'cfg-value')
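# The expected output implies a basic.j2 fixture along these lines (an
# inference; the fixture itself is not included in this listing). It
# interpolates both the explicit context and the implicit config object:
#
#     name={{ name }}
#     cfg-name={{ config["cfg-name"] }}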
def configure_ganglia(ganglia):
    endpoints = ganglia.endpoints()
    render(
        source='hadoop-metrics2.properties.j2',
        target=GANGLIA_CONF_FILE,
        context={
            # e.g. {'host': 'gmond1', 'port': 8649} -> 'gmond1:8649'
            'servers': ','.join(sorted(map('{0[host]}:{0[port]}'.format,
                                           endpoints))),
        },
    )
    set_state('hadoop-ganglia.enabled')
def configure_ganglia(ganglia):
    endpoints = ganglia.endpoints()
    render(
        source='hadoop-metrics2.properties.j2',
        target=GANGLIA_CONF_FILE,
        context={
            'servers': ','.join(map('{0[host]}:{0[port]}'.format, endpoints)),
        },
    )
    set_state('hadoop-ganglia.enabled')
def install_flannel_service(etcd):
    ''' Install the flannel service. '''
    status_set('maintenance', 'Installing flannel service.')
    iface = config('iface') or get_bind_address_interface()
    context = {'iface': iface,
               'connection_string': etcd.get_connection_string(),
               'cert_path': ETCD_PATH}
    render('flannel.service', '/lib/systemd/system/flannel.service', context)
    service('enable', 'flannel')
    set_state('flannel.service.installed')
    remove_state('flannel.service.started')
def follower_config_changed():
    ''' Follower units need to render the configuration file, close and
    open ports, and restart the etcd service. '''
    bag = EtcdDatabag()
    log('Rendering defaults file for {0}'.format(bag.unit_name))
    # Render the follower's configuration with the new values.
    render('defaults', '/etc/default/etcd', bag.__dict__,
           owner='root', group='root')
    # Close the previous client port and open the new one.
    close_open_ports()
    host.service_restart('etcd')
def test_tests(self):
    tests = {'test': lambda s: s == 'foo'}
    self.assertEqual(
        render('tests.j2', context={'foo': 'foo'}, tests=tests),
        'Yep')
    self.assertEqual(
        render('tests.j2', context={'foo': 'bar'}, tests=tests),
        'Nope')
    tmpl = '{% if foo is test %}Yep{% else %}Nope{% endif %}'
    self.assertEqual(
        render(template=tmpl, context={'foo': 'foo'}, tests=tests),
        'Yep')
    self.assertEqual(
        render(template=tmpl, context={'foo': 'bar'}, tests=tests),
        'Nope')
def register_node_with_leader(cluster):
    '''
    Control flow mechanism to perform self registration with the leader.

    Before executing self registration, we must adhere to the nature of
    offline static turnup rules. If we find a GUID in the member list
    without peering information, the unit will enter a race condition and
    must wait for a clean status output before we can progress to
    self registration.
    '''
    # We're going to communicate with the leader, and we need our bootstrap
    # startup string once. TBD after that.
    etcdctl = EtcdCtl()
    bag = EtcdDatabag()

    # Assume a hiccup during registration and attempt a retry
    if bag.cluster_unit_id:
        bag.cluster = bag.registration_peer_string
        render('defaults', '/etc/default/etcd', bag.__dict__)
        host.service_restart('etcd')
        time.sleep(2)

    peers = etcdctl.member_list(leader_get('leader_address'))
    for unit in peers:
        if 'client_urls' not in peers[unit].keys():
            # we cannot register. State not attainable.
            msg = 'Waiting for unit to complete registration'
            status_set('waiting', msg)
            return

    if not bag.cluster_unit_id:
        bag.leader_address = leader_get('leader_address')
        resp = etcdctl.register(bag.__dict__)
        if resp and 'cluster_unit_id' in resp.keys() and 'cluster' in resp.keys():  # noqa
            bag.cache_registration_detail('cluster_unit_id',
                                          resp['cluster_unit_id'])
            bag.cache_registration_detail('registration_peer_string',
                                          resp['cluster'])

            bag.cluster_unit_id = resp['cluster_unit_id']
            bag.cluster = resp['cluster']

    render('defaults', '/etc/default/etcd', bag.__dict__)
    host.service_restart('etcd')
    time.sleep(2)

    # Check health status before we say we are good
    etcdctl = EtcdCtl()
    status = etcdctl.cluster_health()
    if 'unhealthy' in status:
        status_set('blocked', 'Cluster not healthy')
        return
    open_port(bag.port)
    set_state('etcd.registered')
def launch_default_ingress_controller():
    ''' Launch the Kubernetes ingress controller & default backend (404) '''
    context = {}
    context['arch'] = arch()
    addon_path = '/root/cdk/addons/{}'

    context['defaultbackend_image'] = \
        "gcr.io/google_containers/defaultbackend:1.4"
    if arch() == 's390x':
        context['defaultbackend_image'] = \
            "gcr.io/google_containers/defaultbackend-s390x:1.4"

    # Render the default http backend (404) replicationcontroller manifest
    manifest = addon_path.format('default-http-backend.yaml')
    render('default-http-backend.yaml', manifest, context)
    hookenv.log('Creating the default http backend.')
    try:
        kubectl('apply', '-f', manifest)
    except CalledProcessError as e:
        hookenv.log(e)
        hookenv.log(
            'Failed to create default-http-backend. Will attempt again next update.'
        )  # noqa
        hookenv.close_port(80)
        hookenv.close_port(443)
        return

    # Render the ingress daemon set controller manifest
    context['ingress_image'] = \
        "gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.13"
    if arch() == 's390x':
        context['ingress_image'] = \
            "docker.io/cdkbot/nginx-ingress-controller-s390x:0.9.0-beta.13"
    context['juju_application'] = hookenv.service_name()
    manifest = addon_path.format('ingress-daemon-set.yaml')
    render('ingress-daemon-set.yaml', manifest, context)
    hookenv.log('Creating the ingress daemon set.')
    try:
        kubectl('apply', '-f', manifest)
    except CalledProcessError as e:
        hookenv.log(e)
        hookenv.log(
            'Failed to create ingress controller. Will attempt again next update.'
        )  # noqa
        hookenv.close_port(80)
        hookenv.close_port(443)
        return

    set_state('kubernetes-worker.ingress.available')
    hookenv.open_port(80)
    hookenv.open_port(443)
def render_without_context(source, target):
    """ Render beat template from global state context. """
    cache = kv()
    context = dict(config())
    connected = False

    # Add deployment attributes
    model_info_cache()
    principal_unit_cache()
    context['juju_model_name'] = cache.get('model_name')
    context['juju_model_uuid'] = cache.get('model_uuid')
    context['juju_principal_unit'] = cache.get('principal_name')

    logstash_hosts = cache.get('beat.logstash')
    elasticsearch_hosts = cache.get('beat.elasticsearch')
    kafka_hosts = cache.get('beat.kafka')

    if logstash_hosts:
        connected = True
        context.update({'logstash': logstash_hosts})
    if elasticsearch_hosts:
        connected = True
        context.update({'elasticsearch': elasticsearch_hosts})
    if kafka_hosts:
        connected = True
        context.update({'kafka': kafka_hosts})

    # detect various container attributes
    if path.isdir('/var/log/containers'):
        context.update({'has_containers': True})
    if path.isdir('/var/lib/docker/containers'):
        context.update({'has_docker': True})
    if context.get('kube_logs', False) and path.isfile('/root/.kube/config'):
        context.update({'has_k8s': True})

    if 'protocols' in context.keys():
        context.update({'protocols': parse_protocols()})

    # Transform some config options into proper lists if they aren't already.
    # Do this only for non-empty values for proper jinja templating.
    for key in ('fields', 'logpath'):
        if (key in context.keys() and context[key] and
                not isinstance(context[key], list)):
            context[key] = context[key].split(' ')

    render(source, target, context)
    return connected
def launch_standalone_formation():
    """ By default we want to execute the stand-alone formation """
    # By default, the render method looks in the `templates` directory.
    # This defines src, tgt, and context. Context is used for variable
    # substitution during the rendering of the template.
    render('docker-compose.yml', 'files/voting-app/docker-compose.yml',
           config())
    # Start our application, and open the ports
    start_application()
    # Set our idempotency state
    set_state('voting-app.standalone.running')
    status_set('active', 'Ready to vote!')
def deploy_docker_bootstrap_daemon():
    ''' This is a nifty trick. We're going to init and start a secondary
    docker engine instance to run applications that can modify the
    "workload docker engine" '''
    # Render static template for init job
    status_set('maintenance', 'Configuring bootstrap docker daemon.')
    codename = host.lsb_release()['DISTRIB_CODENAME']

    # Render static template for daemon options
    render('bootstrap-docker.defaults', '/etc/default/bootstrap-docker', {},
           owner='root', group='root')

    # The templates are static, but running through the templating engine for
    # future modification. This doesn't add much overhead.
    if codename == 'trusty':
        render('bootstrap-docker.upstart', '/etc/init/bootstrap-docker.conf',
               {}, owner='root', group='root')
    else:
        # Render the service definition
        render('bootstrap-docker.service',
               '/lib/systemd/system/bootstrap-docker.service',
               {}, owner='root', group='root')
        # let systemd allocate the unix socket
        render('bootstrap-docker.socket',
               '/lib/systemd/system/bootstrap-docker.socket',
               {}, owner='root', group='root')
        # this creates the proper symlinks in /etc/systemd/system path
        check_call(split('systemctl enable /lib/systemd/system/bootstrap-docker.socket'))  # noqa
        check_call(split('systemctl enable /lib/systemd/system/bootstrap-docker.service'))  # noqa

    # start the bootstrap daemon
    service_restart('bootstrap-docker')
    set_state('bootstrap_daemon.available')
def write_webhook_yaml():
    ''' Write out the webhook yaml file for the api server to use.
    Everyone, including the leader, does this with leadership data
    set by the leader. '''
    hookenv.status_set('maintenance',
                       'Writing apiserver webhook configuration')
    context = {}
    cert = leader_get('cert').encode('utf-8')
    context['cert'] = base64.b64encode(cert).decode('utf-8')
    context['service_ip'] = leader_get('service_ip')
    render('webhook.yaml', webhook_path, context)
    aws_iam = endpoint_from_flag('endpoint.aws-iam.available')
    aws_iam.set_webhook_status(True)
    set_flag('charm.aws-iam.written-webhook')
def start_standalone():
    path = resource_get('registry')
    if path:
        check_call(['docker', 'load', '-i', path])
    else:
        status_set('blocked', 'Please attach a registry image.')
        return
    render('docker-compose.yml',
           'files/docker-registry/docker-compose.yml',
           config())
    start()
    set_state('docker-registry.standalone.running')
    status_set('active', 'Docker registry ready.')
def configure_cni(etcd, cni):
    ''' Configure Calico CNI. '''
    status_set('maintenance', 'Configuring Calico CNI')
    os.makedirs('/etc/cni/net.d', exist_ok=True)
    cni_config = cni.get_config()
    context = {
        'connection_string': etcd.get_connection_string(),
        'etcd_key_path': ETCD_KEY_PATH,
        'etcd_cert_path': ETCD_CERT_PATH,
        'etcd_ca_path': ETCD_CA_PATH,
        'kubeconfig_path': cni_config['kubeconfig_path']
    }
    render('10-canal.conflist', '/etc/cni/net.d/10-canal.conflist', context)
    cni.set_config(cidr=config('cidr'))
    set_state('canal.cni.configured')
def render_topbeat_logstash_ssl_cert():
    logstash_ssl_cert = config().get('logstash_ssl_cert')
    logstash_ssl_key = config().get('logstash_ssl_key')
    if logstash_ssl_cert and logstash_ssl_key:
        # Decode to text before rendering; passing raw bytes through jinja2
        # would write a b'...' repr into the PEM files.
        render(template='{{ data }}',
               context={'data':
                        base64.b64decode(logstash_ssl_cert).decode('utf8')},
               target=LOGSTASH_SSL_CERT,
               perms=0o444)
        render(template='{{ data }}',
               context={'data':
                        base64.b64decode(logstash_ssl_key).decode('utf8')},
               target=LOGSTASH_SSL_KEY,
               perms=0o400)
    else:
        if not logstash_ssl_cert and os.path.exists(LOGSTASH_SSL_CERT):
            os.remove(LOGSTASH_SSL_CERT)
        if not logstash_ssl_key and os.path.exists(LOGSTASH_SSL_KEY):
            os.remove(LOGSTASH_SSL_KEY)
def render_config(bag=None):
    ''' Render the etcd configuration template for the given version '''
    if not bag:
        bag = EtcdDatabag()
    # probe for 2.x compatibility
    if etcd_version().startswith('2.'):
        conf_path = "{}/etcd.conf".format(bag.etcd_conf_dir)
        render('etcd2.conf', conf_path, bag.__dict__,
               owner='root', group='root')
    # default to 3.x template behavior
    else:
        conf_path = "{}/etcd.conf.yml".format(bag.etcd_conf_dir)
        render('etcd3.conf', conf_path, bag.__dict__,
               owner='root', group='root')
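# A plausible sketch of the etcd_version() helper both render_config()
# variants branch on -- an assumption, since the helper is not included in
# this listing: it asks the local binary for its version and returns the
# bare version string (e.g. "2.3.8" or "3.2.10").
def etcd_version():
    ''' Return the installed etcd version as a string, e.g. "3.2.10". '''
    from subprocess import check_output
    out = check_output(['etcd', '--version']).decode('utf8')
    # First line looks like: "etcd Version: 3.2.10"
    return out.splitlines()[0].split(':')[1].strip()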
def install_flannel_service(etcd):
    ''' Install the flannel service. '''
    status.maintenance('Installing flannel service.')
    # keep track of our etcd conn string and cert info so we can detect
    # when they change later
    data_changed('flannel_etcd_connections', etcd.get_connection_string())
    data_changed('flannel_etcd_client_cert', etcd.get_client_credentials())
    iface = config('iface') or get_bind_address_interface()
    context = {'iface': iface,
               'connection_string': etcd.get_connection_string(),
               'cert_path': ETCD_PATH}
    render('flannel.service', '/lib/systemd/system/flannel.service', context)
    service('enable', 'flannel')
    set_state('flannel.service.installed')
    remove_state('flannel.service.started')
def deploy_service():
    hookenv.status_set('maintenance', 'Deploying aws-iam service')
    context = {}
    context['namespace'] = namespace
    render('service.yaml', service_yaml, context)
    try:
        _kubectl('apply', '-f', service_yaml)
    except CalledProcessError as e:
        hookenv.status_set('maintenance',
                           'Unable to deploy service. Will retry.')
        hookenv.log(e)
        hookenv.log(
            'Failed to create AWS_IAM service. Will attempt again next update.'
        )  # noqa
        return
    set_flag('charm.aws-iam.deployed-service')
def install_flannel_service(etcd):
    ''' Install the flannel service. '''
    status_set('maintenance', 'Installing flannel service.')
    # Fall back to the interface carrying the default route when no iface
    # is configured.
    default_interface = None
    cmd = ['route']
    output = check_output(cmd).decode('utf8')
    for line in output.split('\n'):
        if 'default' in line:
            default_interface = line.split(' ')[-1]
            break
    context = {'iface': config('iface') or default_interface,
               'connection_string': etcd.get_connection_string(),
               'cert_path': ETCD_PATH}
    render('flannel.service', '/lib/systemd/system/flannel.service', context)
    set_state('flannel.service.installed')
    remove_state('flannel.service.started')
def launch_default_ingress_controller():
    ''' Launch the Kubernetes ingress controller & default backend (404) '''
    context = {}
    context['arch'] = arch()
    addon_path = '/root/cdk/addons/{}'

    context['defaultbackend_image'] = \
        "gcr.io/google_containers/defaultbackend:1.4"
    if arch() == 's390x':
        context['defaultbackend_image'] = \
            "gcr.io/google_containers/defaultbackend-s390x:1.4"

    # Render the default http backend (404) replicationcontroller manifest
    manifest = addon_path.format('default-http-backend.yaml')
    render('default-http-backend.yaml', manifest, context)
    hookenv.log('Creating the default http backend.')
    try:
        kubectl('apply', '-f', manifest)
    except CalledProcessError as e:
        hookenv.log(e)
        hookenv.log('Failed to create default-http-backend. Will attempt again next update.')  # noqa
        hookenv.close_port(80)
        hookenv.close_port(443)
        return

    # Render the ingress daemon set controller manifest
    context['ingress_image'] = \
        "gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.13"
    if arch() == 's390x':
        context['ingress_image'] = \
            "docker.io/cdkbot/nginx-ingress-controller-s390x:0.9.0-beta.13"
    context['juju_application'] = hookenv.service_name()
    manifest = addon_path.format('ingress-daemon-set.yaml')
    render('ingress-daemon-set.yaml', manifest, context)
    hookenv.log('Creating the ingress daemon set.')
    try:
        kubectl('apply', '-f', manifest)
    except CalledProcessError as e:
        hookenv.log(e)
        hookenv.log('Failed to create ingress controller. Will attempt again next update.')  # noqa
        hookenv.close_port(80)
        hookenv.close_port(443)
        return

    set_state('kubernetes-worker.ingress.available')
    hookenv.open_port(80)
    hookenv.open_port(443)
def render_filebeat_logstash_ssl_cert():
    logstash_ssl_cert = config().get('logstash_ssl_cert')
    logstash_ssl_key = config().get('logstash_ssl_key')
    if logstash_ssl_cert and logstash_ssl_key:
        # Decode to text before rendering; passing raw bytes through jinja2
        # would write a b'...' repr into the PEM files.
        render(template='{{ data }}',
               context={'data':
                        base64.b64decode(logstash_ssl_cert).decode('utf8')},
               target=LOGSTASH_SSL_CERT,
               perms=0o444)
        render(template='{{ data }}',
               context={'data':
                        base64.b64decode(logstash_ssl_key).decode('utf8')},
               target=LOGSTASH_SSL_KEY,
               perms=0o400)
    else:
        if not logstash_ssl_cert and os.path.exists(LOGSTASH_SSL_CERT):
            os.remove(LOGSTASH_SSL_CERT)
        if not logstash_ssl_key and os.path.exists(LOGSTASH_SSL_KEY):
            os.remove(LOGSTASH_SSL_KEY)
def install_flannel_service():
    ''' Install the flannel service. '''
    status_set('maintenance', 'Installing flannel service.')
    # keep track of our etcd connections so we can detect when they
    # change later
    etcd = endpoint_from_flag('etcd.tls.available')
    etcd_connections = etcd.get_connection_string()
    data_changed('flannel_etcd_connections', etcd_connections)
    iface = config('iface') or get_bind_address_interface()
    context = {'iface': iface,
               'connection_string': etcd_connections,
               'cert_path': ETCD_PATH}
    render('flannel.service', '/lib/systemd/system/flannel.service', context)
    service('enable', 'flannel')
    set_state('flannel.service.installed')
    remove_state('flannel.service.started')
def replace_postgres_container(postgres):
    """ Prepare the data for the docker-compose template """
    # Block the stand-alone profile
    set_state('block_standalone')
    status_set('maintenance', 'Configuring charm for external Postgres.')

    # Grab the connection details for the related postgres unit
    pgdata = {'pg_host': postgres.host(),
              'pg_user': postgres.user(),
              'pg_pass': postgres.password(),
              'pg_db': postgres.database()}

    # Create a merged dict with the config values we expect
    context = {}
    context.update(config())
    context.update(pgdata)

    render('docker-compose.yml', 'files/voting-app/docker-compose.yml',
           context)
    start_application()
    status_set('active', 'Ready to vote!')
def apply_webhook_deployment():
    hookenv.status_set('maintenance', 'Deploying webhook')
    context = {}
    context['namespace'] = namespace
    context['cluster_id'] = leader_get('cluster_id')
    context['image'] = hookenv.config('image')
    render('aws-iam-deployment.yaml', deployment_yaml, context)
    try:
        _kubectl('apply', '-f', deployment_yaml)
    except CalledProcessError as e:
        hookenv.status_set('maintenance',
                           'Unable to deploy webhook. Will retry.')
        hookenv.log(e)
        hookenv.log(
            'Failed to create AWS_IAM deployment. Will attempt again next update.'
        )  # noqa
        return
    set_flag('charm.aws-iam.deployment-started')
def render_without_context(source, target):
    """ Render beat template from global state context. """
    cache = kv()
    context = dict(config())
    connected = False

    # Add deployment attributes
    model_info_cache()
    principal_unit_cache()
    context['juju_model_name'] = cache.get('model_name')
    context['juju_model_uuid'] = cache.get('model_uuid')
    context['juju_principal_unit'] = cache.get('principal_name')

    logstash_hosts = cache.get('beat.logstash')
    elasticsearch_hosts = cache.get('beat.elasticsearch')
    kafka_hosts = cache.get('beat.kafka')

    if logstash_hosts:
        connected = True
        context.update({'logstash': logstash_hosts})
    if elasticsearch_hosts:
        connected = True
        context.update({'elasticsearch': elasticsearch_hosts})
    if kafka_hosts:
        connected = True
        context.update({'kafka': kafka_hosts})

    if 'protocols' in context.keys():
        context.update({'protocols': parse_protocols()})

    # Transform some config options into proper lists if they aren't already.
    # Do this only for non-empty values for proper jinja templating.
    for key in ('fields', 'logpath'):
        if (key in context.keys() and context[key] and
                not isinstance(context[key], list)):
            context[key] = context[key].split(' ')

    render(source, target, context)
    return connected
def initialize_ca():
    """ Initialize the certificate authority with keys """
    # write out the CA CSR json to the primary store
    render('csr_ca.json', '/etc/cfssl/csr_ca.json', config())

    # Generate the CA CSR from CFSSL and capture output
    response = cfssl.gencert('/etc/cfssl/csr_ca.json', initca=True)
    temp = NamedTemporaryFile(suffix='.json')
    # write out the response to a temporary file, and generate the
    # certificates from the response json.
    # TODO: Red October integration to keep our CA Key from being
    # unencrypted on disk.
    with open(temp.name, 'w') as fp:
        fp.write(response)
        # when we are in context, we need to flush the buffer to
        # actually write
        fp.flush()

    # Generate TLS keys from the JSON response
    cfssljson.parse("/etc/cfssl/ca", bare=True, f=temp.name)
    set_state('certificate-authority.ca.placed')
def test_write(self):
    with TemporaryDirectory() as tmpdir:
        uid = os.geteuid()
        user = pwd.getpwuid(uid).pw_name
        gid = os.getegid()
        group = grp.getgrgid(gid).gr_name
        out_dir = os.path.join(tmpdir, 'test')
        out_file = os.path.join(out_dir, 'output.txt')
        render('nested/test.j2', out_file,
               owner=user, group=group, perms=0o400)
        with open(out_file) as of:
            self.assertEqual(of.read(), 'test')
        dir_stat = os.stat(out_dir)
        self.assertEqual(dir_stat.st_mode & 0o777, 0o700)
        self.assertEqual(dir_stat.st_uid, uid)
        self.assertEqual(dir_stat.st_gid, gid)
        file_stat = os.stat(out_file)
        self.assertEqual(file_stat.st_mode & 0o777, 0o400)
        self.assertEqual(file_stat.st_uid, uid)
        self.assertEqual(file_stat.st_gid, gid)
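# The first assertion implies the nested/test.j2 fixture renders to the
# literal string 'test' with no trailing newline (an inference; the fixture
# is not shown). The real subject of the test is that render() creates the
# missing parent directory with mode 0o700 and applies the requested
# owner/group/perms to the written file.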
def render_init_scripts():
    ''' We have related to either an api server or a load balancer connected
    to the apiserver. Render the config files and prepare for launch '''
    context = {}
    context.update(hookenv.config())

    layer_options = layer.options('tls-client')
    ca_cert_path = layer_options.get('ca_certificate_path')
    server_cert_path = layer_options.get('server_certificate_path')
    server_key_path = layer_options.get('server_key_path')

    unit_name = os.getenv('JUJU_UNIT_NAME').replace('/', '-')
    context.update({
        'kube_allow_priv': FlagManager('KUBE_ALLOW_PRIV').to_s(),
        'kube_api_endpoint': FlagManager('KUBE_MASTER').to_s(),
        'JUJU_UNIT_NAME': unit_name,
    })

    kubelet_opts = FlagManager('kubelet')
    kubelet_opts.add('--require-kubeconfig', None)
    kubelet_opts.add('--kubeconfig', kubeconfig_path)
    kubelet_opts.add('--network-plugin', 'cni')
    kubelet_opts.add('--anonymous-auth', 'false')
    kubelet_opts.add('--client-ca-file', ca_cert_path)
    kubelet_opts.add('--tls-cert-file', server_cert_path)
    kubelet_opts.add('--tls-private-key-file', server_key_path)
    context['kubelet_opts'] = kubelet_opts.to_s()

    kube_proxy_opts = FlagManager('kube-proxy')
    kube_proxy_opts.add('--kubeconfig', kubeconfig_path)
    context['kube_proxy_opts'] = kube_proxy_opts.to_s()

    os.makedirs('/var/lib/kubelet', exist_ok=True)

    render('kube-default', '/etc/default/kube-default', context)
    render('kubelet.defaults', '/etc/default/kubelet', context)
    render('kubelet.service', '/lib/systemd/system/kubelet.service', context)
    render('kube-proxy.defaults', '/etc/default/kube-proxy', context)
    render('kube-proxy.service', '/lib/systemd/system/kube-proxy.service',
           context)
def nfs_storage(mount):
    '''NFS on kubernetes requires nfs config rendered into a deployment of
    the nfs client provisioner. That will handle the persistent volume claims
    with no persistent volume to back them.'''
    mount_data = get_first_mount(mount)
    if not mount_data:
        return

    addon_path = '/root/cdk/addons/{}'

    # Render the NFS deployment
    manifest = addon_path.format('nfs-provisioner.yaml')
    render('nfs-provisioner.yaml', manifest, mount_data)
    hookenv.log('Creating the nfs provisioner.')
    try:
        kubectl('apply', '-f', manifest)
    except CalledProcessError as e:
        hookenv.log(e)
        hookenv.log('Failed to create nfs provisioner. Will attempt again next update.')  # noqa
        return

    set_state('nfs.configured')
def launch_default_ingress_controller():
    ''' Launch the Kubernetes ingress controller & default backend (404) '''
    context = {}
    context['arch'] = arch()
    addon_path = '/etc/kubernetes/addons/{}'
    manifest = addon_path.format('default-http-backend.yaml')

    # Render the default http backend (404) replicationcontroller manifest
    render('default-http-backend.yaml', manifest, context)
    hookenv.log('Creating the default http backend.')
    kubectl_manifest('create', manifest)

    # Render the ingress replication controller manifest
    manifest = addon_path.format('ingress-replication-controller.yaml')
    render('ingress-replication-controller.yaml', manifest, context)
    if kubectl_manifest('create', manifest):
        hookenv.log('Creating the ingress replication controller.')
        set_state('kubernetes-worker.ingress.available')
        hookenv.open_port(80)
        hookenv.open_port(443)
    else:
        hookenv.log('Failed to create ingress controller. Will attempt again next update.')  # noqa
        hookenv.close_port(80)
        hookenv.close_port(443)
def render_without_context(source, target):
    ''' Render beat template from global state context '''
    cache = kv()
    context = config()
    logstash_hosts = cache.get('beat.logstash')
    elasticsearch_hosts = cache.get('beat.elasticsearch')
    context['principal_unit'] = cache.get('principal_name')

    if logstash_hosts:
        context.update({'logstash': logstash_hosts})
    if elasticsearch_hosts:
        context.update({'elasticsearch': elasticsearch_hosts})
    if 'protocols' in context.keys():
        context.update({'protocols': parse_protocols()})

    # Split the log paths
    if 'logpath' in context.keys() and not isinstance(context['logpath'], list):  # noqa
        context['logpath'] = context['logpath'].split(' ')

    render(source, target, context)
def render_init_scripts(api_servers):
    ''' We have related to either an api server or a load balancer connected
    to the apiserver. Render the config files and prepare for launch '''
    context = {}
    context.update(hookenv.config())

    # Get the tls paths from the layer data.
    layer_options = layer.options('tls-client')
    context['ca_cert_path'] = layer_options.get('ca_certificate_path')
    context['client_cert_path'] = layer_options.get('client_certificate_path')
    context['client_key_path'] = layer_options.get('client_key_path')

    unit_name = os.getenv('JUJU_UNIT_NAME').replace('/', '-')
    context.update({'kube_api_endpoint': ','.join(api_servers),
                    'JUJU_UNIT_NAME': unit_name})

    # Create a flag manager for kubelet to render kubelet_opts.
    kubelet_opts = FlagManager('kubelet')
    # Declare to kubelet it needs to read from kubeconfig
    kubelet_opts.add('--require-kubeconfig', None)
    kubelet_opts.add('--kubeconfig', kubeconfig_path)
    kubelet_opts.add('--network-plugin', 'cni')
    context['kubelet_opts'] = kubelet_opts.to_s()

    # Create a flag manager for kube-proxy to render kube_proxy_opts.
    kube_proxy_opts = FlagManager('kube-proxy')
    kube_proxy_opts.add('--kubeconfig', kubeconfig_path)
    context['kube_proxy_opts'] = kube_proxy_opts.to_s()

    os.makedirs('/var/lib/kubelet', exist_ok=True)

    # Set the user when rendering config
    context['user'] = '******'

    render('kube-default', '/etc/default/kube-default', context)
    render('kubelet.defaults', '/etc/default/kubelet', context)
    render('kube-proxy.defaults', '/etc/default/kube-proxy', context)
    render('kube-proxy.service', '/lib/systemd/system/kube-proxy.service',
           context)
    render('kubelet.service', '/lib/systemd/system/kubelet.service', context)
def setup_init_scripts(self):
    templates_list = ['history', 'master', 'slave']
    for template in templates_list:
        if host.init_is_systemd():
            template_path = '/etc/systemd/system/spark-{}.service'.format(
                template)
        else:
            template_path = '/etc/init/spark-{}.conf'.format(template)
        if os.path.exists(template_path):
            os.remove(template_path)

    self.stop()

    mode = hookenv.config()['spark_execution_mode']
    templates_list = ['history']
    if mode == 'standalone':
        templates_list.append('master')
        templates_list.append('slave')

    for template in templates_list:
        template_name = '{}-upstart.conf'.format(template)
        template_path = '/etc/init/spark-{}.conf'.format(template)
        if host.init_is_systemd():
            template_name = '{}-systemd.conf'.format(template)
            template_path = '/etc/systemd/system/spark-{}.service'.format(
                template)
        render(
            template_name,
            template_path,
            context={
                'spark_bin': self.dist_config.path('spark'),
                'master': self.get_master()
            },
        )
        if host.init_is_systemd():
            utils.run_as('root', 'systemctl', 'enable',
                         'spark-{}.service'.format(template))

    if host.init_is_systemd():
        utils.run_as('root', 'systemctl', 'daemon-reload')
def replace_redis_container(redis):
    """ Prepare the data for the docker-compose template """
    # Block the stand alone profile
    set_state('block_standalone')
    status_set('maintenance', 'Configuring charm for external Redis.')

    hosts = []
    # iterate over all the connected redis hosts
    for unit in redis.redis_data():
        hosts.append(unit['private_address'])
    redis_host = ','.join(hosts)

    # Create a merged dict with the config values we expect
    context = {}
    context.update(config())
    context.update({'redis_host': redis_host})

    render('docker-compose.yml', 'files/voting-app/docker-compose.yml',
           context)
    start_application()
    status_set('active', 'Ready to vote!')
    # Set our idempotency state
    set_state('voting-app.running')