def persist_state():
    """Fake persistent state by calling helpers that modify unitdata.kv"""
    states = [k for k in bus.get_states().keys()
              if k.startswith('plugins') or k.startswith('extra_plugins')]
    helpers.any_file_changed(telegraf.list_config_files())
    if states:
        helpers.data_changed('active_plugins', states)

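# A hedged illustration of the priming pattern used above (the file name is
# hypothetical): both helpers record the current value in unitdata.kv and
# return True the first time they see it, then False until something actually
# changes. Calling them in persist_state() therefore seeds the kv so later
# change checks do not fire spuriously.
from charms.reactive import helpers

helpers.any_file_changed(['/etc/telegraf/telegraf.conf'])  # True: hash recorded
helpers.any_file_changed(['/etc/telegraf/telegraf.conf'])  # False: unchanged since last check
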
def install_snaps():
    any_file_changed(get_snap_resource_paths())
    channel = hookenv.config('channel')
    hookenv.status_set('maintenance', 'Installing kubectl snap')
    snap.install('kubectl', channel=channel, classic=True)
    hookenv.status_set('maintenance', 'Installing kubelet snap')
    snap.install('kubelet', channel=channel, classic=True)
    hookenv.status_set('maintenance', 'Installing kube-proxy snap')
    snap.install('kube-proxy', channel=channel, classic=True)
    set_state('kubernetes-worker.snaps.installed')
    set_state('kubernetes-worker.restart-needed')
    remove_state('kubernetes-worker.snaps.upgrade-needed')
    remove_state('kubernetes-worker.snaps.upgrade-specified')

def configure(self):
    restart = False
    server = "{}:{}".format(self.charm_config['listen'],
                            self.charm_config['port'])
    hookenv.log("Configuring taskd server {}".format(server), 'DEBUG')
    templating.render('config.j2', '/var/lib/taskd/config', {'server': server})
    if any_file_changed(['/var/lib/taskd/config']):
        restart = True

    if self.charm_config['tls_cn']:
        cn = self.charm_config['tls_cn']
    else:
        cn = socket.getfqdn()
    templating.render(
        'vars.j2', '/usr/share/taskd/pki/vars', {
            'expiry': self.charm_config['tls_expiry'],
            'org': self.charm_config['tls_org'],
            'cn': cn,
            'country': self.charm_config['tls_country'],
            'state': self.charm_config['tls_state'],
            'locality': self.charm_config['tls_locality'],
        })
    if any_file_changed(['/usr/share/taskd/pki/vars']):
        p = subprocess.Popen(['/usr/share/taskd/pki/generate'],
                             cwd='/usr/share/taskd/pki')
        p.wait()
        restart = True

    fullport = "{}/tcp".format(self.charm_config['port'])
    for port in hookenv.opened_ports():
        if port != fullport:
            cport, cproto = port.split('/')
            hookenv.close_port(cport, cproto)
    if fullport not in hookenv.opened_ports():
        hookenv.open_port(self.charm_config['port'], 'tcp')

    self.fix_permissions()
    self.start_enable()
    if restart:
        self.restart()

def restart_zookeeper_if_config_changed():
    """Restart Zookeeper if zoo.cfg has changed.

    As peers come and go, zoo.cfg will be updated. When that file changes,
    restart the Zookeeper service and set an appropriate status message.
    """
    # Possibly update the bind address
    network_interface = hookenv.config().get('network_interface')
    if data_changed("zookeeper.bind_address", network_interface):
        zk = Zookeeper()
        zk.update_bind_address()

    zoo_cfg = DistConfig().path('zookeeper_conf') / 'zoo.cfg'
    if any_file_changed([zoo_cfg]):
        hookenv.status_set('maintenance',
                           'Server config changed: restarting Zookeeper')
        zk = Zookeeper()
        zk.stop()
        zk.start()
        zk_count = int(zk.get_zk_count())
        extra_status = ""
        if zk_count < 3:
            extra_status = ": less than 3 is suboptimal"
        elif zk_count % 2 == 0:
            extra_status = ": even number is suboptimal"
        hookenv.status_set('active',
                           'Ready (%d zk units%s)' % (zk_count, extra_status))
    else:
        # Make sure Zookeeper is running in any case
        zk = Zookeeper()
        zk.start()
        zk.open_ports()

def setup_non_leader_authentication():
    service_key = '/root/cdk/serviceaccount.key'
    basic_auth = '/root/cdk/basic_auth.csv'
    known_tokens = '/root/cdk/known_tokens.csv'

    keys = [service_key, basic_auth, known_tokens]
    # The source of truth for non-leaders is the leader.
    # Therefore we overwrite_local with whatever the leader has.
    if not get_keys_from_leader(keys, overwrite_local=True):
        # The keys were not retrieved. Non-leaders have to retry.
        return

    if not any_file_changed(keys) and is_state('authentication.setup'):
        # No change detected and we have already set up authentication
        return

    hookenv.status_set('maintenance', 'Rendering authentication templates.')
    api_opts = FlagManager('kube-apiserver')
    api_opts.add('basic-auth-file', basic_auth)
    api_opts.add('token-auth-file', known_tokens)
    api_opts.add('service-account-key-file', service_key)

    controller_opts = FlagManager('kube-controller-manager')
    controller_opts.add('service-account-private-key-file', service_key)

    remove_state('kubernetes-master.components.started')
    set_state('authentication.setup')

def configure_flume(sink):
    hookenv.status_set('maintenance', 'Configuring Flume')
    flume = Flume()
    flume.configure_flume({'agents': sink.agents()})
    if any_file_changed([flume.config_file]):
        flume.restart()
    hookenv.status_set('active', 'Ready')
    set_state('flume-twitter.started')

def restart_grafana():
    if not host.service_running(SVCNAME):
        hookenv.log('Starting {}...'.format(SVCNAME))
        host.service_start(SVCNAME)
    elif any_file_changed([GRAFANA_INI]):
        hookenv.log('Restarting {}, config file changed...'.format(SVCNAME))
        host.service_restart(SVCNAME)
    hookenv.status_set('active', 'Ready')
    set_state('grafana.started')

def start_collectd():
    if not host.service_running('collectd'):
        hookenv.log('Starting collectd...')
        host.service_start('collectd')
        set_state('collectd.started')
    if any_file_changed(['/etc/collectd/collectd.conf']):
        hookenv.log('Restarting collectd, config file changed...')
        host.service_restart('collectd')
    remove_state('collectd.start')

def _install_local(path, **kw):
    key = 'snap.local.{}'.format(path)
    if data_changed(key, kw) or any_file_changed([path]):
        cmd = ['snap', 'install']
        cmd.extend(_snap_args(**kw))
        cmd.append('--dangerous')
        cmd.append(path)
        hookenv.log('Installing {} from local resource'.format(path))
        subprocess.check_call(cmd, universal_newlines=True)

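# Hedged usage sketch (the path and keyword arguments are hypothetical): a
# re-install is triggered either when the install flags change or when the
# snap file itself is replaced; otherwise the call is a no-op.
_install_local('/var/cache/snaps/example.snap', channel='stable', classic=True)
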
def start_collectd_exporter():
    if not host.service_running('collectd-exporter'):
        hookenv.log('Starting collectd-exporter...')
        host.service_start('collectd-exporter')
        set_state('collectd-exporter.started')
    if any_file_changed(['/etc/default/collectd-exporter']):
        # Restart; reload breaks it
        hookenv.log('Restarting collectd-exporter, config file changed...')
        host.service_restart('collectd-exporter')
    remove_state('collectd-exporter.start')

def install_hbase(hdfs, zk):
    '''
    Anytime our dependencies are available, check to see if we have a valid
    reason to (re)install. These include:
    - initial install
    - config change
    - Zookeeper unit has joined/departed
    '''
    zks = zk.zookeepers()
    deployment_matrix = {
        'zookeepers': zks,
    }

    # Handle nuances when installing versus re-installing
    if not is_state('hbase.installed'):
        prefix = "installing"

        # On initial install, prime our kv with the current deployment matrix.
        # Subsequent calls will use this to determine if a reinstall is needed.
        data_changed('deployment_matrix', deployment_matrix)
    else:
        prefix = "configuring"

        # We do not need to reinstall when peers come and go; that is covered
        # by other handlers below.
        if is_state('hbpeer.departed') or is_state('hbpeer.joined'):
            return

        # Return if neither config nor our matrix has changed
        if not (is_state('config.changed') or
                data_changed('deployment_matrix', deployment_matrix)):
            return

    hookenv.status_set('maintenance', '{} hbase'.format(prefix))
    hookenv.log("{} hbase with: {}".format(prefix, deployment_matrix))
    hbase = HBase()
    hosts = {}
    hosts['namenode'] = hdfs.namenodes()[0]
    hbase.configure(hosts, zks)

    # Ensure our IP is in the regionservers list; restart if the rs conf
    # file has changed.
    hbase.update_regionservers([hookenv.unit_private_ip()])
    if any_file_changed(['/etc/hbase/conf/regionservers']):
        hbase.restart()

    # Set app version string for juju status output
    hbase_version = get_package_version('hbase-master') or 'unknown'
    hookenv.application_version_set(hbase_version)

    hbase.open_ports()
    report_status()
    set_state('hbase.installed')

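# The priming comment above in miniature: charms.reactive's data_changed()
# returns True the first time a key is seen (recording the value as a side
# effect), so one call at install time absorbs the "first sighting" and the
# next hook invocation only reports genuine changes. The key and values below
# are hypothetical.
from charms.reactive.helpers import data_changed

data_changed('example.matrix', {'zookeepers': ['zk/0']})  # True: first sighting, recorded
data_changed('example.matrix', {'zookeepers': ['zk/0']})  # False: value unchanged
data_changed('example.matrix', {'zookeepers': ['zk/1']})  # True: value differs
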
def snap_resources_changed():
    '''
    Check if the snapped resources have changed. The first time this method
    is called, it will report "unknown".

    Returns: "yes" if a snap resource file has changed,
             "no" if the snap resources are the same as on the last call,
             "unknown" if this is the first time this method is called.
    '''
    db = unitdata.kv()
    resources = ['kubectl', 'kube-apiserver', 'kube-controller-manager',
                 'kube-scheduler', 'cdk-addons']
    paths = [hookenv.resource_get(resource) for resource in resources]
    if db.get('snap.resources.fingerprint.initialised'):
        result = 'yes' if any_file_changed(paths) else 'no'
        return result
    else:
        db.set('snap.resources.fingerprint.initialised', True)
        any_file_changed(paths)
        return 'unknown'

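# Hedged usage sketch: how a caller might act on the three-valued result.
# Treating "unknown" as benign is an assumption made here; the first call
# merely initialises the fingerprints, so there is nothing to compare yet.
result = snap_resources_changed()
if result == 'yes':
    set_upgrade_needed()
elif result == 'unknown':
    hookenv.log('Resource fingerprints initialised; nothing to compare yet')
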
def configure_flume(sink, kafka):
    hookenv.status_set('maintenance', 'Configuring Flume')
    flume = Flume()
    flume.configure_flume({
        'agents': sink.agents(),
        'zookeepers': kafka.zookeepers(),
    })
    flume.configure_zookeeper()
    if any_file_changed([flume.config_file]):
        flume.restart()
    hookenv.status_set('active', 'Ready')
    set_state('flume-kafka.started')

def _configure_local_client():
    '''Configure daemon.json and certs for the local docker client.'''
    charm_config = hookenv.config()

    # Client config depends on whether the registry is secure or insecure
    netloc = get_netloc()
    if is_flag_set('charm.docker-registry.tls-enabled'):
        insecure_registry = ''

        # If our CA changed, install it into the default system location
        # (docker client > 1.13 will use this).
        tls_ca = charm_config.get('tls-ca-path', '')
        if os.path.isfile(tls_ca) and any_file_changed([tls_ca]):
            ca_content = None
            with open(tls_ca, 'rb') as f:
                ca_content = f.read()
            if ca_content:
                host.install_ca_cert(ca_content)

        # Put our certs where the docker client expects to find them.
        # NB: these are the same certs used to serve the registry, but have
        # strict path requirements when used for docker client auth.
        client_tls_dst = '/etc/docker/certs.d/{}'.format(netloc)
        os.makedirs(client_tls_dst, exist_ok=True)
        tls_cert = charm_config.get('tls-cert-path', '')
        if os.path.isfile(tls_cert) and any_file_changed([tls_cert]):
            tls_cert_link = '{}/client.cert'.format(client_tls_dst)
            _remove_if_exists(tls_cert_link)
            os.symlink(tls_cert, tls_cert_link)
        tls_key = charm_config.get('tls-key-path', '')
        if os.path.isfile(tls_key) and any_file_changed([tls_key]):
            tls_key_link = '{}/client.key'.format(client_tls_dst)
            _remove_if_exists(tls_key_link)
            os.symlink(tls_key, tls_key_link)
    else:
        insecure_registry = '"{}"'.format(netloc)

    templating.render('daemon.json', '/etc/docker/daemon.json',
                      {'registries': insecure_registry})
    host.service_restart('docker')

def start_or_restart():
    states = sorted([k for k in get_states().keys()
                     if k.startswith('plugins') or k.startswith('extra_plugins')])
    config_files_changed = helpers.any_file_changed(list_config_files())
    active_plugins_changed = helpers.data_changed('active_plugins',
                                                  states or '')
    if config_files_changed or active_plugins_changed:
        hookenv.log("Restarting telegraf")
        host.service_restart('telegraf')
    else:
        hookenv.log("Not restarting: active_plugins_changed={} | "
                    "config_files_changed={}".format(active_plugins_changed,
                                                     config_files_changed))

def check_for_upgrade_needed():
    '''An upgrade-charm event was triggered by Juju; react to it here.'''
    hookenv.status_set('maintenance', 'Checking resources')

    migrate_from_pre_snaps()
    add_rbac_roles()
    set_state('reconfigure.authentication.setup')
    remove_state('authentication.setup')

    resources = ['kubectl', 'kube-apiserver', 'kube-controller-manager',
                 'kube-scheduler', 'cdk-addons']
    paths = [hookenv.resource_get(resource) for resource in resources]
    if any_file_changed(paths):
        set_upgrade_needed()

def configure_flume(sink):
    flume = Flume()
    flume.configure_flume({'agents': sink.agents()})
    if any_file_changed([flume.config_file]):
        # The port is currently hard-coded in the rsyslog-forwarder-ha charm;
        # must run as root to listen on a low-numbered UDP port.
        hookenv.status_set('maintenance', 'Configuring Flume')
        hookenv.open_port(hookenv.config('source_port'))
        flume.restart(user='******')
    set_state('flume-syslog.started')

    syslog = RelationBase.from_state('syslog.joined')
    if syslog is None:
        hookenv.status_set('active', 'Ready')
    else:
        count = syslog.client_count()
        hookenv.status_set('active', 'Ready (Syslog sources: %s)' % count)

def handle_peers():
    '''
    We use HBase peers to keep track of the RegionServer IP addresses in a
    cluster. Use get_nodes() from the appropriate peer relation to retrieve
    a list of peer tuples, e.g.:

        [('hbase/0', '172.31.5.161'), ('hbase/2', '172.31.5.11')]

    Depending on the state, this handler will add or remove peer IP
    addresses from the regionservers config file.
    '''
    if is_state('hbpeer.departed'):
        hbpeer = RelationBase.from_state('hbpeer.departed')
        is_departing = True
        message = 'removing hbase peer(s)'
    else:
        hbpeer = RelationBase.from_state('hbpeer.joined')
        is_departing = False
        message = 'adding hbase peer(s)'

    # Make sure we have a valid relation object
    if hbpeer:
        nodes = hbpeer.get_nodes()
    else:
        hookenv.log('Ignoring unknown HBase peer state')
        return

    hookenv.status_set('maintenance', message)
    hbase = HBase()
    ip_addrs = [node[1] for node in nodes]
    hookenv.log('{}: {}'.format(message, ip_addrs))
    hbase.update_regionservers(ip_addrs, remove=is_departing)

    # NB: the rs conf file will always change when handling peer updates, but
    # we still include this condition to keep the files_changed kv current.
    if any_file_changed(['/etc/hbase/conf/regionservers']):
        hbase.restart()

    # Dismiss the appropriate state now that we've handled the peer
    if is_departing:
        hbpeer.dismiss_departed()
    else:
        hbpeer.dismiss_joined()
    report_status()

def update_config(zk):
    """Configure ready zookeepers and restart kafka if needed.

    Also restart if network_interface has changed.

    As zks come and go, server.properties will be updated. When that file
    changes, restart Kafka and set appropriate status messages.
    """
    hookenv.log('Checking Zookeeper configuration')
    kafka = Kafka()
    zks = zk.zookeepers()
    network_interface = hookenv.config().get('network_interface')
    kafka.configure_kafka(zks, network_interface)
    server_cfg = DistConfig().path('kafka_conf') / 'server.properties'
    if any_file_changed([server_cfg]):
        hookenv.status_set('maintenance',
                           'Server config changed: restarting Kafka')
        hookenv.log('Server config changed: restarting Kafka')
        kafka.restart()
    hookenv.status_set('active', 'Ready')

def setup_nrpe_checks(nagios):
    config = hookenv.config()
    options = {
        'check_name': 'check_collectd',
        'description': 'Verify that collectd process is running',
        'servicegroups': config['nagios_servicegroups'],
        'command': '/usr/lib/nagios/plugins/check_procs -C collectd -c 1:1',
    }
    options['hostname'] = '{}-{}'.format(config['nagios_context'],
                                         hookenv.local_unit()).replace('/', '-')
    render(source='nagios-export.jinja2',
           target='/var/lib/nagios/export/service__{}_collectd.cfg'.format(
               options['hostname']),
           context=options)
    render(source='nrpe-config.jinja2',
           target='/etc/nagios/nrpe.d/check_collectd.cfg',
           context=options)
    if any_file_changed(['/etc/nagios/nrpe.d/check_collectd.cfg']):
        host.service_reload('nagios-nrpe-server')

def configure_pgloader(self):
    """Render the templated commands.load file for pgloader to
    self.gitlab_commands_file."""
    hookenv.log("Rendering pgloader commands.load file to /etc/gitlab",
                hookenv.INFO)
    templating.render(
        "commands.load.j2",
        self.gitlab_commands_file,
        {
            "pgsql_host": self.kv.get("pgsql_host"),
            "pgsql_port": self.kv.get("pgsql_port"),
            "pgsql_database": self.kv.get("pgsql_db"),
            "pgsql_user": self.kv.get("pgsql_user"),
            "pgsql_password": self.kv.get("pgsql_pass"),
            "mysql_host": self.kv.get("mysql_host"),
            "mysql_port": self.kv.get("mysql_port"),
            "mysql_database": self.kv.get("mysql_db"),
            "mysql_user": self.kv.get("mysql_user"),
            "mysql_password": self.kv.get("mysql_pass"),
        },
    )
    if any_file_changed([self.gitlab_commands_file]):
        self.run_pgloader()

def render_ircd_config(self):
    """Render the configuration for Matrix ircd."""
    hookenv.log(
        "Rendering IRCd configuration to {}".format(self.matrix_ircd_config),
        hookenv.DEBUG,
    )
    if self.pgsql_configured():
        render_result = templating.render(
            "matrix-ircd.env.j2",
            self.matrix_ircd_config,
            {
                "home_server": self.get_internal_url(),
                "bind": "{}:{}".format(self.irc_internal_listen,
                                       self.irc_internal_port),
            },
        )
        if render_result:
            if any_file_changed([self.matrix_ircd_config]):
                self.restart_matrix_ircd()
            return True
    return False

def when_file_changed(*filenames):
    """Check if files have changed since the last time they were checked."""
    return helpers.any_file_changed(filenames)

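# For reference, a minimal sketch of the contract these snippets rely on.
# This is NOT the charms.reactive implementation; the key scheme and hash
# choice are assumptions for illustration only.
import hashlib
import os

from charmhelpers.core import unitdata


def _any_file_changed_sketch(filenames):
    """Return True if any file is new or differs from its last-seen hash."""
    db = unitdata.kv()
    changed = False
    for filename in filenames:
        key = 'sketch.file.{}'.format(filename)  # assumed key scheme
        old_hash = db.get(key)
        if os.path.exists(filename):
            with open(filename, 'rb') as f:
                new_hash = hashlib.md5(f.read()).hexdigest()
        else:
            new_hash = None
        if new_hash != old_hash:
            changed = True
        db.set(key, new_hash)  # always refresh the stored fingerprint
    return changed
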
def check_resources_for_upgrade_needed():
    hookenv.status_set('maintenance', 'Checking resources')
    resources = ['kubectl', 'kubelet', 'kube-proxy']
    paths = [hookenv.resource_get(resource) for resource in resources]
    if any_file_changed(paths):
        set_upgrade_needed()

def templates_changed(tmpl_list):
    return any_file_changed(['templates/{}'.format(x) for x in tmpl_list])

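# Hedged usage sketch (the template name is hypothetical): note that the
# check tracks the *source* templates under templates/, not the rendered
# output files, so it fires on charm upgrades that ship new templates.
if templates_changed(['telegraf.conf.tmpl']):
    hookenv.log('Template changed; config should be re-rendered')
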
def check_resources_for_upgrade_needed():
    hookenv.status_set('maintenance', 'Checking resources')
    if any_file_changed(get_snap_resource_paths()):
        set_upgrade_needed()

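# For context, a hedged sketch of what the get_snap_resource_paths() helper
# used above might look like (the real one lives in the charm's shared layer;
# the resource list mirrors the explicit variant of this function earlier in
# this section):
def _get_snap_resource_paths_sketch():
    resources = ['kubectl', 'kubelet', 'kube-proxy']
    return [hookenv.resource_get(resource) for resource in resources]
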
def render_synapse_config(self):
    """Render the configuration for Matrix synapse."""
    hookenv.log(
        "Rendering synapse configuration to {}".format(self.synapse_config),
        hookenv.DEBUG,
    )
    if self.pgsql_configured():
        templating.render(
            "homeserver.yaml.j2",
            self.synapse_config,
            {
                "conf_dir": self.synapse_conf_dir,
                "signing_key": self.get_synapse_signing_key(),
                "registration_shared_secret": self.get_shared_secret(),
                "pgsql_configured": self.pgsql_configured(),
                "pgsql_host": self.kv.get("pgsql_host"),
                "pgsql_port": self.kv.get("pgsql_port"),
                "pgsql_db": self.kv.get("pgsql_db"),
                "pgsql_user": self.kv.get("pgsql_user"),
                "pgsql_pass": self.kv.get("pgsql_pass"),
                "server_name": self.get_server_name(),
                "public_baseurl": self.get_public_baseurl(),
                "enable_tls": self.get_tls(),
                "enable_search": self.charm_config["enable-search"],
                "enable_user_directory": self.charm_config["enable-user-directory"],
                "enable_room_list_search": self.charm_config["enable-room-list-search"],
                "enable_registration": self.charm_config["enable-registration"],
                "use_presence": self.charm_config["track-presence"],
                "require_auth_for_profile_requests": self.charm_config["require-auth-profile-requests"],
                "default_room_version": self.charm_config["default-room-version"],
                "block_non_admin_invites": not bool(self.charm_config["enable-non-admin-invites"]),
                "report_stats": self.charm_config["enable-reporting-stats"],
                "allow_public_rooms_without_auth": self.charm_config["allow-public-rooms-unauthed"],
                "allow_public_rooms_over_federation": self.charm_config["allow-public-rooms-federated"],
                "federation_domain_whitelist": self.get_domain_whitelist(),
                "federation_ip_range_blacklist": self.get_federation_iprange_blacklist(),
            },
        )
        if any_file_changed([self.synapse_config]):
            self.restart_synapse()
        return True
    return False

def render_config(self):
    """Render the configuration for GitLab omnibus."""
    if self.pgsql_configured():
        templating.render(
            "gitlab.rb.j2",
            self.gitlab_config,
            {
                "db_adapter": "postgresql",
                "db_host": self.kv.get("pgsql_host"),
                "db_port": self.kv.get("pgsql_port"),
                "db_database": self.kv.get("pgsql_db"),
                "db_user": self.kv.get("pgsql_user"),
                "db_password": self.kv.get("pgsql_pass"),
                "redis_host": self.kv.get("redis_host"),
                "redis_port": self.kv.get("redis_port"),
                "http_port": self.charm_config.get("http_port"),
                "ssh_host": self.get_sshhost(),
                "ssh_port": self.get_sshport(),
                "smtp_enabled": self.get_smtp_enabled(),
                "smtp_server": self.charm_config.get("smtp_server"),
                "smtp_port": self.charm_config.get("smtp_port"),
                "smtp_user": self.charm_config.get("smtp_user"),
                "smtp_password": self.charm_config.get("smtp_password"),
                "smtp_domain": self.get_smtp_domain(),
                "smtp_authentication": self.charm_config.get("smtp_authentication"),
                "smtp_tls": str(self.charm_config.get("smtp_tls")).lower(),
                "email_from": self.charm_config.get("email_from"),
                "email_display_name": self.charm_config.get("email_display_name"),
                "email_reply_to": self.charm_config.get("email_reply_to"),
                "url": self.get_external_uri(),
            },
        )
    elif self.mysql_configured():
        templating.render(
            "gitlab.rb.j2",
            self.gitlab_config,
            {
                "db_adapter": "mysql2",
                "db_host": self.kv.get("mysql_host"),
                "db_port": self.kv.get("mysql_port"),
                "db_database": self.kv.get("mysql_db"),
                "db_user": self.kv.get("mysql_user"),
                "db_password": self.kv.get("mysql_pass"),
                "redis_host": self.kv.get("redis_host"),
                "redis_port": self.kv.get("redis_port"),
                "http_port": self.charm_config["http_port"],
                "ssh_host": self.get_sshhost(),
                "ssh_port": self.get_sshport(),
                "smtp_enabled": self.get_smtp_enabled(),
                "smtp_server": self.charm_config.get("smtp_server"),
                "smtp_port": self.charm_config.get("smtp_port"),
                "smtp_user": self.charm_config.get("smtp_user"),
                "smtp_password": self.charm_config.get("smtp_password"),
                "smtp_domain": self.get_smtp_domain(),
                "smtp_authentication": self.charm_config.get("smtp_authentication"),
                "smtp_tls": str(self.charm_config.get("smtp_tls")).lower(),
                "email_from": self.charm_config.get("email_from"),
                "email_display_name": self.charm_config.get("email_display_name"),
                "email_reply_to": self.charm_config.get("email_reply_to"),
                "url": self.get_external_uri(),
            },
        )
    elif self.legacy_db_configured():
        templating.render(
            "gitlab.rb.j2",
            self.gitlab_config,
            {
                "db_adapter": "mysql2",
                "db_host": self.kv.get("db_host"),
                "db_port": self.kv.get("db_port"),
                "db_database": self.kv.get("db_db"),
                "db_user": self.kv.get("db_user"),
                "db_password": self.kv.get("db_pass"),
                "redis_host": self.kv.get("redis_host"),
                "redis_port": self.kv.get("redis_port"),
                "http_port": self.charm_config["http_port"],
                "ssh_host": self.get_sshhost(),
                "ssh_port": self.get_sshport(),
                "smtp_enabled": self.get_smtp_enabled(),
                "smtp_server": self.charm_config.get("smtp_server"),
                "smtp_port": self.charm_config.get("smtp_port"),
                "smtp_user": self.charm_config.get("smtp_user"),
                "smtp_password": self.charm_config.get("smtp_password"),
                "smtp_domain": self.get_smtp_domain(),
                "smtp_authentication": self.charm_config.get("smtp_authentication"),
                "smtp_tls": str(self.charm_config.get("smtp_tls")).lower(),
                "email_from": self.charm_config.get("email_from"),
                "email_display_name": self.charm_config.get("email_display_name"),
                "email_reply_to": self.charm_config.get("email_reply_to"),
                "url": self.get_external_uri(),
            },
        )
    else:
        hookenv.status_set(
            "blocked",
            "DB configuration is missing. Verify database relations to continue.",
        )
        hookenv.log("Skipping configuration due to missing DB config")
        return False
    if any_file_changed([self.gitlab_config]):
        self.gitlab_reconfigure_run()
    return True