def configure_ntpmon():
    """
    Reconfigure ntpmon - does nothing at present
    """
    log('configuring ntpmon')
    set_flag('ntpmon.configured')
    clear_flag('ntpmon.started')

def install_ntpmon():
    """
    Install package dependencies, source files, and startup configuration.
    """
    install_dir = layer.options.get('ntpmon', 'install-dir')
    service_name = layer.options.get('ntpmon', 'service-name')
    using_systemd = host.init_is_systemd()
    if install_dir:
        log('installing ntpmon')
        host.mkdir(os.path.dirname(install_dir))
        host.rsync('src/', '{}/'.format(install_dir))

        if service_name:
            if using_systemd:
                systemd_config = '/etc/systemd/system/' + service_name + '.service'
                log('installing systemd service: {}'.format(service_name))
                with open(systemd_config, 'w') as conffile:
                    conffile.write(templating.render(
                        'src/' + service_name + '.systemd',
                        layer.options.get('ntpmon')))
                # 'systemd daemon-reload' is not a valid command; the reload
                # must go through systemctl.
                subprocess.call(['systemctl', 'daemon-reload'])
            else:
                upstart_config = '/etc/init/' + service_name + '.conf'
                log('installing upstart service: {}'.format(service_name))
                with open(upstart_config, 'w') as conffile:
                    conffile.write(templating.render(
                        'src/' + service_name + '.upstart',
                        layer.options.get('ntpmon')))

    set_flag('ntpmon.installed')
    clear_flag('ntpmon.configured')

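# A minimal sketch (not part of the charm) of how the flags above are
# typically chained together with charms.reactive decorators; the handler
# names and decorator placement here are assumptions inferred from the flags
# that install_ntpmon() and configure_ntpmon() set and clear.
from charms.reactive import when, when_not


@when_not('ntpmon.installed')
def _install():
    install_ntpmon()    # sets ntpmon.installed, clears ntpmon.configured


@when('ntpmon.installed')
@when_not('ntpmon.configured')
def _configure():
    configure_ntpmon()  # sets ntpmon.configured, clears ntpmon.started
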
def restart():
    helpers.status_set('maintenance', 'Stopping Cassandra')
    if not cassandra.stop():
        helpers.status_set('blocked', 'Failed to stop Cassandra')
        return False

    auth_enabled = cassandra.config()['authenticator'] != 'allowallauthenticator'
    reactive.toggle_flag('cassandra.auth.enabled', auth_enabled)

    # cassandra.remount()  # TODO: Storage support
    cassandra.ensure_all_database_directories()

    helpers.status_set('maintenance', 'Starting Cassandra')
    if not cassandra.start():
        helpers.status_set('blocked', 'Failed to start Cassandra')
        return False

    timeout = time.time() + 300
    for _ in cassandra.backoff("Cassandra to startup"):
        if cassandra.is_cassandra_running():
            reactive.clear_flag('cassandra.needs_restart')
            return True
        if time.time() > timeout:
            break

    helpers.status_set('blocked', 'Cassandra failed to startup')
    return False

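# cassandra.backoff() is not defined in this file; a plausible shape for it
# is a generator that yields once per attempt and sleeps with an increasing
# delay in between. This is a hypothetical sketch, not the charm's actual
# helper ('what' would typically be logged, e.g. "Waiting for {}".format(what)).
import time


def backoff(what, max_pause=60):
    pause = 1
    while True:
        yield  # caller checks its exit condition after each yield
        time.sleep(pause)
        pause = min(pause * 2, max_pause)  # exponential backoff, capped
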
def set_worker_config(config):
    '''Store the Kafka Connect worker config; the list of available configs is at
    https://docs.confluent.io/current/connect/allconfigs.html#connect-allconfigs
    '''
    unitdata.kv().set('worker.properties', config)
    clear_flag('kafka-connect-base.configured')

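# Hypothetical usage: persist a worker config and let the reactive layer
# rebuild worker.properties on the next hook run. The keys shown are standard
# Kafka Connect worker settings from the page linked above.
set_worker_config({
    'group.id': 'connect-cluster',
    'key.converter': 'org.apache.kafka.connect.json.JsonConverter',
    'value.converter': 'org.apache.kafka.connect.json.JsonConverter',
})
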
def broken(self):
    clear_flag(self.expand_name('available'))

def upgrade_charm():
    clear_flag("charm.started")

def remove_ready(self):
    clear_flag(self.expand_name("ready"))

def upgrade_charm():
    workloadstatus.status_set("maintenance", "Upgrading charm")
    rels = context.Relations()

    # The master is now appointed by the leader.
    if hookenv.is_leader():
        master = replication.get_master()
        if not master:
            master = hookenv.local_unit()
            peer_rel = helpers.get_peer_relation()
            if peer_rel:
                for peer_relinfo in peer_rel.values():
                    if peer_relinfo.get("state") == "master":
                        master = peer_relinfo.unit
                        break
            hookenv.log("Discovered {} is the master".format(master))
            leadership.leader_set(master=master)

    # The name of this crontab has changed. It will get regenerated.
    if os.path.exists("/etc/cron.d/postgresql"):
        hookenv.log("Removing old crontab")
        os.unlink("/etc/cron.d/postgresql")

    # Older usernames were generated from the relation id, and really old
    # ones contained random components. This made it problematic to restore
    # a database into a fresh environment, because the new usernames would
    # not match the old usernames and none of the database permissions would
    # match. We now generate usernames using just the client service name,
    # so restoring a database into a fresh environment will work provided
    # the service names match. We want to update the old usernames in
    # upgraded services to the new format to improve their disaster recovery
    # story.
    for relname, superuser in [("db", False), ("db-admin", True)]:
        for client_rel in rels[relname].values():
            hookenv.log("Migrating database users for {}".format(client_rel))
            password = client_rel.local.get("password", host.pwgen())
            old_username = client_rel.local.get("user")
            new_username = postgresql.username(client_rel.service,
                                               superuser, False)
            if old_username and old_username != new_username:
                migrate_user(old_username, new_username, password, superuser)
                client_rel.local["user"] = new_username
                client_rel.local["password"] = password

            old_username = client_rel.local.get("schema_user")
            if old_username and old_username != new_username:
                migrate_user(old_username, new_username, password, superuser)
                client_rel.local["schema_user"] = new_username
                client_rel.local["schema_password"] = password

    # Admin relations used to get 'all' published as the database name,
    # which was bogus.
    for client_rel in rels["db-admin"].values():
        if client_rel.local.get("database") == "all":
            client_rel.local["database"] = client_rel.service

    # Reconfigure PostgreSQL and republish client relations.
    reactive.remove_state("postgresql.cluster.configured")
    reactive.remove_state("postgresql.client.published")

    # Don't recreate the cluster.
    reactive.set_state("postgresql.cluster.created")

    # Set the postgresql.replication.cloned flag, so we don't rebuild
    # standbys when upgrading the charm from a pre-reactive version.
    reactive.set_state("postgresql.replication.cloned")

    # Publish which node we are following.
    peer_rel = helpers.get_peer_relation()
    if peer_rel and "following" not in peer_rel.local:
        following = unitdata.kv().get("postgresql.replication.following")
        if following is None and not replication.is_master():
            following = replication.get_master()
        peer_rel.local["following"] = following

    # Ensure storage that was attached but ignored is no longer ignored.
    if not reactive.is_state("postgresql.storage.pgdata.attached"):
        if hookenv.storage_list("pgdata"):
            storage.attach()

    # Ensure client usernames and passwords match leader settings.
    for relname in ("db", "db-admin"):
        for rel in rels[relname].values():
            del rel.local["user"]
            del rel.local["password"]

    # Ensure the configured version is cached.
    postgresql.version()

    # Skip checks for pre-existing databases, as that has already happened.
    reactive.set_state("postgresql.cluster.initial-check")

    # Reinstall support scripts.
    reactive.remove_state("postgresql.cluster.support-scripts")

    # Ensure that systemd is managing the PostgreSQL process.
    if host.init_is_systemd() and not reactive.is_flag_set("postgresql.upgrade.systemd"):
        reactive.set_flag("postgresql.upgrade.systemd")
        if reactive.is_flag_set("postgresql.cluster.is_running"):
            hookenv.log("Restarting PostgreSQL under systemd", hookenv.WARNING)
            reactive.clear_flag("postgresql.cluster.is_running")
            postgresql.stop_pgctlcluster()

    # Update the PGDG source, in case the signing key has changed.
    config = hookenv.config()
    if config["pgdg"]:
        service.add_pgdg_source()

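# Note on the mixed flag APIs above: set_state()/remove_state()/is_state()
# are the legacy charms.reactive names for set_flag()/clear_flag()/
# is_flag_set(); both spellings operate on the same flag store, e.g.:
#
#     reactive.set_state("postgresql.cluster.created")    # legacy spelling
#     reactive.is_flag_set("postgresql.cluster.created")  # True
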
def upgrade_charm():
    reactive.clear_flag('cassandra.config.validated')
    reactive.clear_flag('cassandra.configured')
    reactive.clear_flag('cassandra.ports.opened')

def upgrade_charm():
    log('Forcing NTPmon upgrade on upgrade-charm')
    clear_flag('ntpmon.installed')

def check_ready(self):
    # My middle name is ready. No, that doesn't sound right.
    # I eat ready for breakfast.
    toggle_flag(self.expand_name('ready'), self.is_ready)
    clear_flag(self.expand_name('changed'))

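# toggle_flag(flag, condition) is charms.reactive shorthand for the
# set-or-clear pattern used throughout these handlers; a rough equivalent
# (not the library's actual implementation):
from charms.reactive import set_flag, clear_flag


def _toggle_flag(flag, should_set):
    if should_set:
        set_flag(flag)
    else:
        clear_flag(flag)
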
def disable_openstack():
    reactive.clear_flag('charm.ovn-chassis.enable-openstack')

def update_image():
    clear_flag('charm.kubeflow-tf-serving.started')

def update_model():
    clear_flag('charm.kubeflow-tf-serving.has-model')

def upgrade_charm():
    reactive.clear_flag('cassandra.nrpe.installed')

def trigger_publish():
    reactive.clear_flag('cassandra.client.published')
    reactive.clear_flag('endpoint.database.changed')
    reactive.clear_flag('endpoint.database-admin.changed')

def check_requests(self):
    toggle_flag(self.expand_name('requests-pending'),
                len(self.all_requests) > 0)
    clear_flag(self.expand_name('changed'))

def secrets_plugin_configure():
    hookenv.log('Received information about secrets plugin',
                level=hookenv.INFO)
    reactive.clear_flag('secrets.new-plugin')
    reactive.set_flag('secrets.available')
    reactive.set_flag('config.changed')

def chrony_conf_updated():
    log('{} changed - checking if ntpmon needs starting'.format(CHRONY_CONF))
    clear_flag('ntpmon.started')

def upgrade_charm():
    clear_flag('charm.started')

def remove_ready(self):
    clear_flag(self.expand_name('ready'))

def upgrade_charm():
    reactive.clear_flag('cassandra.installed')
    reactive.clear_flag('cassandra.swapoff.done')
    reactive.clear_flag('cassandra.kernelsettings.done')
    reactive.clear_flag('cassandra.limits.done')
    reactive.clear_flag('cassandra.crontab.installed')
    reactive.clear_flag('cassandra.io_schedulers.done')
    reactive.clear_flag('cassandra.version.set')
    cassandra.config()['last_version_update'] = 0

def restart_app():
    host.service_reload('apache2')
    clear_flag('restart-app')
    status_set('active', 'Apache/gdb ready and concrete')

def _request(self, keyvals):
    alphabet = string.ascii_letters + string.digits
    nonce = ''.join(random.choice(alphabet) for _ in range(8))
    self._to_publish.update(keyvals)
    self._to_publish['requested'] = nonce
    clear_flag(self.expand_name('ready'))

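# Why the nonce: relation data is only (re)sent to the remote side when it
# changes, so publishing a fresh random value with each request guarantees
# the remote units see a data change (and their 'changed' flag fires) even
# when keyvals is identical to the previous request.
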
def update_image():
    clear_flag('charm.started')

def disable_snap():
    hookenv.status_set('maintenance', 'Turning off controller')
    with lock_snap_context():
        check_call(['maas', 'config'] + get_snap_args('none', None))
    clear_flag('maas.snap.init')

def check_ready(self):
    toggle_flag(self.expand_name("ready"), self.is_ready)
    clear_flag(self.expand_name("changed"))

def write_db_config(pgsql):
    hookenv.status_set('maintenance', 'Configuring connection to database')
    with lock_snap_context():
        check_call(['maas', 'config'] + get_snap_args('region', pgsql))
    clear_flag('db.master.changed')
    hookenv.status_set('active', 'Running')

def update_image():
    clear_flag("charm.started")

def new_release(self):
    set_flag(self.expand_name('endpoint.{endpoint_name}.new-release'))
    clear_flag(self.expand_name('endpoint.{endpoint_name}.changed.release'))

def changed(self):
    if any(unit.received['port'] for unit in self.all_joined_units):
        set_flag(self.expand_name('available'))
    clear_flag(self.expand_name('changed'))

def changed(self):
    set_flag(self.expand_name('endpoint.{endpoint_name}.connected'))
    clear_flag(self.expand_name('endpoint.{endpoint_name}.changed'))

def joined(self):
    reactive.clear_flag(self.expand_name('{endpoint_name}.available'))
    reactive.set_flag(self.expand_name('{endpoint_name}.connected'))
    self.set_ingress_address()

def broken(self):
    clear_flag(self.expand_name('endpoint.{endpoint_name}.connected'))
    clear_flag(self.expand_name('endpoint.{endpoint_name}.departed'))

def set_base_image(image):
    unitdata.kv().set('docker-image', image)
    clear_flag('kafka-connect-base.configured')

def set_http_relation_data():
    endpoint = endpoint_from_flag('http.available')
    endpoint.configure(SENTRY_HTTP_PORT)
    clear_flag('http.available')

def clear_changed(self):
    '''Clear changed flags immediately after triggers have been applied.'''
    reactive.clear_flag(
        self.expand_name('endpoint.{endpoint_name}.changed.bootstrapped'))

def update_web_override():
    render_web_override()
    call(['systemctl', 'daemon-reload'])
    start_restart(SENTRY_WEB_SERVICE)
    clear_flag('sentry.web-override.needs-rendering')

def ntp_conf_updated():
    log('{} changed - checking if ntpmon needs starting'.format(NTP_CONF))
    clear_flag('ntpmon.started')

def set_nrpe_flag():
    clear_flag('sentry.nagios-setup.complete')

def check_admin_pass():
    admin_pass = config()['admin-pass']
    if admin_pass:
        set_flag('admin-pass')
    else:
        clear_flag('admin-pass')

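# With charms.reactive's toggle_flag available, check_admin_pass() could be
# collapsed to a one-liner (assuming toggle_flag is imported in this module):
#
#     toggle_flag('admin-pass', bool(config()['admin-pass']))
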
def restart_app():
    host.service_reload('apache2')
    clear_flag('restart-app')
    status_set('active', 'app ready')

def departed(self):
    reactive.clear_flag(self.expand_name('{endpoint_name}.connected'))

def _clear_flag(self, flag):
    clear_flag(self.expand_name(flag))