def handle_storage_relation(dead_chicken):
    """Publish our mount point to the storage subordinate relation.

    Blocks the unit if more than one storage relation exists. If a
    remount is needed, queues the tooling and a coordinated restart.
    """
    # Remove this once Juju storage is no longer experimental and
    # everyone has had a chance to upgrade.
    relations = context.Relations()["data"]
    if len(relations) > 1:
        helpers.status_set(
            "blocked", "Too many relations to the storage subordinate")
        return
    if relations:
        _relid, relation = next(iter(relations.items()))
        relation.local["mountpoint"] = external_volume_mount

    if needs_remount():
        reactive.set_state("postgresql.storage.needs_remount")
        apt.queue_install(["rsync"])
        # Migrate any data when we can restart.
        coordinator.acquire("restart")
def config_changed():
    """React to a charm configuration change.

    The leader re-renders all configs, waits for the cluster, and pushes
    any changed cluster options. Non-leaders wait for the cluster (best
    effort) and then queue a coordinated restart.
    """
    if reactive.is_flag_set('leadership.is_leader'):
        with charm.provide_charm_instance() as instance:
            instance.render_all_configs()
            instance.wait_until_cluster_available()
            if reactive.is_flag_set('config.changed.auto-rejoin-tries'):
                instance.set_cluster_option(
                    "autoRejoinTries", instance.options.auto_rejoin_tries)
    else:
        with charm.provide_charm_instance() as instance:
            try:
                instance.wait_until_cluster_available()
            except Exception:
                # Best effort: log and carry on so the coordinated
                # restart below is still requested.
                # Typo fixed: "availble" -> "available".
                ch_core.hookenv.log("Cluster was not available as expected.",
                                    "WARNING")
        # Typo fixed: "requst" -> "request".
        ch_core.hookenv.log("Non-leader request to restart.", "DEBUG")
        coordinator.acquire('config-changed-restart')
def handle_storage_relation(dead_chicken):
    """Record the mount point on the sole 'data' storage relation.

    Remove this once Juju storage is no longer experimental and
    everyone has had a chance to upgrade.
    """
    data_rels = context.Relations()['data']
    if len(data_rels) > 1:
        helpers.status_set(
            'blocked', 'Too many relations to the storage subordinate')
        return
    if len(data_rels) == 1:
        (relid, rel), = data_rels.items()
        rel.local['mountpoint'] = external_volume_mount

    if needs_remount():
        reactive.set_state('postgresql.storage.needs_remount')
        # rsync is needed to migrate any data when we can restart.
        apt.queue_install(['rsync'])
        coordinator.acquire('restart')
def attach():
    """Handle attachment of the pgdata storage.

    Records the mount and target PGDATA path in the unit kv store, then
    either blocks the unit (not enough space for an initial migration)
    or queues the migration tooling and a coordinated restart.
    """
    mount = hookenv.storage_get()['location']
    pgdata = os.path.join(mount, postgresql.version(), 'main')
    kv = unitdata.kv()
    kv.set(pgdata_mount_key, mount)
    kv.set(pgdata_path_key, pgdata)
    hookenv.log('PGDATA storage attached at {}'.format(mount))

    # Never happens with Juju 2.0 as we can't reuse an old mount. This
    # check is here for the future.
    existingdb = os.path.exists(pgdata)

    required = shutil.disk_usage(postgresql.data_dir()).used
    available = shutil.disk_usage(mount).free
    if not existingdb and required > available:
        hookenv.status_set('blocked',
                           'Not enough free space in pgdata storage')
        return

    apt.queue_install(['rsync'])
    coordinator.acquire('restart')
    reactive.set_state('postgresql.storage.pgdata.attached')
def request_restart():
    """Ask the coordinator for the 'restart' lock and log the outcome."""
    granted = coordinator.acquire("restart")
    outcome = ("Restart permission granted" if granted
               else "Restart permission requested")
    hookenv.log(outcome)
def request_restart_lock_for_repair():
    """Request the coordinated 'restart' lock ahead of a repair.

    NOTE(review): assumes the coordinator grants the lock asynchronously
    and a later handler performs the actual restart/repair once it is
    held — confirm against the coordinator module.
    """
    coordinator.acquire('restart')
def request_restart_lock():
    """Request the coordinated 'restart' lock.

    NOTE(review): the acquire call only queues the request; the restart
    itself is presumably triggered elsewhere once the lock is granted —
    confirm against the coordinator module.
    """
    coordinator.acquire('restart')
def request_restart():
    """Request the coordinated 'restart' lock, logging the result."""
    if not coordinator.acquire('restart'):
        hookenv.log('Restart permission requested')
        return
    hookenv.log('Restart permission granted')
def need_clone_lock():
    """Grab the restart lock before cloning.

    We need to grab the restart lock before cloning, to ensure
    that the master is not restarted during the process.
    """
    coordinator.acquire("restart")