Example #1
def get_peer_relation():
    """Return the peer class:`context.Relation`

    We can't use context.Relations().peer to find the peer relation,
    because with multiple peer relations the one it returns is unstable.
    """
    for rel in context.Relations()["replication"].values():
        return rel
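
Since the loop returns the first value it sees and falls through to None when the "replication" peer relation has not been joined yet, callers need to guard against a missing peer relation. A minimal standalone sketch of the same pattern, using a plain dict as a hypothetical stand-in for context.Relations()["replication"]:

def first_value(mapping):
    """Return the first value in a mapping, or None if it is empty."""
    for value in mapping.values():
        return value
    return None

# Hypothetical stand-in for context.Relations()["replication"].
replication_rels = {"replication:1": {"state": "master"}}
peer_rel = first_value(replication_rels)
if peer_rel is None:
    print("peer relation not joined yet")
else:
    print("peer relation data:", peer_rel)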
Example #2
def mirror_master():
    """A standby mirrors client connection details from the master.

    The master pings its peers using the peer relation to ensure a hook
    is invoked and this handler is called after the credentials have been
    published.
    """
    rels = context.Relations()
    for relname in CLIENT_RELNAMES:
        for rel in rels[relname].values():
            db_relation_mirror(rel)
            db_relation_common(rel)
    reactive.set_state("postgresql.client.published")
    # Now we know the username and database, ensure pg_hba.conf gets
    # regenerated to match and the clients can actually login.
    reactive.remove_state("postgresql.cluster.configured")
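
context.Relations() behaves like a nested mapping of relation name to relation id to Relation object, so the double loop visits every established client relation. A standalone sketch of the same traversal over plain dicts (the relation ids and payloads are made up for illustration; the relation names are the ones used elsewhere in these examples):

CLIENT_RELNAMES = ("db", "db-admin", "master")

# Hypothetical snapshot of context.Relations(): name -> relation id -> data.
rels = {
    "db": {"db:3": {"service": "blog"}, "db:7": {"service": "wiki"}},
    "db-admin": {},
    "master": {"master:2": {"service": "reporting"}},
}

for relname in CLIENT_RELNAMES:
    for relid, rel in rels.get(relname, {}).items():
        print("would mirror connection details on", relid, "for", rel["service"])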
Example #3
def handle_storage_relation(dead_chicken):
    # Remove this once Juju storage is no longer experimental and
    # everyone has had a chance to upgrade.
    data_rels = context.Relations()["data"]
    if len(data_rels) > 1:
        helpers.status_set("blocked",
                           "Too many relations to the storage subordinate")
        return
    elif data_rels:
        relid, rel = list(data_rels.items())[0]
        rel.local["mountpoint"] = external_volume_mount

    if needs_remount():
        reactive.set_state("postgresql.storage.needs_remount")
        apt.queue_install(["rsync"])
        # Migrate any data when we can restart.
        coordinator.acquire("restart")
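
The guard at the top tolerates zero or one relation to the storage subordinate and blocks on anything more. A self-contained sketch of that check (external_volume_mount and the exception are illustrative stand-ins for the charm's config value and its "blocked" status):

external_volume_mount = "/srv/data"  # hypothetical mount point

def pick_data_relation(data_rels):
    """Return (relid, rel) for the single data relation, or None if unrelated."""
    if len(data_rels) > 1:
        raise ValueError("Too many relations to the storage subordinate")
    if data_rels:
        return next(iter(data_rels.items()))
    return None

picked = pick_data_relation({"data:0": {}})
if picked:
    relid, rel = picked
    rel["mountpoint"] = external_volume_mount
    print(relid, rel)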
Example #4
def master_provides():
    """The master publishes client connection details.

    Note that this may not be happening in the -relation-changed
    hook, as this unit may not have been the master when the relation
    was joined.
    """
    rels = context.Relations()
    for relname in CLIENT_RELNAMES:
        for rel in rels[relname].values():
            if len(rel):
                db_relation_master(rel)
                db_relation_common(rel)
                ensure_db_relation_resources(rel)
    reactive.set_state("postgresql.client.published")
    # Now we know the username and database, ensure pg_hba.conf gets
    # regenerated to match and the clients can actually login.
    reactive.remove_state("postgresql.cluster.configured")
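
Unlike mirror_master() above, this handler skips relations with no joined remote units (len(rel) counts the remote units on a Relation). A tiny standalone illustration of that filter, with plain dicts standing in for Relation objects:

# Hypothetical relations: relation id -> remote units and their data.
client_rels = {
    "db:3": {"blog/0": {"database": "blog"}},
    "db:9": {},  # relation created but no remote unit has joined yet
}

for relid, rel in client_rels.items():
    if len(rel):
        print("publishing connection details on", relid)
    else:
        print("skipping", relid, "until a remote unit joins")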
Example #5
def update_pg_hba_conf():
    # grab the needed current state
    config = hookenv.config()
    rels = context.Relations()
    path = postgresql.pg_hba_conf_path()
    with open(path, "r") as f:
        pg_hba = f.read()

    # generate the new state
    pg_hba_content = generate_pg_hba_conf(pg_hba, config, rels)

    # write out the new state
    helpers.rewrite(path, pg_hba_content)

    # Use @when_file_changed for this when Issue #44 is resolved.
    if reactive.helpers.any_file_changed([path]) and reactive.is_state("postgresql.cluster.is_running"):
        hookenv.log("pg_hba.conf has changed. PostgreSQL needs reload.")
        reactive.set_state("postgresql.cluster.needs_reload")
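
helpers.rewrite() and reactive.helpers.any_file_changed() are charm helpers; the underlying idea is simply to act only when the rendered content differs from what is already on disk. A self-contained sketch of that idea (the function name and behaviour are illustrative, not the charm's actual helpers):

import os
import tempfile

def rewrite_if_changed(path, content):
    """Write content to path; return True only if the file actually changed."""
    old = None
    if os.path.exists(path):
        with open(path, "r") as f:
            old = f.read()
    if old == content:
        return False
    with open(path, "w") as f:
        f.write(content)
    return True

with tempfile.TemporaryDirectory() as tmp:
    hba = os.path.join(tmp, "pg_hba.conf")
    print(rewrite_if_changed(hba, "local all postgres peer\n"))  # True: created
    print(rewrite_if_changed(hba, "local all postgres peer\n"))  # False: unchanged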
Example #6
def set_client_passwords():
    """The leader chooses passwords for client connections.

    Storing the passwords in the leadership settings is the most
    reliable way of distributing them to peers.
    """
    raw = leadership.leader_get("client_passwords")
    pwds = json.loads(raw) if raw else {}
    rels = context.Relations()
    updated = False
    for relname in CLIENT_RELNAMES:
        for rel in rels[relname].values():
            superuser, replication = _credential_types(rel)
            for remote in rel.values():
                user = postgresql.username(remote.service, superuser=superuser, replication=replication)
                if user not in pwds:
                    password = host.pwgen()
                    pwds[user] = password
                    updated = True
    if updated:
        leadership.leader_set(client_passwords=json.dumps(pwds, sort_keys=True))
    reactive.set_state("postgresql.client.passwords_set")
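
The password bookkeeping is just a JSON-serialised dict that only gets written back to the leader settings when something new was generated. A standalone sketch of that round-trip (secrets.token_urlsafe() stands in for host.pwgen(), and ensure_passwords is a hypothetical name, not a charm helper):

import json
import secrets

def ensure_passwords(raw, users):
    """Return (json_blob, updated): a password for every user, flag if changed."""
    pwds = json.loads(raw) if raw else {}
    updated = False
    for user in users:
        if user not in pwds:
            pwds[user] = secrets.token_urlsafe(16)
            updated = True
    return json.dumps(pwds, sort_keys=True), updated

blob, updated = ensure_passwords(None, ["juju_blog", "juju_wiki"])
print(updated)  # True: passwords were generated
print(ensure_passwords(blob, ["juju_blog", "juju_wiki"])[1])  # False: nothing new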
Example #7
def ensure_viable_postgresql_conf(opts):
    def force(**kw):
        for k, v in kw.items():
            if opts.get(k) != v:
                hookenv.log("Setting {} to {}".format(k, v), DEBUG)
                opts[k] = v

    config = hookenv.config()
    rels = context.Relations()

    # Number of standby units - count peers and 'master' relations.
    num_standbys = len(helpers.get_peer_relation() or {})
    for rel in rels["master"].values():
        num_standbys += len(rel)

    num_clients = 0
    for rel in list(rels["db"]) + list(rels["db-admin"]):
        num_clients += len(rel)

    # Even without replication, replication slots get used by
    # pg_basebackup(1). Bump up max_wal_senders so things work. It is
    # cheap, so perhaps we should just pump it to several thousand.
    min_wal_senders = num_standbys * 2 + 5
    if min_wal_senders > int(opts.get("max_wal_senders", 0)):
        force(max_wal_senders=min_wal_senders)

    # We used to calculate a minimum max_connections here, ensuring
    # that we had at least one per client and enough for replication
    # and backups. It wasn't much use though, as the major variable
    # is not the number of clients but how many connections the
    # clients open (connection pools of 20 or more are not uncommon).
    # lp:1594667 required the calculation to be synchronized, or just
    # removed. So removed to avoid complexity for dubious gains.
    #
    # max_connections. One per client unit, plus replication.
    # max_wal_senders = int(opts.get('max_wal_senders', 0))
    # assert max_wal_senders > 0
    # min_max_connections = max_wal_senders + max(1, num_clients)
    #
    min_max_connections = 100
    if min_max_connections > int(opts.get("max_connections", 0)):
        force(max_connections=min_max_connections)

    # We want 'hot_standby' at a minimum, as it lets us run
    # pg_basebackup() and it is recommended over the more
    # minimal 'archive'. Is it worth enabling the higher-still
    # 'logical' level only when necessary? How do we detect that?
    force(hot_standby=True)
    if postgresql.has_version("9.4"):
        force(wal_level="logical")
    else:
        force(wal_level="hot_standby")

    # Having two config options for the one setting is confusing. Perhaps
    # we should deprecate this.
    if num_standbys and (int(config["replicated_wal_keep_segments"]) > int(opts.get("wal_keep_segments", 0))):
        force(wal_keep_segments=config["replicated_wal_keep_segments"])

    # Log shipping with WAL-E.
    if config["wal_e_storage_uri"]:
        force(archive_mode="on")  # Boolean pre-9.5, enum 9.5+
        force(archive_command=wal_e.wal_e_archive_command())

    # Log destinations for syslog. This charm only supports standard
    # Debian logging, or Debian + syslog. This will grow more complex in
    # the future, as the local logs are redundant if you are using syslog
    # for log aggregation, and we will want to add csvlog because it is
    # so much easier to parse.
    if context.Relations()["syslog"]:
        force(
            log_destination="stderr,syslog",
            syslog_ident=hookenv.local_unit().replace("/", "_"),
        )
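
The nested force() helper only assigns a key when the current value differs, so repeated hook runs converge instead of churning postgresql.conf, and the minimum calculations never lower a value that is already high enough. A standalone sketch of that minimum-raising logic (force_minimums is a hypothetical name and print() stands in for hookenv.log):

def force_minimums(opts, **minimums):
    """Raise integer settings in opts to at least the given minimums."""
    for key, minimum in minimums.items():
        if int(opts.get(key, 0)) < minimum:
            print("Setting {} to {}".format(key, minimum))
            opts[key] = minimum

opts = {"max_wal_senders": 3}
num_standbys = 2
force_minimums(opts, max_wal_senders=num_standbys * 2 + 5, max_connections=100)
print(opts)  # {'max_wal_senders': 9, 'max_connections': 100}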
Example #8
def upgrade_charm():
    workloadstatus.status_set("maintenance", "Upgrading charm")

    rels = context.Relations()

    # The master is now appointed by the leader.
    if hookenv.is_leader():
        master = replication.get_master()
        if not master:
            master = hookenv.local_unit()
            peer_rel = helpers.get_peer_relation()
            if peer_rel:
                for peer_relinfo in peer_rel.values():
                    if peer_relinfo.get("state") == "master":
                        master = peer_relinfo.unit
                        break
            hookenv.log("Discovered {} is the master".format(master))
            leadership.leader_set(master=master)

    # The name of this crontab has changed. It will get regenerated.
    if os.path.exists("/etc/cron.d/postgresql"):
        hookenv.log("Removing old crontab")
        os.unlink("/etc/cron.d/postgresql")

    # Older usernames were generated from the relation id,
    # and really old ones contained random components. This made it
    # problematic to restore a database into a fresh environment,
    # because the new usernames would not match the old usernames and
    # none of the database permissions would match. We now generate
    # usernames using just the client service name, so restoring a
    # database into a fresh environment will work provided the service
    # names match. We want to update the old usernames in upgraded
    # services to the new format to improve their disaster recovery
    # story.
    for relname, superuser in [("db", False), ("db-admin", True)]:
        for client_rel in rels[relname].values():
            hookenv.log("Migrating database users for {}".format(client_rel))
            password = client_rel.local.get("password", host.pwgen())
            old_username = client_rel.local.get("user")
            new_username = postgresql.username(client_rel.service, superuser,
                                               False)
            if old_username and old_username != new_username:
                migrate_user(old_username, new_username, password, superuser)
                client_rel.local["user"] = new_username
                client_rel.local["password"] = password

            old_username = client_rel.local.get("schema_user")
            if old_username and old_username != new_username:
                migrate_user(old_username, new_username, password, superuser)
                client_rel.local["schema_user"] = new_username
                client_rel.local["schema_password"] = password

    # Admin relations used to get 'all' published as the database name,
    # which was bogus.
    for client_rel in rels["db-admin"].values():
        if client_rel.local.get("database") == "all":
            client_rel.local["database"] = client_rel.service

    # Reconfigure PostgreSQL and republish client relations.
    reactive.remove_state("postgresql.cluster.configured")
    reactive.remove_state("postgresql.client.published")

    # Don't recreate the cluster.
    reactive.set_state("postgresql.cluster.created")

    # Set the postgresql.replication.cloned flag, so we don't rebuild
    # standbys when upgrading the charm from a pre-reactive version.
    reactive.set_state("postgresql.replication.cloned")

    # Publish which node we are following
    peer_rel = helpers.get_peer_relation()
    if peer_rel and "following" not in peer_rel.local:
        following = unitdata.kv().get("postgresql.replication.following")
        if following is None and not replication.is_master():
            following = replication.get_master()
        peer_rel.local["following"] = following

    # Ensure storage that was attached but ignored is no longer ignored.
    if not reactive.is_state("postgresql.storage.pgdata.attached"):
        if hookenv.storage_list("pgdata"):
            storage.attach()

    # Ensure client usernames and passwords match leader settings.
    for relname in ("db", "db-admin"):
        for rel in rels[relname].values():
            del rel.local["user"]
            del rel.local["password"]

    # Ensure the configure version is cached.
    postgresql.version()

    # Skip checks for pre-existing databases, as that has already happened.
    reactive.set_state("postgresql.cluster.initial-check")

    # Reinstall support scripts
    reactive.remove_state("postgresql.cluster.support-scripts")

    # Ensure that systemd is managing the PostgreSQL process
    if host.init_is_systemd() and not reactive.is_flag_set("postgresql.upgrade.systemd"):
        reactive.set_flag("postgresql.upgrade.systemd")
        if reactive.is_flag_set("postgresql.cluster.is_running"):
            hookenv.log("Restarting PostgreSQL under systemd", hookenv.WARNING)
            reactive.clear_flag("postgresql.cluster.is_running")
            postgresql.stop_pgctlcluster()

    # Update the PGDG source, in case the signing key has changed.
    config = hookenv.config()
    if config["pgdg"]:
        service.add_pgdg_source()
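
The username migration in the middle of this handler boils down to comparing the username currently published on each client relation with the one the new scheme would generate, and only migrating when they differ. A simplified standalone sketch of that decision (the juju_<service> scheme is a made-up stand-in for postgresql.username()):

def plan_user_migrations(client_rels):
    """Yield (old, new) username pairs that still need migrating."""
    for relid, local in client_rels.items():
        old = local.get("user")
        new = "juju_{}".format(local["service"])  # hypothetical naming scheme
        if old and old != new:
            yield old, new

rels = {
    "db:3": {"service": "blog", "user": "db_3_blog"},  # old-style name
    "db:7": {"service": "wiki", "user": "juju_wiki"},   # already migrated
}
for old, new in plan_user_migrations(rels):
    print("migrate", old, "->", new)  # only db:3 needs work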