Example #1
def set_active():
    if postgresql.is_running():
        if replication.is_master():
            msg = "Live master"
        elif postgresql.is_primary():
            msg = "Live primary"
        else:
            msg = "Live secondary"
        status_set("active", "{} ({})".format(msg, postgresql.point_version()))
    else:
        # PostgreSQL crashed! Maybe bad configuration we failed to
        # pick up, or maybe a full disk. The admin will need to diagnose.
        status_set("blocked", "PostgreSQL unexpectedly shut down")
Example #2
def set_active():
    if postgresql.is_running():
        if replication.is_master():
            msg = 'Live master'
        elif postgresql.is_primary():
            msg = 'Live primary'
        else:
            msg = 'Live secondary'
        status_set('active', '{} ({})'.format(msg, postgresql.point_version()))
    else:
        # PostgreSQL crashed! Maybe bad configuration we failed to
        # pick up, or maybe a full disk. The admin will need to diagnose.
        status_set('blocked', 'PostgreSQL unexpectedly shut down')
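The set_active() listings above call a status_set helper (Example #6 below refers to it as workloadstatus.status_set). A minimal sketch of such a wrapper, assuming it only logs the transition before delegating to charmhelpers' hookenv.status_set; the charm's actual helper may do more, such as persisting the status:

from charmhelpers.core import hookenv

def status_set(workload_state, message):
    # Record the transition in the unit log, then report it to Juju.
    # Valid workload states include "maintenance", "blocked" and "active".
    hookenv.log("Status change: {} ({})".format(workload_state, message))
    hookenv.status_set(workload_state, message)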
Example #3
def update_postgresql_crontab():
    config = hookenv.config()
    data = dict(config)

    data['scripts_dir'] = helpers.scripts_dir()
    data['is_master'] = replication.is_master()
    data['is_primary'] = postgresql.is_primary()

    if config['wal_e_storage_uri']:
        data['wal_e_enabled'] = True
        data['wal_e_backup_command'] = wal_e.wal_e_backup_command()
        data['wal_e_prune_command'] = wal_e.wal_e_prune_command()
    else:
        data['wal_e_enabled'] = False

    destination = os.path.join(helpers.cron_dir(), 'juju-postgresql')
    templating.render('postgres.cron.tmpl', destination, data,
                      owner='root', group='postgres',
                      perms=0o640)
Example #4
def update_postgresql_crontab():
    config = hookenv.config()
    data = dict(config)

    data["scripts_dir"] = helpers.scripts_dir()
    data["is_master"] = replication.is_master()
    data["is_primary"] = postgresql.is_primary()

    if config["wal_e_storage_uri"]:
        data["wal_e_enabled"] = True
        data["wal_e_backup_command"] = wal_e.wal_e_backup_command()
        data["wal_e_prune_command"] = wal_e.wal_e_prune_command()
    else:
        data["wal_e_enabled"] = False

    destination = os.path.join(helpers.cron_dir(), "juju-postgresql")
    templating.render(
        "postgres.cron.tmpl",
        destination,
        data,
        owner="root",
        group="postgres",
        perms=0o640,
    )
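The templating.render call above is charmhelpers' Jinja2-based renderer. As a rough, simplified sketch of what that call amounts to (the real helper also resolves the charm's templates/ directory and writes the file through host.write_file), assuming jinja2 is installed:

import grp
import os
import pwd

from jinja2 import Environment, FileSystemLoader

def render_sketch(source, target, context, owner="root", group="postgres", perms=0o640):
    # Render the named template from a templates/ directory with the given
    # context, then apply ownership and permissions to the written file.
    env = Environment(loader=FileSystemLoader("templates"))
    content = env.get_template(source).render(**context)
    with open(target, "w") as f:
        f.write(content)
    os.chown(target, pwd.getpwnam(owner).pw_uid, grp.getgrnam(group).gr_gid)
    os.chmod(target, perms)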
Example #5
def db_relation_common(rel):
    """Publish unit specific relation details."""
    local = rel.local
    if "database" not in local:
        return  # Not yet ready.

    # Version number, allowing clients to adjust or block if their
    # expectations are not met.
    local["version"] = postgresql.version()

    # Calculate the state of this unit. 'standalone' will disappear
    # in a future version of this interface, as this state was
    # only needed to deal with race conditions now solved by
    # Juju leadership. We check for is_primary() rather than
    # the postgresql.replication.is_master reactive state to
    # publish the correct state when we are using manual replication
    # (there might be multiple independent masters, possibly useful for
    # sharding, or perhaps this is a multi-master BDR setup).
    if postgresql.is_primary():
        if reactive.helpers.is_state("postgresql.replication.has_peers"):
            local["state"] = "master"
        else:
            local["state"] = "standalone"
    else:
        local["state"] = "hot standby"

    # Host is the private ip address, but this might change and
    # become the address of an attached proxy or alternative peer
    # if this unit is in maintenance.
    local["host"] = ingress_address(local.relname, local.relid)

    # Port will be 5432, unless the user has overridden it or
    # something very weird happened when the packages were installed.
    local["port"] = str(postgresql.port())

    # The list of remote units on this relation granted access.
    # This is to avoid the race condition where a new client unit
    # joins an existing client relation and sees valid credentials,
    # before we have had a chance to grant it access.
    local["allowed-units"] = " ".join(unit for unit, relinfo in rel.items() if len(incoming_addresses(relinfo)) > 0)

    # The list of IP address ranges on this relation granted access.
    # This will replace allowed-units, which does not work with cross
    # model relations due to the anonymization of the external client.
    local["allowed-subnets"] = ",".join(
        sorted({r: True for r in chain(*[incoming_addresses(relinfo) for relinfo in rel.values()])}.keys())
    )

    # v2 protocol. Publish connection strings for this unit and its peers.
    # Clients should use these connection strings in favour of the old
    # host, port, database settings. A single proxy unit can thus
    # publish several end points to clients.
    master = replication.get_master()
    if replication.is_master():
        master_relinfo = local
    else:
        master_relinfo = rel.peers.get(master)
    local["master"] = relinfo_to_cs(master_relinfo)
    all_relinfo = list(rel.peers.values()) if rel.peers else []
    all_relinfo.append(rel.local)
    standbys = filter(
        None,
        [relinfo_to_cs(relinfo) for relinfo in all_relinfo if relinfo.unit != master],
    )
    local["standbys"] = "\n".join(sorted(standbys)) or None
Example #6
def upgrade_charm():
    workloadstatus.status_set("maintenance", "Upgrading charm")

    rels = context.Relations()

    # The master is now appointed by the leader.
    if hookenv.is_leader():
        master = replication.get_master()
        if not master:
            master = hookenv.local_unit()
            peer_rel = helpers.get_peer_relation()
            if peer_rel:
                for peer_relinfo in peer_rel.values():
                    if peer_relinfo.get("state") == "master":
                        master = peer_relinfo.unit
                        break
            hookenv.log("Discovered {} is the master".format(master))
            leadership.leader_set(master=master)

    # The name of this crontab has changed. It will get regenerated.
    if os.path.exists("/etc/cron.d/postgresql"):
        hookenv.log("Removing old crontab")
        os.unlink("/etc/cron.d/postgresql")

    # Older usernames were generated from the relation id,
    # and really old ones contained random components. This made it
    # problematic to restore a database into a fresh environment,
    # because the new usernames would not match the old usernames and
    # none of the database permissions would match. We now generate
    # usernames using just the client service name, so restoring a
    # database into a fresh environment will work provided the service
    # names match. We want to update the old usernames in upgraded
    # services to the new format to improve their disaster recovery
    # story.
    for relname, superuser in [("db", False), ("db-admin", True)]:
        for client_rel in rels[relname].values():
            hookenv.log("Migrating database users for {}".format(client_rel))
            password = client_rel.local.get("password", host.pwgen())
            old_username = client_rel.local.get("user")
            new_username = postgresql.username(client_rel.service, superuser,
                                               False)
            if old_username and old_username != new_username:
                migrate_user(old_username, new_username, password, superuser)
                client_rel.local["user"] = new_username
                client_rel.local["password"] = password

            old_username = client_rel.local.get("schema_user")
            if old_username and old_username != new_username:
                migrate_user(old_username, new_username, password, superuser)
                client_rel.local["schema_user"] = new_username
                client_rel.local["schema_password"] = password

    # Admin relations used to get 'all' published as the database name,
    # which was bogus.
    for client_rel in rels["db-admin"].values():
        if client_rel.local.get("database") == "all":
            client_rel.local["database"] = client_rel.service

    # Reconfigure PostgreSQL and republish client relations.
    reactive.remove_state("postgresql.cluster.configured")
    reactive.remove_state("postgresql.client.published")

    # Don't recreate the cluster.
    reactive.set_state("postgresql.cluster.created")

    # Set the postgresql.replication.cloned flag, so we don't rebuild
    # standbys when upgrading the charm from a pre-reactive version.
    reactive.set_state("postgresql.replication.cloned")

    # Publish which node we are following
    peer_rel = helpers.get_peer_relation()
    if peer_rel and "following" not in peer_rel.local:
        following = unitdata.kv().get("postgresql.replication.following")
        if following is None and not replication.is_master():
            following = replication.get_master()
        peer_rel.local["following"] = following

    # Ensure storage that was attached but ignored is no longer ignored.
    if not reactive.is_state("postgresql.storage.pgdata.attached"):
        if hookenv.storage_list("pgdata"):
            storage.attach()

    # Ensure client usernames and passwords match leader settings.
    for relname in ("db", "db-admin"):
        for rel in rels[relname].values():
            del rel.local["user"]
            del rel.local["password"]

    # Ensure the configured version is cached.
    postgresql.version()

    # Skip checks for pre-existing databases, as that has already happened.
    reactive.set_state("postgresql.cluster.initial-check")

    # Reinstall support scripts
    reactive.remove_state("postgresql.cluster.support-scripts")

    # Ensure that systemd is managing the PostgreSQL process
    if host.init_is_systemd() and not reactive.is_flag_set("postgresql.upgrade.systemd"):
        reactive.set_flag("postgresql.upgrade.systemd")
        if reactive.is_flag_set("postgresql.cluster.is_running"):
            hookenv.log("Restarting PostgreSQL under systemd", hookenv.WARNING)
            reactive.clear_flag("postgresql.cluster.is_running")
            postgresql.stop_pgctlcluster()

    # Update the PGDG source, in case the signing key has changed.
    config = hookenv.config()
    if config["pgdg"]:
        service.add_pgdg_source()
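Most of upgrade_charm() works indirectly, by setting and clearing charms.reactive states that other handlers in the charm react to on the next dispatch. Purely as an illustration (the when/when_not decorators are real charms.reactive APIs, but this handler is invented for the sketch), clearing postgresql.cluster.configured would re-trigger a handler along these lines:

from charms import reactive
from charmhelpers.core import hookenv

@reactive.when("postgresql.cluster.created")
@reactive.when_not("postgresql.cluster.configured")
def reconfigure_cluster():
    # Hypothetical handler: regenerate the PostgreSQL configuration here,
    # then mark the cluster as configured so dependent handlers can run.
    hookenv.log("Reconfiguring PostgreSQL after charm upgrade")
    reactive.set_state("postgresql.cluster.configured")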