def remount():
    if reactive.is_state('postgresql.cluster.is_running'):
        # Attempting this while PostgreSQL is live would be really, really bad.
        service.stop()

    old_data_dir = postgresql.data_dir()
    new_data_dir = os.path.join(external_volume_mount, 'postgresql',
                                postgresql.version(), 'main')
    backup_data_dir = '{}-{}'.format(old_data_dir, int(time.time()))

    if os.path.isdir(new_data_dir):
        hookenv.log('Remounting existing database at {}'.format(new_data_dir),
                    WARNING)
    else:
        status_set('maintenance',
                   'Migrating data from {} to {}'.format(old_data_dir,
                                                         new_data_dir))
        helpers.makedirs(new_data_dir, mode=0o770,
                         user='******', group='postgres')
        try:
            rsync_cmd = ['rsync', '-av',
                         old_data_dir + '/',
                         new_data_dir + '/']
            hookenv.log('Running {}'.format(' '.join(rsync_cmd)), DEBUG)
            subprocess.check_call(rsync_cmd)
        except subprocess.CalledProcessError:
            status_set('blocked',
                       'Failed to sync data from {} to {}'
                       ''.format(old_data_dir, new_data_dir))
            return

    os.replace(old_data_dir, backup_data_dir)
    os.symlink(new_data_dir, old_data_dir)
    fix_perms(new_data_dir)
    reactive.remove_state('postgresql.storage.needs_remount')
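The tail of remount() leans on two easy-to-miss details: the trailing slashes on the rsync arguments (copy the contents of old_data_dir into new_data_dir rather than nesting the directory inside it), and the os.replace() plus os.symlink() swap that leaves the old path pointing at the new volume. A self-contained sketch of the swap, using throwaway temp directories rather than a real PGDATA:

import os
import tempfile
import time

root = tempfile.mkdtemp()
old_data_dir = os.path.join(root, 'main')            # original location
new_data_dir = os.path.join(root, 'volume', 'main')  # on the new volume
os.makedirs(old_data_dir)
os.makedirs(new_data_dir)

# 1. Move the old directory aside as a timestamped backup.
backup_data_dir = '{}-{}'.format(old_data_dir, int(time.time()))
os.replace(old_data_dir, backup_data_dir)

# 2. Leave a symlink at the old path, so anything still using the
#    original path transparently follows it to the new volume.
os.symlink(new_data_dir, old_data_dir)

assert os.path.islink(old_data_dir)
assert os.path.realpath(old_data_dir) == os.path.realpath(new_data_dir)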
Example #2
def block_on_invalid_config():
    """
    Sanity check charm configuration, blocking the unit if we have
    bogus config values or config changes the charm does not
    yet (or cannot) support.

    We need to do this before the main reactive loop (@preflight),
    or we risk failing to run handlers that rely on @when_file_changed,
    reactive.helpers.data_changed or similar state tied to
    charmhelpers.core.unitdata transactions.
    """
    valid = True
    config = hookenv.config()

    enums = dict(version=set(['', '9.1', '9.2', '9.3', '9.4', '9.5', '9.6']),
                 package_status=set(['install', 'hold']))
    for key, vals in enums.items():
        config[key] = config[key].lower()  # Rewrite to lower case.
        if config[key] not in vals:
            valid = False
            status_set('blocked',
                       'Invalid value for {} ({!r})'.format(key, config[key]))

    unchangeable_config = ['locale', 'encoding', 'pgdg', 'manual_replication']
    if config._prev_dict is not None:
        for name in unchangeable_config:
            if config.changed(name):
                valid = False
                status_set('blocked',
                           'Cannot change {!r} after install '
                           '(from {!r} to {!r}).'
                           .format(name, config.previous(name),
                                   config[name]))
                config[name] = config.previous(name)
        if config.changed('version') and (config.previous('version') !=
                                          postgresql.version()):
            valid = False
            status_set('blocked',
                       'Cannot change version after install '
                       '(from {!r} to {!r}).'
                       .format(config.previous('version'), config['version']))
            config['version'] = config.previous('version')

    metrics_target = config['metrics_target'].strip()
    if metrics_target:
        if ':' not in metrics_target:
            status_set('blocked',
                       'Invalid metrics_target {}'.format(metrics_target))
            valid = False
        metrics_interval = config['metrics_sample_interval']
        if not metrics_interval:
            status_set('blocked',
                       'metrics_sample_interval is required when '
                       'metrics_target is set')
            valid = False

    if not valid:
        raise SystemExit(0)
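The checks above depend on charmhelpers' config object remembering the previously committed value of each option (config.previous(), config.changed(), and the _prev_dict attribute, which is None on the very first hook). A toy stand-in, not the real charmhelpers class, illustrating just those semantics:

class FakeConfig(dict):
    '''Toy stand-in for hookenv.config(); models only the
    previous()/changed() behaviour relied on above.'''

    def __init__(self, current, previous=None):
        super(FakeConfig, self).__init__(current)
        self._prev_dict = previous  # None on the very first hook

    def previous(self, key):
        return (self._prev_dict or {}).get(key)

    def changed(self, key):
        return self.get(key) != self.previous(key)


config = FakeConfig({'locale': 'C.UTF-8'}, previous={'locale': 'C'})
assert config.changed('locale')
assert config.previous('locale') == 'C'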
Example #3
    def test_version(self, codename, config):
        # Explicit version in config.
        config.return_value = {"version": sentinel.version}
        self.assertEqual(postgresql.version(), sentinel.version)

        # Precise fallback
        config.return_value = {}
        codename.return_value = "precise"
        self.assertEqual(postgresql.version(), "9.1")

        # Trusty fallback
        codename.return_value = "trusty"
        self.assertEqual(postgresql.version(), "9.3")

        # No other fallbacks, yet.
        codename.return_value = "whatever"
        with self.assertRaises(KeyError):
            postgresql.version()
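The codename and config parameters here are mocks injected by @mock.patch decorators that this excerpt omits, and sentinel comes from unittest.mock. Decorators apply bottom-up, so the innermost patch becomes the first argument after self. A reconstruction with hypothetical patch targets (the real module paths are not shown in the excerpt):

from unittest import mock

# Hypothetical targets; the bottom decorator maps to the first
# argument after self, the top decorator to the last.
@mock.patch("reactive.postgresql.postgresql.config")        # -> config
@mock.patch("reactive.postgresql.postgresql.lsb_codename")  # -> codename
def test_version(self, codename, config):
    ...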
Example #4
    def test_version(self, codename, config, lsclusters_version):
        # Installed version
        lsclusters_version.return_value = '9.5'
        postgresql.clear_version_cache()
        self.assertEqual(postgresql.version(), '9.5')
        lsclusters_version.return_value = None

        # Explicit version in config.
        config.return_value = {'version': '23'}
        postgresql.clear_version_cache()
        self.assertEqual(postgresql.version(), '23')

        config.return_value = {'version': ''}

        # Xenial default
        codename.return_value = 'xenial'
        postgresql.clear_version_cache()
        self.assertEqual(postgresql.version(), '9.5')

        # Bionic default
        codename.return_value = 'bionic'
        postgresql.clear_version_cache()
        self.assertEqual(postgresql.version(), '10')

        # No other fallbacks, yet.
        codename.return_value = 'whatever'
        postgresql.clear_version_cache()
        with self.assertRaises(NotImplementedError):
            postgresql.version()
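The calls to postgresql.clear_version_cache() between assertions imply that postgresql.version() memoizes its result. A minimal sketch of that caching pattern, with the real lsclusters/config/codename cascade reduced to a stub:

_cached_version = None

def version():
    global _cached_version
    if _cached_version is None:
        _cached_version = _detect_version()
    return _cached_version

def clear_version_cache():
    global _cached_version
    _cached_version = None

def _detect_version():
    # Stand-in for the real lookup: installed cluster, then explicit
    # config, then the distro release default.
    return '10'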
Example #5
def db_relation_common(rel):
    """Publish unit specific relation details."""
    local = rel.local
    if "database" not in local:
        return  # Not yet ready.

    # Version number, allowing clients to adjust or block if their
    # expectations are not met.
    local["version"] = postgresql.version()

    # Calculate the state of this unit. 'standalone' will disappear
    # in a future version of this interface, as this state was
    # only needed to deal with race conditions now solved by
    # Juju leadership. We check for is_primary() rather than
    # the postgresql.replication.is_master reactive state to
    # publish the correct state when we are using manual replication
    # (there might be multiple independent masters, possibly useful for
    # sharding, or perhaps this is a multi master BDR setup).
    if postgresql.is_primary():
        if reactive.helpers.is_state("postgresql.replication.has_peers"):
            local["state"] = "master"
        else:
            local["state"] = "standalone"
    else:
        local["state"] = "hot standby"

    # Host is the private ip address, but this might change and
    # become the address of an attached proxy or alternative peer
    # if this unit is in maintenance.
    local["host"] = hookenv.unit_private_ip()

    # Port will be 5432, unless the user has overridden it or
    # something very weird happened when the packages were installed.
    local["port"] = str(postgresql.port())

    # The list of remote units on this relation granted access.
    # This is to avoid the race condition where a new client unit
    # joins an existing client relation and sees valid credentials,
    # before we have had a chance to grant it access.
    local["allowed-units"] = " ".join(unit for unit, relinfo in rel.items() if "private-address" in relinfo)
Example #6
def remount():
    if reactive.is_state("postgresql.cluster.is_running"):
        # Attempting this while PostgreSQL is live would be really, really bad.
        service.stop()

    old_data_dir = postgresql.data_dir()
    new_data_dir = os.path.join(external_volume_mount, "postgresql",
                                postgresql.version(), "main")
    backup_data_dir = "{}-{}".format(old_data_dir, int(time.time()))

    if os.path.isdir(new_data_dir):
        hookenv.log("Remounting existing database at {}".format(new_data_dir),
                    WARNING)
    else:
        status_set(
            "maintenance",
            "Migrating data from {} to {}".format(old_data_dir, new_data_dir),
        )
        helpers.makedirs(new_data_dir,
                         mode=0o770,
                         user="******",
                         group="postgres")
        try:
            rsync_cmd = [
                "rsync", "-av", old_data_dir + "/", new_data_dir + "/"
            ]
            hookenv.log("Running {}".format(" ".join(rsync_cmd)), DEBUG)
            subprocess.check_call(rsync_cmd)
        except subprocess.CalledProcessError:
            status_set(
                "blocked",
                "Failed to sync data from {} to {}"
                "".format(old_data_dir, new_data_dir),
            )
            return

    os.replace(old_data_dir, backup_data_dir)
    os.symlink(new_data_dir, old_data_dir)
    fix_perms(new_data_dir)
    reactive.remove_state("postgresql.storage.needs_remount")
Example #7
def attach():
    mount = hookenv.storage_get()['location']
    pgdata = os.path.join(mount, postgresql.version(), 'main')
    unitdata.kv().set(pgdata_mount_key, mount)
    unitdata.kv().set(pgdata_path_key, pgdata)

    hookenv.log('PGDATA storage attached at {}'.format(mount))

    # Never happens with Juju 2.0 as we can't reuse an old mount. This
    # check is here for the future.
    existingdb = os.path.exists(pgdata)

    required_space = shutil.disk_usage(postgresql.data_dir()).used
    free_space = shutil.disk_usage(mount).free

    if required_space > free_space and not existingdb:
        hookenv.status_set('blocked',
                           'Not enough free space in pgdata storage')
    else:
        apt.queue_install(['rsync'])
        coordinator.acquire('restart')
        reactive.set_state('postgresql.storage.pgdata.attached')
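Note that shutil.disk_usage() reports on the whole filesystem containing the given path, not the directory itself, so required_space above over-estimates by counting everything else on the old filesystem. The call returns a named tuple of bytes; an illustration with stand-in paths:

import shutil

current = shutil.disk_usage('/var')  # filesystem holding the old PGDATA
target = shutil.disk_usage('/tmp')   # filesystem of the new mount

required_space = current.used  # bytes used on the old filesystem
free_space = target.free       # bytes free on the new one

if required_space > free_space:
    print('Not enough free space: need {} bytes, have {}'.format(
        required_space, free_space))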
Example #8
def db_relation_common(rel):
    """Publish unit specific relation details."""
    local = rel.local
    if "database" not in local:
        return  # Not yet ready.

    # Version number, allowing clients to adjust or block if their
    # expectations are not met.
    local["version"] = postgresql.version()

    # Calculate the state of this unit. 'standalone' will disappear
    # in a future version of this interface, as this state was
    # only needed to deal with race conditions now solved by
    # Juju leadership. We check for is_primary() rather than
    # the postgresql.replication.is_master reactive state to
    # publish the correct state when we are using manual replication
    # (there might be multiple independent masters, possibly useful for
    # sharding, or perhaps this is a multi master BDR setup).
    if postgresql.is_primary():
        if reactive.helpers.is_state("postgresql.replication.has_peers"):
            local["state"] = "master"
        else:
            local["state"] = "standalone"
    else:
        local["state"] = "hot standby"

    # Host is the private ip address, but this might change and
    # become the address of an attached proxy or alternative peer
    # if this unit is in maintenance.
    local["host"] = ingress_address(local.relname, local.relid)

    # Port will be 5432, unless the user has overridden it or
    # something very weird happened when the packages were installed.
    local["port"] = str(postgresql.port())

    # The list of remote units on this relation granted access.
    # This is to avoid the race condition where a new client unit
    # joins an existing client relation and sees valid credentials,
    # before we have had a chance to grant it access.
    local["allowed-units"] = " ".join(unit for unit, relinfo in rel.items() if len(incoming_addresses(relinfo)) > 0)

    # The list of IP address ranges on this relation granted access.
    # This will replace allowed-units, which does not work with
    # cross-model relations due to the anonymization of the external client.
    local["allowed-subnets"] = ",".join(
        sorted({r: True for r in chain(*[incoming_addresses(relinfo) for relinfo in rel.values()])}.keys())
    )

    # v2 protocol. Publish connection strings for this unit and its peers.
    # Clients should use these connection strings in favour of the old
    # host, port, database settings. A single proxy unit can thus
    # publish several end points to clients.
    master = replication.get_master()
    if replication.is_master():
        master_relinfo = local
    else:
        master_relinfo = rel.peers.get(master)
    local["master"] = relinfo_to_cs(master_relinfo)
    all_relinfo = list(rel.peers.values()) if rel.peers else []
    all_relinfo.append(rel.local)
    standbys = filter(
        None,
        [relinfo_to_cs(relinfo) for relinfo in all_relinfo if relinfo.unit != master],
    )
    local["standbys"] = "\n".join(sorted(standbys)) or None
Example #9
def upgrade_charm():
    workloadstatus.status_set("maintenance", "Upgrading charm")

    rels = context.Relations()

    # The master is now appointed by the leader.
    if hookenv.is_leader():
        master = replication.get_master()
        if not master:
            master = hookenv.local_unit()
            peer_rel = helpers.get_peer_relation()
            if peer_rel:
                for peer_relinfo in peer_rel.values():
                    if peer_relinfo.get("state") == "master":
                        master = peer_relinfo.unit
                        break
            hookenv.log("Discovered {} is the master".format(master))
            leadership.leader_set(master=master)

    # The name of this crontab has changed. It will get regenerated.
    if os.path.exists("/etc/cron.d/postgresql"):
        hookenv.log("Removing old crontab")
        os.unlink("/etc/cron.d/postgresql")

    # Older usernames were generated from the relation id, and really
    # old ones contained random components. This made it problematic
    # to restore a database into a fresh environment, because the new
    # usernames would not match the old usernames and none of the
    # database permissions would match. We now generate
    # usernames using just the client service name, so restoring a
    # database into a fresh environment will work provided the service
    # names match. We want to update the old usernames in upgraded
    # services to the new format to improve their disaster recovery
    # story.
    for relname, superuser in [("db", False), ("db-admin", True)]:
        for client_rel in rels[relname].values():
            hookenv.log("Migrating database users for {}".format(client_rel))
            password = client_rel.local.get("password", host.pwgen())
            old_username = client_rel.local.get("user")
            new_username = postgresql.username(client_rel.service, superuser,
                                               False)
            if old_username and old_username != new_username:
                migrate_user(old_username, new_username, password, superuser)
                client_rel.local["user"] = new_username
                client_rel.local["password"] = password

            old_username = client_rel.local.get("schema_user")
            if old_username and old_username != new_username:
                migrate_user(old_username, new_username, password, superuser)
                client_rel.local["schema_user"] = new_username
                client_rel.local["schema_password"] = password

    # Admin relations used to get 'all' published as the database name,
    # which was bogus.
    for client_rel in rels["db-admin"].values():
        if client_rel.local.get("database") == "all":
            client_rel.local["database"] = client_rel.service

    # Reconfigure PostgreSQL and republish client relations.
    reactive.remove_state("postgresql.cluster.configured")
    reactive.remove_state("postgresql.client.published")

    # Don't recreate the cluster.
    reactive.set_state("postgresql.cluster.created")

    # Set the postgresql.replication.cloned flag, so we don't rebuild
    # standbys when upgrading the charm from a pre-reactive version.
    reactive.set_state("postgresql.replication.cloned")

    # Publish which node we are following
    peer_rel = helpers.get_peer_relation()
    if peer_rel and "following" not in peer_rel.local:
        following = unitdata.kv().get("postgresql.replication.following")
        if following is None and not replication.is_master():
            following = replication.get_master()
        peer_rel.local["following"] = following

    # Ensure storage that was attached but ignored is no longer ignored.
    if not reactive.is_state("postgresql.storage.pgdata.attached"):
        if hookenv.storage_list("pgdata"):
            storage.attach()

    # Ensure client usernames and passwords match leader settings.
    for relname in ("db", "db-admin"):
        for rel in rels[relname].values():
            del rel.local["user"]
            del rel.local["password"]

    # Ensure the configure version is cached.
    postgresql.version()

    # Skip checks for pre-existing databases, as that has already happened.
    reactive.set_state("postgresql.cluster.initial-check")

    # Reinstall support scripts
    reactive.remove_state("postgresql.cluster.support-scripts")

    # Ensure that systemd is managing the PostgreSQL process
    if (host.init_is_systemd()
            and not reactive.is_flag_set("postgresql.upgrade.systemd")):
        reactive.set_flag("postgresql.upgrade.systemd")
        if reactive.is_flag_set("postgresql.cluster.is_running"):
            hookenv.log("Restarting PostgreSQL under systemd", hookenv.WARNING)
            reactive.clear_flag("postgresql.cluster.is_running")
            postgresql.stop_pgctlcluster()

    # Update the PGDG source, in case the signing key has changed.
    config = hookenv.config()
    if config["pgdg"]:
        service.add_pgdg_source()
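migrate_user() is defined elsewhere in the charm. At the SQL level, renaming a role preserves its ownership and grants, but PostgreSQL clears an MD5 password on rename (the hash salts in the role name), which is why a password is always passed along. A hypothetical sketch of the idea using psycopg2; the real helper very likely differs:

import psycopg2
from psycopg2.extensions import quote_ident

def migrate_user(old_username, new_username, password, superuser=False,
                 dsn="dbname=postgres"):
    # Hypothetical sketch only, not the charm's implementation.
    con = psycopg2.connect(dsn)
    con.autocommit = True
    cur = con.cursor()
    cur.execute("ALTER ROLE {} RENAME TO {}".format(
        quote_ident(old_username, cur), quote_ident(new_username, cur)))
    # Renaming clears an MD5 password, so reset it. psycopg2 binds
    # parameters client-side, so %s works in utility statements too.
    cur.execute("ALTER ROLE {} PASSWORD %s".format(
        quote_ident(new_username, cur)), (password,))
    cur.execute("ALTER ROLE {} {}".format(
        quote_ident(new_username, cur),
        "SUPERUSER" if superuser else "NOSUPERUSER"))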
Example #10
def block_on_invalid_config():
    """
    Sanity check charm configuration, blocking the unit if we have
    bogus config values or config changes the charm does not
    yet (or cannot) support.

    We need to do this before the main reactive loop (@preflight),
    or we risk failing to run handlers that rely on @when_file_changed,
    reactive.helpers.data_changed or similar state tied to
    charmhelpers.core.unitdata transactions.
    """
    valid = True
    config = hookenv.config()

    enums = dict(
        version=set(["", "9.5", "9.6", "10", "11", "12"]),
        package_status=set(["install", "hold"]),
    )
    for key, vals in enums.items():
        config[key] = (config.get(key) or "").lower()
        if config[key] not in vals:
            valid = False
            status_set("blocked",
                       "Invalid value for {} ({!r})".format(key, config[key]))

    unchangeable_config = ["locale", "encoding", "manual_replication"]
    if config._prev_dict is not None:
        for name in unchangeable_config:
            if config.changed(name):
                valid = False
                status_set(
                    "blocked",
                    "Cannot change {!r} after install "
                    "(from {!r} to {!r}).".format(name, config.previous(name),
                                                  config[name]),
                )
                config[name] = config.previous(name)
        if config.changed("version") and (config.previous("version") !=
                                          postgresql.version()):
            valid = False
            status_set(
                "blocked",
                "Cannot change version after install "
                "(from {!r} to {!r}).".format(config.previous("version"),
                                              config["version"]),
            )
            config["version"] = config.previous("version")

    metrics_target = config["metrics_target"].strip()
    if metrics_target:
        if ":" not in metrics_target:
            status_set("blocked",
                       "Invalid metrics_target {}".format(metrics_target))
            valid = False
        metrics_interval = config["metrics_sample_interval"]
        if not metrics_interval:
            status_set(
                "blocked",
                "metrics_sample_interval is required when "
                "metrics_target is set",
            )
            valid = False

    if not valid:
        raise SystemExit(0)
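The only structural check on metrics_target above is that it contains a colon. If a stricter host:port validation were wanted (an extension for illustration, not what the charm does), it could look like:

def valid_metrics_target(value):
    # Require a non-empty host and a numeric port, e.g. "statsd:8125".
    host, sep, port = value.strip().rpartition(":")
    return bool(sep) and bool(host) and port.isdigit()


assert valid_metrics_target("statsd.example.com:8125")
assert not valid_metrics_target("statsd.example.com")
assert not valid_metrics_target(":8125")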