Example #1
def install_administrative_scripts():
    scripts_dir = helpers.scripts_dir()
    logs_dir = helpers.logs_dir()
    helpers.makedirs(scripts_dir, mode=0o755)

    # The database backup script. Most of this is redundant now.
    source = os.path.join(hookenv.charm_dir(), 'scripts', 'pgbackup.py')
    destination = os.path.join(scripts_dir, 'dump-pg-db')
    with open(source, 'r') as f:
        helpers.write(destination, f.read(), mode=0o755)

    backups_dir = helpers.backups_dir()
    helpers.makedirs(backups_dir, mode=0o750,
                     user='******', group='postgres')

    # Generate a wrapper that invokes the backup script for each
    # database.
    data = dict(logs_dir=logs_dir,
                scripts_dir=scripts_dir,
                # backups_dir probably should be deprecated in favour of
                # a juju storage mount.
                backups_dir=backups_dir)
    destination = os.path.join(scripts_dir, 'pg_backup_job')
    templating.render('pg_backup_job.tmpl', destination, data,
                      owner='root', group='postgres', perms=0o755)

    # Install the reaper scripts.
    script = 'pgkillidle.py'
    source = os.path.join(hookenv.charm_dir(), 'scripts', script)
    destination = os.path.join(scripts_dir, script)
    if (reactive.helpers.any_file_changed([source]) or
            not os.path.exists(destination)):
        with open(source, 'r') as f:
            helpers.write(destination, f.read(), mode=0o755)

    if not os.path.exists(logs_dir):
        helpers.makedirs(logs_dir, mode=0o755, user='******',
                         group='postgres')
        # Create the backups.log file used by the backup wrapper if it
        # does not exist, in order to avoid spurious alerts when a
        # unit is installed, per Bug #1329816.
        helpers.write(helpers.backups_log_path(), '', mode=0o644,
                      user='******', group='postgres')
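
The helpers.makedirs() and helpers.write() calls above come from the charm's own helpers module, not the stdlib. A minimal sketch of what such helpers might look like, assuming they simply wrap os, pwd and grp to apply mode and ownership in one step:

import grp
import os
import pwd


def makedirs(path, mode=0o755, user='root', group='root'):
    # Create the directory tree if needed, then enforce the requested
    # mode and ownership on the leaf directory.
    os.makedirs(path, mode=mode, exist_ok=True)
    os.chmod(path, mode)
    os.chown(path, pwd.getpwnam(user).pw_uid, grp.getgrnam(group).gr_gid)


def write(path, content, mode=0o644, user='root', group='root'):
    # Write the file, fixing mode and ownership before the content
    # lands so secrets are never briefly world-readable.
    if isinstance(content, str):
        content = content.encode('UTF-8')
    with open(path, 'wb') as f:
        os.fchmod(f.fileno(), mode)
        os.fchown(f.fileno(), pwd.getpwnam(user).pw_uid,
                  grp.getgrnam(group).gr_gid)
        f.write(content)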
Example #2
def remount():
    if reactive.is_state('postgresql.cluster.is_running'):
        # Attempting this while PostgreSQL is live would be really, really bad.
        service.stop()

    old_data_dir = postgresql.data_dir()
    new_data_dir = os.path.join(external_volume_mount, 'postgresql',
                                postgresql.version(), 'main')
    backup_data_dir = '{}-{}'.format(old_data_dir, int(time.time()))

    if os.path.isdir(new_data_dir):
        hookenv.log('Remounting existing database at {}'.format(new_data_dir),
                    WARNING)
    else:
        status_set('maintenance',
                   'Migrating data from {} to {}'.format(old_data_dir,
                                                         new_data_dir))
        helpers.makedirs(new_data_dir, mode=0o770,
                         user='******', group='postgres')
        try:
            rsync_cmd = ['rsync', '-av',
                         old_data_dir + '/',
                         new_data_dir + '/']
            hookenv.log('Running {}'.format(' '.join(rsync_cmd)), DEBUG)
            subprocess.check_call(rsync_cmd)
        except subprocess.CalledProcessError:
            status_set('blocked',
                       'Failed to sync data from {} to {}'
                       ''.format(old_data_dir, new_data_dir))
            return

    os.replace(old_data_dir, backup_data_dir)
    os.symlink(new_data_dir, old_data_dir)
    fix_perms(new_data_dir)
    reactive.remove_state('postgresql.storage.needs_remount')
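
The ordering in remount() is what makes it safe: the data is copied while the old tree is still intact, the original directory is only renamed aside once the copy has succeeded, and the symlink keeps every path that references the old PGDATA working. A stripped-down sketch of that copy-and-swap pattern, using only the stdlib plus rsync (a hypothetical standalone helper, not the charm's code):

import os
import subprocess
import time


def relocate_dir(old_dir, new_dir):
    backup_dir = '{}-{}'.format(old_dir, int(time.time()))
    # 1. Copy while the original tree is still the live one.
    subprocess.check_call(['rsync', '-av', old_dir + '/', new_dir + '/'])
    # 2. Rename the original aside as a timestamped backup (atomic on
    #    the same filesystem).
    os.replace(old_dir, backup_dir)
    # 3. Leave a symlink behind so configuration still pointing at the
    #    old path resolves to the new location.
    os.symlink(new_dir, old_dir)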
Example #3
def migrate_pgdata():
    '''
    Copy the data from /var/lib/postgresql/9.x/main to the
    new path and replace the original PGDATA with a symlink.
    Note that the original may already be a symlink, either from
    the block storage broker or manual changes by admins.
    '''
    if reactive.is_state('postgresql.cluster.is_running'):
        # Attempting this while PostgreSQL is live would be really, really bad.
        service.stop()

    old_data_dir = postgresql.data_dir()
    new_data_dir = unitdata.kv().get(pgdata_path_key)

    backup_data_dir = '{}-{}'.format(old_data_dir, int(time.time()))

    if os.path.isdir(new_data_dir):
        # This never happens with Juju storage, at least with 2.0,
        # because we have no way of reusing old partitions.
        hookenv.log('Remounting existing database at {}'.format(new_data_dir),
                    WARNING)
    else:
        status_set('maintenance',
                   'Migrating data from {} to {}'.format(old_data_dir,
                                                         new_data_dir))
        helpers.makedirs(new_data_dir, mode=0o770,
                         user='******', group='postgres')
        try:
            rsync_cmd = ['rsync', '-av',
                         old_data_dir + '/',
                         new_data_dir + '/']
            hookenv.log('Running {}'.format(' '.join(rsync_cmd)), DEBUG)
            subprocess.check_call(rsync_cmd, universal_newlines=True)
        except subprocess.CalledProcessError:
            status_set('blocked',
                       'Failed to sync data from {} to {}'
                       ''.format(old_data_dir, new_data_dir))
            return

    os.replace(old_data_dir, backup_data_dir)
    os.symlink(new_data_dir, old_data_dir)
    fix_perms(new_data_dir)
    reactive.set_state('postgresql.storage.pgdata.migrated')
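
migrate_pgdata() reads its target path from the charm's unit-local key/value store (charmhelpers' unitdata). A hedged sketch of how pgdata_path_key might be populated by an earlier storage hook; the key name and mount handling here are assumptions, not the charm's actual code:

import os

from charmhelpers.core import unitdata

# Hypothetical key name; the real charm defines pgdata_path_key elsewhere.
pgdata_path_key = 'postgresql.storage.pgdata.path'


def record_pgdata_mount(mount_point):
    # Remember where Juju attached the storage so migrate_pgdata()
    # can find its target directory in a later hook invocation.
    kv = unitdata.kv()
    kv.set(pgdata_path_key, os.path.join(mount_point, 'main'))
    kv.flush()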
Example #4
def remount():
    if reactive.is_state("postgresql.cluster.is_running"):
        # Attempting this while PostgreSQL is live would be really, really bad.
        service.stop()

    old_data_dir = postgresql.data_dir()
    new_data_dir = os.path.join(external_volume_mount, "postgresql",
                                postgresql.version(), "main")
    backup_data_dir = "{}-{}".format(old_data_dir, int(time.time()))

    if os.path.isdir(new_data_dir):
        hookenv.log("Remounting existing database at {}".format(new_data_dir),
                    WARNING)
    else:
        status_set(
            "maintenance",
            "Migrating data from {} to {}".format(old_data_dir, new_data_dir),
        )
        helpers.makedirs(new_data_dir,
                         mode=0o770,
                         user="******",
                         group="postgres")
        try:
            rsync_cmd = [
                "rsync", "-av", old_data_dir + "/", new_data_dir + "/"
            ]
            hookenv.log("Running {}".format(" ".join(rsync_cmd)), DEBUG)
            subprocess.check_call(rsync_cmd)
        except subprocess.CalledProcessError:
            status_set(
                "blocked",
                "Failed to sync data from {} to {}"
                "".format(old_data_dir, new_data_dir),
            )
            return

    os.replace(old_data_dir, backup_data_dir)
    os.symlink(new_data_dir, old_data_dir)
    fix_perms(new_data_dir)
    reactive.remove_state("postgresql.storage.needs_remount")
Example #5
def install_administrative_scripts():
    scripts_dir = helpers.scripts_dir()
    logs_dir = helpers.logs_dir()
    helpers.makedirs(scripts_dir, mode=0o755)

    # The database backup script. Most of this is redundant now.
    source = os.path.join(hookenv.charm_dir(), "scripts", "pgbackup.py")
    destination = os.path.join(scripts_dir, "dump-pg-db")
    with open(source, "r") as f:
        helpers.write(destination, f.read(), mode=0o755)

    backups_dir = helpers.backups_dir()
    helpers.makedirs(backups_dir, mode=0o750, user="******", group="postgres")

    # Generate a wrapper that invokes the backup script for each
    # database.
    data = dict(
        logs_dir=logs_dir,
        scripts_dir=scripts_dir,
        # backups_dir probably should be deprecated in favour of
        # a juju storage mount.
        backups_dir=backups_dir,
    )
    destination = os.path.join(scripts_dir, "pg_backup_job")
    templating.render(
        "pg_backup_job.tmpl",
        destination,
        data,
        owner="root",
        group="postgres",
        perms=0o755,
    )

    # Install the reaper scripts.
    script = "pgkillidle.py"
    source = os.path.join(hookenv.charm_dir(), "scripts", script)
    destination = os.path.join(scripts_dir, script)
    if reactive.helpers.any_file_changed([source]) or not os.path.exists(destination):
        with open(source, "r") as f:
            helpers.write(destination, f.read(), mode=0o755)

    if not os.path.exists(logs_dir):
        helpers.makedirs(logs_dir, mode=0o755, user="******", group="postgres")
        # Create the backups.log file used by the backup wrapper if it
        # does not exist, in order to avoid spurious alerts when a
        # unit is installed, per Bug #1329816.
        helpers.write(
            helpers.backups_log_path(),
            "",
            mode=0o644,
            user="******",
            group="postgres",
        )

    reactive.set_state("postgresql.cluster.support-scripts")
Example #6
def update_wal_e_env_dir():
    '''Regenerate the envdir(1) environment used to drive WAL-E.

    We do this even if wal-e is not enabled to ensure we destroy
    any secrets potentially left around from when it was enabled.
    '''
    config = hookenv.config()
    env = dict(
        # wal-e Swift creds
        SWIFT_AUTHURL=config.get('os_auth_url', ''),
        SWIFT_TENANT=config.get('os_tenant_name', ''),
        SWIFT_USER=config.get('os_username', ''),
        SWIFT_PASSWORD=config.get('os_password', ''),

        # wal-e AWS creds
        AWS_ACCESS_KEY_ID=config.get('aws_access_key_id', ''),
        AWS_SECRET_ACCESS_KEY=config.get('aws_secret_access_key', ''),

        # wal-e Azure cred
        WABS_ACCOUNT_NAME=config.get('wabs_account_name', ''),
        WABS_ACCESS_KEY=config.get('wabs_access_key', ''),

        # OpenStack creds for swift(1) cli tool
        OS_AUTH_URL=config.get('os_auth_url', ''),
        OS_USERNAME=config.get('os_username', ''),
        OS_PASSWORD=config.get('os_password', ''),
        OS_TENANT_NAME=config.get('os_tenant_name', ''),

        WALE_SWIFT_PREFIX='',
        WALE_S3_PREFIX='',
        WALE_WABS_PREFIX='')

    uri = config.get('wal_e_storage_uri', None)
    if uri:
        required_env = []
        parsed_uri = urlparse(uri)
        if parsed_uri.scheme == 'swift':
            env['WALE_SWIFT_PREFIX'] = uri
            required_env = ['SWIFT_AUTHURL', 'SWIFT_TENANT',
                            'SWIFT_USER', 'SWIFT_PASSWORD']
        elif parsed_uri.scheme == 's3':
            env['WALE_S3_PREFIX'] = uri
            required_env = ['AWS_ACCESS_KEY_ID', 'AWS_SECRET_ACCESS_KEY']
        elif parsed_uri.scheme == 'wabs':
            env['WALE_WABS_PREFIX'] = uri
            required_env = ['WABS_ACCOUNT_NAME', 'WABS_ACCESS_KEY']
        else:
            hookenv.log('Invalid wal_e_storage_uri {}'.format(uri), ERROR)

        for env_key in required_env:
            if not env[env_key].strip():
                hookenv.log('Missing {}'.format(env_key), ERROR)

    # Regenerate the envdir(1) environment recommended by WAL-E.
    # All possible keys are rewritten to ensure we remove old secrets.
    helpers.makedirs(wal_e_env_dir(), mode=0o750,
                     user='******', group='postgres')
    for k, v in env.items():
        helpers.write(os.path.join(wal_e_env_dir(), k), v.strip(),
                      mode=0o640, user='******', group='postgres')

    reactive.set_state('postgresql.wal_e.configured')
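
The directory written above is consumed with envdir(1) from daemontools, which exports each file in the directory as an environment variable before executing its command, so credentials never appear on a command line. A sketch of how WAL-E might be invoked through it, assuming envdir and the wal-e CLI are installed:

import subprocess


def wal_e_backup_push(env_dir, data_dir):
    # envdir reads one environment variable per file in env_dir,
    # then execs the remaining arguments with that environment.
    subprocess.check_call(['envdir', env_dir, 'wal-e', 'backup-push',
                           data_dir])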
Example #7
def update_wal_e_env_dir(dirpath, storage_uri):
    """Regenerate the envdir(1) environment used to drive WAL-E.

    We do this even if wal-e is not enabled to ensure we destroy
    any secrets potentially left around from when it was enabled.
    """
    config = hookenv.config()
    env = dict(
        # wal-e Swift creds
        SWIFT_AUTHURL=config.get("os_auth_url", ""),
        SWIFT_USER=config.get("os_username", ""),
        SWIFT_PASSWORD=config.get("os_password", ""),
        SWIFT_TENANT=config.get("os_tenant_name", ""),
        SWIFT_REGION=config.get("os_region_name", ""),
        SWIFT_AUTH_VERSION=config.get("os_identity_api_version", ""),
        SWIFT_USER_DOMAIN_NAME=config.get("os_user_domain_name", ""),
        SWIFT_PROJECT_NAME=config.get("os_project_name", ""),
        SWIFT_PROJECT_DOMAIN_NAME=config.get("os_project_domain_name", ""),
        # wal-e AWS creds
        AWS_ACCESS_KEY_ID=config.get("aws_access_key_id", ""),
        AWS_SECRET_ACCESS_KEY=config.get("aws_secret_access_key", ""),
        AWS_REGION=config.get("aws_region", ""),
        # wal-e Azure cred
        WABS_ACCOUNT_NAME=config.get("wabs_account_name", ""),
        WABS_ACCESS_KEY=config.get("wabs_access_key", ""),
        # OpenStack creds for swift(1) cli tool
        OS_AUTH_URL=config.get("os_auth_url", ""),
        OS_USERNAME=config.get("os_username", ""),
        OS_PASSWORD=config.get("os_password", ""),
        OS_TENANT_NAME=config.get("os_tenant_name", ""),
        OS_REGION_NAME=config.get("os_region_name", ""),
        OS_IDENTITY_API_VERSION=config.get("os_identity_api_version", ""),
        OS_USER_DOMAIN_NAME=config.get("os_user_domain_name", ""),
        OS_PROJECT_NAME=config.get("os_project_name", ""),
        OS_PROJECT_DOMAIN_NAME=config.get("os_project_domain_name", ""),
        WALE_SWIFT_PREFIX="",
        WALE_S3_PREFIX="",
        WALE_WABS_PREFIX="",
    )

    uri = storage_uri
    if uri:
        required_env = []
        parsed_uri = urlparse(uri)
        if parsed_uri.scheme == "swift":
            env["WALE_SWIFT_PREFIX"] = uri
            required_env = [
                "SWIFT_AUTHURL",
                "SWIFT_USER",
                "SWIFT_PASSWORD",
            ]
        elif parsed_uri.scheme == "s3":
            env["WALE_S3_PREFIX"] = uri
            required_env = ["AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_REGION"]
        elif parsed_uri.scheme == "wabs":
            env["WALE_WABS_PREFIX"] = uri
            required_env = ["WABS_ACCOUNT_NAME", "WABS_ACCESS_KEY"]
        else:
            hookenv.log("Invalid wal_e_storage_uri {}".format(uri), ERROR)

        for env_key in required_env:
            if not env[env_key].strip():
                hookenv.log("Missing {}".format(env_key), ERROR)

    # Regenerate the envdir(1) environment recommended by WAL-E.
    # All possible keys are rewritten to ensure we remove old secrets.
    helpers.makedirs(dirpath, mode=0o750, user="******", group="postgres")
    for k, v in env.items():
        helpers.write(
            os.path.join(dirpath, k),
            v.strip(),
            mode=0o640,
            user="******",
            group="postgres",
        )
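
A hypothetical call site for this parameterized variant; the envdir path is illustrative only:

config = hookenv.config()
update_wal_e_env_dir("/etc/wal-e/env", config.get("wal_e_storage_uri", ""))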
Example #8
def clone_master():
    master = get_master()
    peer_rel = helpers.get_peer_relation()
    master_relinfo = peer_rel[master]

    # Be paranoid since we are about to destroy data.
    assert not reactive.helpers.is_state("postgresql.replication.is_master")
    assert not reactive.helpers.is_state("postgresql.cluster.is_running")

    # We use realpath on data_dir as it may have been replaced with
    # a symbolic link, so we empty and recreate the actual directory
    # while leaving the links in place.
    data_dir = os.path.realpath(postgresql.data_dir())

    if os.path.exists(data_dir):
        hookenv.log("Removing {} in preparation for clone".format(data_dir))
        shutil.rmtree(data_dir)
    helpers.makedirs(data_dir, mode=0o700, user="******", group="postgres")

    if postgresql.has_version("10"):
        wal_method = "--wal-method=stream"
    else:
        wal_method = "--xlog-method=stream"
    cmd = [
        "sudo",
        "-H",  # -H needed to locate $HOME/.pgpass
        "-u",
        "postgres",
        "pg_basebackup",
        "-D",
        postgresql.data_dir(),
        "-h",
        master_relinfo["host"],
        "-p",
        master_relinfo["port"],
        "--checkpoint=fast",
        "--progress",
        wal_method,
        "--no-password",
        "--username=_juju_repl",
    ]
    hookenv.log("Cloning {} with {}".format(master, " ".join(cmd)))
    status_set("maintenance", "Cloning {}".format(master))
    try:
        # Switch to a directory the postgres user can access.
        with helpers.switch_cwd("/tmp"):
            subprocess.check_call(cmd, universal_newlines=True)
    except subprocess.CalledProcessError as x:
        hookenv.log("Clone failed with {}".format(x), ERROR)
        # We failed, and the local cluster is broken.
        status_set("blocked", "Failed to clone {}".format(master))
        postgresql.drop_cluster()
        reactive.remove_state("postgresql.cluster.configured")
        reactive.remove_state("postgresql.cluster.created")
        # Terminate. We need this hook to exit, rather than enter a loop.
        raise SystemExit(0)

    update_recovery_conf(follow=master)

    reactive.set_state("postgresql.replication.cloned")
    update_replication_states()
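
helpers.switch_cwd() above is the charm's own context manager: pg_basebackup is run via sudo -u postgres, and a working directory the postgres user cannot read would make sudo and some tools complain. A minimal sketch of what such a helper might look like:

import contextlib
import os


@contextlib.contextmanager
def switch_cwd(new_cwd):
    # Temporarily change the working directory, restoring the old one
    # even if the body raises.
    old_cwd = os.getcwd()
    os.chdir(new_cwd)
    try:
        yield new_cwd
    finally:
        os.chdir(old_cwd)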