Example #1
def write_log_environment(placeholders):
    """Build the log shipping environment (S3 host and key) and write it as envdir files."""
    log_env = defaultdict(lambda: '')
    log_env.update(placeholders)

    aws_region = log_env.get('AWS_REGION')
    if not aws_region:
        aws_region = placeholders['instance_data']['zone'][:-1]
    log_env['LOG_AWS_HOST'] = 's3.{}.amazonaws.com'.format(aws_region)

    log_s3_key = 'spilo/{LOG_BUCKET_SCOPE_PREFIX}{SCOPE}{LOG_BUCKET_SCOPE_SUFFIX}/log/'.format(
        **log_env)
    log_s3_key += placeholders['instance_data']['id']
    log_env['LOG_S3_KEY'] = log_s3_key

    if not os.path.exists(log_env['LOG_TMPDIR']):
        os.makedirs(log_env['LOG_TMPDIR'])
        os.chmod(log_env['LOG_TMPDIR'], 0o1777)

    if not os.path.exists(log_env['LOG_ENV_DIR']):
        os.makedirs(log_env['LOG_ENV_DIR'])

    for var in ('LOG_TMPDIR', 'LOG_AWS_HOST', 'LOG_S3_KEY', 'LOG_S3_BUCKET',
                'PGLOG'):
        write_file(log_env[var], os.path.join(log_env['LOG_ENV_DIR'], var),
                   True)
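
A minimal driver for the function above, with the module's write_file helper stubbed out and every placeholder value invented for illustration:

# Hypothetical usage sketch; write_file is a simplified stand-in for the
# module's helper, and all values below are made up.
import os
from collections import defaultdict

def write_file(value, path, overwrite):
    # stand-in: write the value unless the file exists and overwrite is False
    if overwrite or not os.path.exists(path):
        with open(path, 'w') as f:
            f.write(value)

placeholders = {
    'LOG_TMPDIR': '/tmp/spilo-demo/log-tmp',
    'LOG_ENV_DIR': '/tmp/spilo-demo/log-env',
    'LOG_S3_BUCKET': 'example-log-bucket',
    'LOG_BUCKET_SCOPE_PREFIX': '',
    'LOG_BUCKET_SCOPE_SUFFIX': '',
    'SCOPE': 'demo-cluster',
    'PGLOG': '/home/postgres/pgdata/pgroot/pg_log',
    'instance_data': {'zone': 'eu-central-1a', 'id': 'i-0123456789abcdef0'},
}
write_log_environment(placeholders)
# LOG_AWS_HOST -> s3.eu-central-1.amazonaws.com (zone minus its trailing letter)
# LOG_S3_KEY   -> spilo/demo-cluster/log/i-0123456789abcdef0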
Example #2
def write_certificates(environment, overwrite):
    """Write SSL certificate to files

    If certificates are specified, they are written, otherwise
    dummy certificates are generated and written"""

    ssl_keys = ['SSL_CERTIFICATE', 'SSL_PRIVATE_KEY']
    if set(ssl_keys) <= set(environment):
        for k in ssl_keys:
            write_file(environment[k], environment[k + '_FILE'], overwrite)
    else:
        if os.path.exists(environment['SSL_PRIVATE_KEY_FILE']) and not overwrite:
            logging.warning('Private key already exists, not overwriting. (Use option --force if necessary)')
            return
        openssl_cmd = [
            '/usr/bin/openssl',
            'req',
            '-nodes',
            '-new',
            '-x509',
            '-subj',
            '/CN=spilo.example.org',
            '-keyout',
            environment['SSL_PRIVATE_KEY_FILE'],
            '-out',
            environment['SSL_CERTIFICATE_FILE'],
        ]
        logging.info('Generating ssl certificate')
        p = subprocess.Popen(openssl_cmd, shell=False, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        output, _ = p.communicate()
        logging.debug(output)

    os.chmod(environment['SSL_PRIVATE_KEY_FILE'], 0o600)
    adjust_owner(environment['SSL_PRIVATE_KEY_FILE'], gid=-1)
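
A sketch of the two input shapes the function accepts; the values are invented, and write_file/adjust_owner are assumed to be the surrounding module's helpers:

# Case 1 (hypothetical): certificate and key supplied, written verbatim.
environment = {
    'SSL_CERTIFICATE': '-----BEGIN CERTIFICATE-----\n...',
    'SSL_CERTIFICATE_FILE': '/tmp/spilo-demo/server.crt',
    'SSL_PRIVATE_KEY': '-----BEGIN PRIVATE KEY-----\n...',
    'SSL_PRIVATE_KEY_FILE': '/tmp/spilo-demo/server.key',
}
# Case 2 (hypothetical): no PEM payloads; write_certificates(environment, False)
# would shell out to openssl and generate a dummy self-signed pair for
# CN=spilo.example.org at the *_FILE paths, then chmod the key to 0600.
environment = {
    'SSL_CERTIFICATE_FILE': '/tmp/spilo-demo/server.crt',
    'SSL_PRIVATE_KEY_FILE': '/tmp/spilo-demo/server.key',
}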
Example #3
def write_clone_pgpass(placeholders, overwrite):
    """Write a .pgpass file with the credentials for the clone source."""
    pgpassfile = placeholders['CLONE_PGPASS']
    # pgpass is host:port:database:user:password
    r = {'host': escape_pgpass_value(placeholders['CLONE_HOST']),
         'port': placeholders['CLONE_PORT'],
         'database': '*',
         'user': escape_pgpass_value(placeholders['CLONE_USER']),
         'password': escape_pgpass_value(placeholders['CLONE_PASSWORD'])}
    pgpass_string = "{host}:{port}:{database}:{user}:{password}".format(**r)
    write_file(pgpass_string, pgpassfile, overwrite)
    os.chmod(pgpassfile, 0o600)
    adjust_owner(pgpassfile, gid=-1)
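
escape_pgpass_value is defined elsewhere in the module. Per the PostgreSQL .pgpass rules, literal ':' and '\' inside a field must be backslash-escaped; a plausible stand-in under that assumption:

import re

def escape_pgpass_value(value):
    # assumption: mirrors the module helper by escaping the two characters
    # .pgpass treats specially, backslash and colon
    return re.sub(r'([:\\])', r'\\\1', value)

print(escape_pgpass_value('se:cr\\et'))  # se\:cr\\et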
Example #4
def write_pam_oauth2_configuration(placeholders, overwrite):
    """Render /etc/pam.d/postgresql from the PAM_OAUTH2 placeholder."""
    pam_oauth2_args = placeholders.get('PAM_OAUTH2') or ''
    t = pam_oauth2_args.split()
    if len(t) < 2:
        return logging.info("No PAM_OAUTH2 configuration was specified, skipping")

    r = urlparse(t[0])
    if r.scheme != 'https':
        return logging.error('First argument of PAM_OAUTH2 must be a valid https url: %s', r)

    pam_oauth2_config = 'auth sufficient pam_oauth2.so {0}\n'.format(pam_oauth2_args)
    pam_oauth2_config += 'account sufficient pam_oauth2.so\n'

    write_file(pam_oauth2_config, '/etc/pam.d/postgresql', overwrite)
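
For a hypothetical PAM_OAUTH2 value (a token-info URL followed by the required claims), the function writes a two-line PAM stack:

# URL and claims below are examples only.
placeholders = {'PAM_OAUTH2': 'https://info.example.com/oauth2/tokeninfo?access_token= uid realm=/employees'}
# write_pam_oauth2_configuration(placeholders, True) would write
# /etc/pam.d/postgresql containing:
#   auth sufficient pam_oauth2.so https://info.example.com/oauth2/tokeninfo?access_token= uid realm=/employees
#   account sufficient pam_oauth2.so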
Example #5
def write_pgbouncer_configuration(placeholders, overwrite):
    """Write the pgbouncer config (and optional userlist) and enable its runit service."""
    pgbouncer_config = placeholders.get('PGBOUNCER_CONFIGURATION')
    if not pgbouncer_config:
        return logging.info('No PGBOUNCER_CONFIGURATION was specified, skipping')

    pgbouncer_dir = os.path.join(placeholders['RW_DIR'], 'pgbouncer')
    if not os.path.exists(pgbouncer_dir):
        os.makedirs(pgbouncer_dir)
    write_file(pgbouncer_config, pgbouncer_dir + '/pgbouncer.ini', overwrite)

    pgbouncer_auth = placeholders.get('PGBOUNCER_AUTHENTICATION') or placeholders.get('PGBOUNCER_AUTH')
    if pgbouncer_auth:
        write_file(pgbouncer_auth, pgbouncer_dir + '/userlist.txt', overwrite)

    link_runit_service(placeholders, 'pgbouncer')
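
A sketch of the placeholders the function consumes; the ini content and userlist entry are invented:

# Hypothetical inputs for write_pgbouncer_configuration.
placeholders = {
    'RW_DIR': '/run',
    'PGBOUNCER_CONFIGURATION': (
        '[databases]\n'
        '* = host=127.0.0.1 port=5432\n'
        '\n'
        '[pgbouncer]\n'
        'listen_addr = 0.0.0.0\n'
        'listen_port = 6432\n'
        'auth_file = /run/pgbouncer/userlist.txt\n'
    ),
    'PGBOUNCER_AUTHENTICATION': '"pooler" "md5abc..."\n',
}
# Writes /run/pgbouncer/pgbouncer.ini and /run/pgbouncer/userlist.txt,
# then links the runit service so pgbouncer is supervised.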
Example #6
def update_configs(new_version):
    """Switch the Patroni config to the new PostgreSQL version and patch WAL-E/WAL-G envdir prefixes."""
    from spilo_commons import append_extensions, get_bin_dir, get_patroni_config, write_file, write_patroni_config

    config = get_patroni_config()

    config['postgresql']['bin_dir'] = get_bin_dir(new_version)

    version = float(new_version)
    shared_preload_libraries = config['postgresql'].get(
        'parameters', {}).get('shared_preload_libraries')
    if shared_preload_libraries is not None:
        config['postgresql']['parameters']['shared_preload_libraries'] = \
            append_extensions(shared_preload_libraries, version)

    extwlist_extensions = config['postgresql'].get(
        'parameters', {}).get('extwlist.extensions')
    if extwlist_extensions is not None:
        config['postgresql']['parameters']['extwlist.extensions'] = \
            append_extensions(extwlist_extensions, version, True)

    write_patroni_config(config, True)

    # update wal-e/wal-g envdir files
    restore_command = shlex.split(config['postgresql'].get(
        'recovery_conf', {}).get('restore_command', ''))
    if len(restore_command) > 6 and restore_command[0] == 'envdir':
        envdir = restore_command[1]

        try:
            for name in os.listdir(envdir):
                # len('WALE__PREFIX') = 12
                if len(name) > 12 and name.endswith(
                        '_PREFIX') and name[:5] in ('WALE_', 'WALG_'):
                    name = os.path.join(envdir, name)
                    try:
                        with open(name) as f:
                            value = f.read().strip()
                        new_value = patch_wale_prefix(value, new_version)
                        if new_value != value:
                            write_file(new_value, name, True)
                    except Exception as e:
                        logger.error('Failed to process %s: %r', name, e)
        except Exception:
            pass  # the envdir may be missing or unreadable; leave it untouched
        else:
            return envdir
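
patch_wale_prefix is defined elsewhere in the module; conceptually it rewrites the trailing major-version segment of a WAL prefix. A simplified stand-in under that assumption:

def patch_wale_prefix(value, new_version):
    # assumption: swap the trailing ".../wal/<old_version>" segment
    # for the new major version, leaving other prefixes untouched
    prefix, sep, old_version = value.rstrip('/').rpartition('/')
    if sep and old_version.replace('.', '').isdigit() and old_version != new_version:
        return '{0}/{1}'.format(prefix, new_version)
    return value

print(patch_wale_prefix('s3://bucket/spilo/demo/wal/12', '13'))
# s3://bucket/spilo/demo/wal/13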
Example #7
def write_wale_environment(placeholders, prefix, overwrite):
    """Write WAL-E/WAL-G envdir files for the configured backup store (S3, GCS, Swift or Azure)."""
    s3_names = [
        'WALE_S3_PREFIX', 'WALG_S3_PREFIX', 'AWS_ACCESS_KEY_ID',
        'AWS_SECRET_ACCESS_KEY', 'WALE_S3_ENDPOINT', 'AWS_ENDPOINT',
        'AWS_REGION', 'AWS_INSTANCE_PROFILE', 'WALG_S3_SSE_KMS_ID',
        'WALG_S3_SSE', 'WALG_DISABLE_S3_SSE', 'AWS_S3_FORCE_PATH_STYLE'
    ]
    azure_names = [
        'WALG_AZ_PREFIX', 'AZURE_STORAGE_ACCOUNT', 'AZURE_STORAGE_ACCESS_KEY'
    ]
    gs_names = [
        'WALE_GS_PREFIX', 'WALG_GS_PREFIX', 'GOOGLE_APPLICATION_CREDENTIALS'
    ]
    swift_names = [
        'WALE_SWIFT_PREFIX', 'SWIFT_AUTHURL', 'SWIFT_TENANT',
        'SWIFT_TENANT_ID', 'SWIFT_USER', 'SWIFT_USER_ID',
        'SWIFT_USER_DOMAIN_NAME', 'SWIFT_USER_DOMAIN_ID', 'SWIFT_PASSWORD',
        'SWIFT_AUTH_VERSION', 'SWIFT_ENDPOINT_TYPE', 'SWIFT_REGION',
        'SWIFT_DOMAIN_NAME', 'SWIFT_DOMAIN_ID', 'SWIFT_PROJECT_NAME',
        'SWIFT_PROJECT_ID', 'SWIFT_PROJECT_DOMAIN_NAME',
        'SWIFT_PROJECT_DOMAIN_ID'
    ]

    walg_names = [
        'WALG_DELTA_MAX_STEPS', 'WALG_DELTA_ORIGIN',
        'WALG_DOWNLOAD_CONCURRENCY', 'WALG_UPLOAD_CONCURRENCY',
        'WALG_UPLOAD_DISK_CONCURRENCY', 'WALG_DISK_RATE_LIMIT',
        'WALG_NETWORK_RATE_LIMIT', 'WALG_COMPRESSION_METHOD',
        'USE_WALG_BACKUP', 'USE_WALG_RESTORE',
        'WALG_BACKUP_COMPRESSION_METHOD', 'WALG_BACKUP_FROM_REPLICA',
        'WALG_SENTINEL_USER_DATA', 'WALG_PREVENT_WAL_OVERWRITE'
    ]

    wale = defaultdict(lambda: '')
    for name in [
            'PGVERSION', 'WALE_ENV_DIR', 'SCOPE', 'WAL_BUCKET_SCOPE_PREFIX',
            'WAL_BUCKET_SCOPE_SUFFIX', 'WAL_S3_BUCKET', 'WAL_GCS_BUCKET',
            'WAL_GS_BUCKET', 'WAL_SWIFT_BUCKET', 'BACKUP_NUM_TO_RETAIN',
            'ENABLE_WAL_PATH_COMPAT'
    ] + s3_names + swift_names + gs_names + walg_names + azure_names:
        wale[name] = placeholders.get(prefix + name, '')

    if wale.get('WAL_S3_BUCKET') or wale.get('WALE_S3_PREFIX') or wale.get(
            'WALG_S3_PREFIX'):
        wale_endpoint = wale.pop('WALE_S3_ENDPOINT', None)
        aws_endpoint = wale.pop('AWS_ENDPOINT', None)
        aws_region = wale.pop('AWS_REGION', None)

        # for S3-compatible storage we want to specify WALE_S3_ENDPOINT and AWS_ENDPOINT, but not AWS_REGION
        if aws_endpoint or wale_endpoint:
            if not aws_endpoint:
                aws_endpoint = wale_endpoint.replace('+path://', '://')
            elif not wale_endpoint:
                wale_endpoint = aws_endpoint.replace('://', '+path://')
            wale.update(WALE_S3_ENDPOINT=wale_endpoint,
                        AWS_ENDPOINT=aws_endpoint,
                        WALG_DISABLE_S3_SSE='true')
        elif not aws_region:
            # try to determine region from the endpoint or bucket name
            name = wale.get('WAL_S3_BUCKET') or wale.get('WALE_S3_PREFIX')
            match = re.search(r'.*(\w{2}-\w+-\d)-.*', name)
            if match:
                aws_region = match.group(1)
            else:
                aws_region = placeholders['instance_data']['zone'][:-1]
            wale['AWS_REGION'] = aws_region
        else:
            wale['AWS_REGION'] = aws_region

        if not (wale.get('AWS_SECRET_ACCESS_KEY')
                and wale.get('AWS_ACCESS_KEY_ID')):
            wale['AWS_INSTANCE_PROFILE'] = 'true'
        if (wale.get('USE_WALG_BACKUP') and wale.get('WALG_DISABLE_S3_SSE') != 'true'
                and not wale.get('WALG_S3_SSE')):
            wale['WALG_S3_SSE'] = 'AES256'
        write_envdir_names = s3_names + walg_names
    elif wale.get('WAL_GCS_BUCKET') or wale.get('WAL_GS_BUCKET') or\
            wale.get('WALE_GCS_PREFIX') or wale.get('WALE_GS_PREFIX') or wale.get('WALG_GS_PREFIX'):
        if wale.get('WALE_GCS_PREFIX'):
            wale['WALE_GS_PREFIX'] = wale['WALE_GCS_PREFIX']
        elif wale.get('WAL_GCS_BUCKET'):
            wale['WAL_GS_BUCKET'] = wale['WAL_GCS_BUCKET']
        write_envdir_names = gs_names + walg_names
    elif wale.get('WAL_SWIFT_BUCKET') or wale.get('WALE_SWIFT_PREFIX'):
        write_envdir_names = swift_names
    elif wale.get("WALG_AZ_PREFIX"):
        write_envdir_names = azure_names + walg_names
    else:
        return

    prefix_env_name = write_envdir_names[0]
    store_type = prefix_env_name[5:].split('_')[0]
    if not wale.get(prefix_env_name):  # WALE_*_PREFIX is not defined in the environment
        bucket_path = '/spilo/{WAL_BUCKET_SCOPE_PREFIX}{SCOPE}{WAL_BUCKET_SCOPE_SUFFIX}/wal/{PGVERSION}'.format(
            **wale)
        prefix_template = '{0}://{{WAL_{1}_BUCKET}}{2}'.format(
            store_type.lower(), store_type, bucket_path)
        wale[prefix_env_name] = prefix_template.format(**wale)
    # Set WALG_*_PREFIX for future compatibility
    if store_type in ('S3', 'GS') and not wale.get(write_envdir_names[1]):
        wale[write_envdir_names[1]] = wale[prefix_env_name]

    if not os.path.exists(wale['WALE_ENV_DIR']):
        os.makedirs(wale['WALE_ENV_DIR'])

    wale['WALE_LOG_DESTINATION'] = 'stderr'
    for name in write_envdir_names + ['WALE_LOG_DESTINATION'] + (
        [] if prefix else ['BACKUP_NUM_TO_RETAIN']):
        if wale.get(name):
            path = os.path.join(wale['WALE_ENV_DIR'], name)
            write_file(wale[name], path, overwrite)
            adjust_owner(placeholders, path, gid=-1)

    if not os.path.exists(placeholders['WALE_TMPDIR']):
        os.makedirs(placeholders['WALE_TMPDIR'])
        os.chmod(placeholders['WALE_TMPDIR'], 0o1777)

    write_file(placeholders['WALE_TMPDIR'],
               os.path.join(wale['WALE_ENV_DIR'], 'TMPDIR'), True)
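
To make the prefix derivation concrete, here is the computation from the S3 branch above, with invented values:

# Worked example (hypothetical values): how WALE_S3_PREFIX is derived
# when only WAL_S3_BUCKET is set.
wale = {'WAL_BUCKET_SCOPE_PREFIX': '', 'SCOPE': 'demo',
        'WAL_BUCKET_SCOPE_SUFFIX': '', 'PGVERSION': '14',
        'WAL_S3_BUCKET': 'demo-bucket'}
bucket_path = '/spilo/{WAL_BUCKET_SCOPE_PREFIX}{SCOPE}{WAL_BUCKET_SCOPE_SUFFIX}/wal/{PGVERSION}'.format(**wale)
prefix_template = '{0}://{{WAL_{1}_BUCKET}}{2}'.format('s3', 'S3', bucket_path)
print(prefix_template.format(**wale))  # s3://demo-bucket/spilo/demo/wal/14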