def _rewind_against_master(self, service):
    """Run pg_rewind to resync the data dir against the new master.

    A recovery.conf pointing at the new master must already exist in
    PGDATA; its ``primary_conninfo`` value is used as the rewind source.

    :param service: datastore service object exposing
                    ``pgsql_recovery_config``, ``pgsql_data_dir`` and
                    ``pgsql_owner``.
    """
    rconf = operating_system.read_file(
        service.pgsql_recovery_config,
        codec=stream_codecs.KeyValueCodec(line_terminator='\n'),
        as_root=True)
    conninfo = rconf['primary_conninfo'].strip()

    # The recovery.conf file we want should already be there, but
    # pg_rewind will delete it, so move it out of the way first.
    rec = service.pgsql_recovery_config
    tmprec = "/tmp/recovery.conf.bak"
    operating_system.move(rec, tmprec, as_root=True)

    # NOTE: pg_rewind treats --source-pgdata and --source-server as
    # mutually exclusive; passing both (as the previous code did) makes
    # it exit with an error before doing any work. We rewind the local
    # data dir (-D) from the remote server given by conninfo.
    cmd_full = " ".join(["pg_rewind",
                         "-D", service.pgsql_data_dir,
                         '--source-server=' + conninfo])
    try:
        out, err = utils.execute("sudo", "su", "-", service.pgsql_owner,
                                 "-c", "%s" % cmd_full, check_exit_code=0)
        # Lazy %-args: the message is only built when DEBUG is enabled.
        LOG.debug("Got stdout %s and stderr %s from pg_rewind",
                  str(out), str(err))
    finally:
        # Always restore recovery.conf, even when pg_rewind fails, so
        # the instance is not left without replication configuration.
        operating_system.move(tmprec, rec, as_root=True)
def apply(self, name, datastore, ds_version, data_file, admin_module):
    """Look up a 'message' entry in the key/value contents file.

    :returns: ``(True, message)`` when a (case-insensitive) 'message'
              key exists, otherwise ``(False, <error text>)``.
    """
    contents = operating_system.read_file(
        data_file, codec=stream_codecs.KeyValueCodec())
    # First matching entry wins, mirroring dict iteration order.
    found = next((val for key, val in contents.items()
                  if key.lower() == 'message'), None)
    if found is not None:
        return True, found
    return False, 'Message not found in contents file'
def pfile_codec(self):
    """Build the codec used to read/write parameter files.

    Values are single-quoted, '#' starts a comment, booleans are
    upper-cased, and keys starting with '_' are treated as hidden.
    """
    codec_options = dict(
        delimiter='=',
        comment_marker='#',
        line_terminator='\n',
        value_quoting=True,
        value_quote_char="'",
        bool_case=stream_codecs.KeyValueCodec.BOOL_UPPER,
        big_ints=True,
        hidden_marker='_',
    )
    return stream_codecs.KeyValueCodec(**codec_options)
def apply(self, name, datastore, ds_version, data_file, admin_module):
    """Install a license key from the contents file and start the server.

    The contents file is parsed as key/value pairs; the first
    (case-insensitive) 'license_key' entry is installed.

    :returns: ``(success, message)`` tuple. Previously the success path
              fell through with no return statement and implicitly
              returned ``None``, breaking callers that unpack the
              (bool, str) result; it now returns an explicit tuple.
    """
    license_key = None
    data = operating_system.read_file(
        data_file, codec=stream_codecs.KeyValueCodec())
    for key, value in data.items():
        if 'license_key' == key.lower():
            license_key = value
            break
    if license_key:
        self._add_license_key(license_key)
        self._server_control('start')
        return True, "Installed license key and started server"
    else:
        return False, "'license_key' not found in contents file"
def configuration_manager(self):
    """Return the ConfigurationManager, creating it on first access.

    The manager is cached on the instance so the (relatively costly)
    construction happens at most once.
    """
    if not self._configuration_manager:
        codec = stream_codecs.KeyValueCodec(
            value_quoting=True,
            bool_case=stream_codecs.KeyValueCodec.BOOL_LOWER,
            big_ints=True)
        override = configuration.ImportOverrideStrategy(
            CNF_INCLUDE_DIR, CNF_EXT)
        self._configuration_manager = configuration.ConfigurationManager(
            CONFIG_FILE,
            CONF.database_service_uid,
            CONF.database_service_uid,
            codec,
            requires_root=True,
            override_strategy=override)
    return self._configuration_manager
def apply(self, name, datastore, ds_version, data_file):
    """Report whether the contents file carries a 'message' entry.

    :returns: ``(found, message)`` — the message value when found,
              otherwise a default not-found text.
    """
    outcome = (False, "Message not found in contents file")
    try:
        contents = operating_system.read_file(
            data_file, codec=stream_codecs.KeyValueCodec())
        for key, value in contents.items():
            if key.lower() == 'message':
                outcome = (True, value)
                break
    except Exception:
        # Deliberately best-effort: assume we couldn't read the file,
        # because there was some issue with it (for example, it's a
        # binary file). Just log it and drive on.
        LOG.error(
            _("Could not extract contents from '%s' - possibly "
              "a binary file?") % name)
    return outcome
def _create_tns_file(self, service, dbs):
    """Write the Oracle TNS names file for the given databases.

    One DESCRIPTION entry per database, keyed by its unique name, then
    chown the file to the instance owner.
    """
    template = ('(DESCRIPTION=(ADDRESS=(PROTOCOL=TCP)'
                '(HOST=%(host)s)(PORT=%(port)s))'
                '(CONNECT_DATA=(SERVICE_NAME=%(service_name)s)))')
    entries = {}
    for db in dbs:
        params = {
            'dbname': db['db_unique_name'],  # not referenced by template
            'host': db['host'],
            'port': CONF.get(MANAGER).listener_port,
            'service_name': service.admin.database_name,
        }
        entries[db['db_unique_name']] = template % params

    tns_file = service.paths.tns_file
    operating_system.write_file(tns_file, entries,
                                codec=stream_codecs.KeyValueCodec(),
                                as_root=True)
    operating_system.chown(tns_file,
                           service.instance_owner,
                           service.instance_owner_group,
                           force=True, as_root=True)
class PgSqlApp(service.BaseDbApp):
    """Manages a PostgreSQL database service running in a docker container."""

    # Manager for postgresql.conf plus per-override include files.
    configuration_manager = configuration.ConfigurationManager(
        CONFIG_FILE,
        CONF.database_service_uid,
        CONF.database_service_uid,
        stream_codecs.KeyValueCodec(
            value_quoting=True,
            bool_case=stream_codecs.KeyValueCodec.BOOL_LOWER,
            big_ints=True),
        requires_root=True,
        override_strategy=configuration.ImportOverrideStrategy(
            CNF_INCLUDE_DIR, CNF_EXT))

    def __init__(self, status, docker_client):
        super(PgSqlApp, self).__init__(status, docker_client)

        # See
        # https://github.com/docker-library/docs/blob/master/postgres/README.md#pgdata
        mount_point = cfg.get_configuration_property('mount_point')
        self.datadir = f"{mount_point}/data/pgdata"

        self.adm = PgSqlAdmin(SUPER_USER_NAME)

    @classmethod
    def get_data_dir(cls):
        """Return the configured 'data_directory' setting."""
        return cls.configuration_manager.get_value('data_directory')

    @classmethod
    def set_data_dir(cls, value):
        """Override 'data_directory' in the configuration."""
        cls.configuration_manager.apply_system_override(
            {'data_directory': value})

    def reload(self):
        """Reload server configuration inside the container (no restart)."""
        cmd = f"pg_ctl reload -D {self.datadir}"
        docker_util.run_command(self.docker_client, cmd)

    def apply_access_rules(self):
        """PostgreSQL Client authentication settings

        The order of entries is important. The first failure to
        authenticate stops the lookup. That is why the 'local'
        connections validate first.

        The OrderedDict is necessary to guarantee the iteration order.
        """
        LOG.debug("Applying client authentication access rules.")

        access_rules = OrderedDict([
            ('local', [['all', SUPER_USER_NAME, None, 'trust'],
                       ['replication', SUPER_USER_NAME, None, 'trust'],
                       ['all', 'all', None, 'md5']]),
            ('host', [['all', SUPER_USER_NAME, '127.0.0.1/32', 'trust'],
                      ['all', SUPER_USER_NAME, '::1/128', 'trust'],
                      ['all', SUPER_USER_NAME, 'localhost', 'trust'],
                      ['all', SUPER_USER_NAME, '0.0.0.0/0', 'reject'],
                      ['all', SUPER_USER_NAME, '::/0', 'reject'],
                      ['all', 'all', '0.0.0.0/0', 'md5'],
                      ['all', 'all', '::/0', 'md5']])
        ])
        operating_system.write_file(
            HBA_CONFIG_FILE, access_rules,
            stream_codecs.PropertiesCodec(string_mappings={'\t': None}),
            as_root=True)
        operating_system.chown(HBA_CONFIG_FILE,
                               CONF.database_service_uid,
                               CONF.database_service_uid,
                               as_root=True)
        # Read-only for the service user once written.
        operating_system.chmod(HBA_CONFIG_FILE,
                               operating_system.FileMode.SET_USR_RO,
                               as_root=True)

    def update_overrides(self, overrides):
        """Update config options in the include directory."""
        if overrides:
            self.configuration_manager.apply_user_override(overrides)

    def reset_configuration(self, configuration):
        """Replace the full configuration with the given content."""
        self.configuration_manager.save_configuration(configuration)

    def apply_overrides(self, overrides):
        """Reload config."""
        # No -D needed: PGDATA is set in the container environment.
        cmd = "pg_ctl reload"
        docker_util.run_command(self.docker_client, cmd)

    def start_db(self, update_db=False, ds_version=None, command=None,
                 extra_volumes=None):
        """Start and wait for database service."""
        docker_image = CONF.get(CONF.datastore_manager).docker_image
        image = (f'{docker_image}:latest' if not ds_version
                 else f'{docker_image}:{ds_version}')
        command = command if command else ''

        try:
            postgres_pass = self.get_auth_password(file="postgres.cnf")
        except exception.UnprocessableEntity:
            postgres_pass = utils.generate_random_password()

        # Get uid and gid for the container user. NOTE: the previous
        # "******" format string had no conversion specifiers and raised
        # TypeError when applied to a tuple; docker expects "uid:gid".
        user = "%s:%s" % (CONF.database_service_uid,
                          CONF.database_service_uid)

        # Create folders for postgres on localhost
        for folder in ['/etc/postgresql', '/var/run/postgresql']:
            operating_system.ensure_directory(
                folder, user=CONF.database_service_uid,
                group=CONF.database_service_uid, force=True,
                as_root=True)

        volumes = {
            "/etc/postgresql": {
                "bind": "/etc/postgresql",
                "mode": "rw"
            },
            "/var/run/postgresql": {
                "bind": "/var/run/postgresql",
                "mode": "rw"
            },
            "/var/lib/postgresql": {
                "bind": "/var/lib/postgresql",
                "mode": "rw"
            },
            "/var/lib/postgresql/data": {
                "bind": "/var/lib/postgresql/data",
                "mode": "rw"
            },
        }
        if extra_volumes:
            volumes.update(extra_volumes)

        try:
            LOG.info("Starting docker container, image: %s", image)
            docker_util.start_container(
                self.docker_client,
                image,
                volumes=volumes,
                network_mode="host",
                user=user,
                environment={
                    "POSTGRES_PASSWORD": postgres_pass,
                    "PGDATA": self.datadir,
                },
                command=command)

            # Save root password
            LOG.debug("Saving root credentials to local host.")
            self.save_password('postgres', postgres_pass)
        except Exception:
            LOG.exception("Failed to start database service")
            raise exception.TroveError("Failed to start database service")

        if not self.status.wait_for_status(
                service_status.ServiceStatuses.HEALTHY,
                CONF.state_change_wait_time, update_db):
            raise exception.TroveError("Failed to start database service")

    def restart(self):
        """Restart the database container and wait until it is healthy."""
        LOG.info("Restarting database")

        # Ensure folders permission for database.
        for folder in ['/etc/postgresql', '/var/run/postgresql']:
            operating_system.ensure_directory(
                folder, user=CONF.database_service_uid,
                group=CONF.database_service_uid, force=True,
                as_root=True)

        try:
            docker_util.restart_container(self.docker_client)
        except Exception:
            LOG.exception("Failed to restart database")
            raise exception.TroveError("Failed to restart database")

        if not self.status.wait_for_status(
                service_status.ServiceStatuses.HEALTHY,
                CONF.state_change_wait_time, update_db=True):
            raise exception.TroveError("Failed to start database")

        LOG.info("Finished restarting database")

    def restore_backup(self, context, backup_info, restore_location):
        """Stop the database, wipe the data dirs and restore from a backup.

        The restore itself runs inside a dedicated backup container.

        :raises Exception: if the restore container reports failure.
        """
        backup_id = backup_info['id']
        storage_driver = CONF.storage_strategy
        backup_driver = cfg.get_configuration_property('backup_strategy')
        image = cfg.get_configuration_property('backup_docker_image')
        name = 'db_restore'
        volumes = {
            '/var/lib/postgresql/data': {
                'bind': '/var/lib/postgresql/data',
                'mode': 'rw'
            }
        }

        os_cred = (f"--os-token={context.auth_token} "
                   f"--os-auth-url={CONF.service_credentials.auth_url} "
                   f"--os-tenant-id={context.project_id}")

        command = (
            f'/usr/bin/python3 main.py --nobackup '
            f'--storage-driver={storage_driver} --driver={backup_driver} '
            f'{os_cred} '
            f'--restore-from={backup_info["location"]} '
            f'--restore-checksum={backup_info["checksum"]} '
            f'--pg-wal-archive-dir {WAL_ARCHIVE_DIR}')
        if CONF.backup_aes_cbc_key:
            command = (f"{command} "
                       f"--backup-encryption-key={CONF.backup_aes_cbc_key}")

        LOG.debug(
            'Stop the database and clean up the data before restore '
            'from %s', backup_id)
        self.stop_db()
        for folder in [WAL_ARCHIVE_DIR, self.datadir]:
            operating_system.remove_dir_contents(folder)

        # Start to run restore inside a separate docker container
        LOG.info('Starting to restore backup %s, command: %s', backup_id,
                 command)
        output, ret = docker_util.run_container(self.docker_client, image,
                                                name, volumes=volumes,
                                                command=command)
        result = output[-1]
        if not ret:
            msg = f'Failed to run restore container, error: {result}'
            LOG.error(msg)
            raise Exception(msg)

        # Give ownership of the restored files back to the service user.
        for folder in [WAL_ARCHIVE_DIR, self.datadir]:
            operating_system.chown(folder,
                                   CONF.database_service_uid,
                                   CONF.database_service_uid,
                                   force=True, as_root=True)

    def is_replica(self):
        """Wrapper for pg_is_in_recovery() for detecting a server in
        standby mode
        """
        r = self.adm.query("SELECT pg_is_in_recovery()")
        return r[0][0]

    def get_current_wal_lsn(self):
        """Wrapper for pg_current_wal_lsn()

        Cannot be used against a running replica
        """
        r = self.adm.query("SELECT pg_current_wal_lsn()")
        return r[0][0]

    def get_last_wal_replay_lsn(self):
        """Wrapper for pg_last_wal_replay_lsn()

        For use on replica servers
        """
        r = self.adm.query("SELECT pg_last_wal_replay_lsn()")
        return r[0][0]

    def pg_rewind(self, conn_info):
        """Run pg_rewind in a throwaway container against this data dir.

        :param conn_info: libpq connection string of the source server.
        :raises Exception: if the pg_rewind container reports failure.
        """
        docker_image = CONF.get(CONF.datastore_manager).docker_image
        image = f'{docker_image}:{CONF.datastore_version}'
        # uid:gid for the container user (see note in start_db about the
        # previously broken "******" format string).
        user = "%s:%s" % (CONF.database_service_uid,
                          CONF.database_service_uid)
        volumes = {
            "/var/run/postgresql": {
                "bind": "/var/run/postgresql",
                "mode": "rw"
            },
            "/var/lib/postgresql": {
                "bind": "/var/lib/postgresql",
                "mode": "rw"
            },
            "/var/lib/postgresql/data": {
                "bind": "/var/lib/postgresql/data",
                "mode": "rw"
            },
        }
        command = (f"pg_rewind --target-pgdata={self.datadir} "
                   f"--source-server='{conn_info}'")

        # Remove any stale container from a previous attempt first.
        docker_util.remove_container(self.docker_client, name='pg_rewind')

        LOG.info('Running pg_rewind in container')
        output, ret = docker_util.run_container(self.docker_client, image,
                                                'pg_rewind',
                                                volumes=volumes,
                                                command=command,
                                                user=user)
        result = output[-1]
        LOG.debug(f"Finished running pg_rewind, last output: {result}")
        if not ret:
            msg = f'Failed to run pg_rewind in container, error: {result}'
            LOG.error(msg)
            raise Exception(msg)