def prepare_new_pgdata(self, version):
    """Run initdb for the target major *version* in a sibling ``<pgdata>_new`` directory.

    The initdb options (locale, encoding, data checksums) are derived from the
    currently running cluster via SQL queries.  While the new directory is being
    prepared, ``self._data_dir``, ``self.config._postgresql_conf`` and
    ``self._version_file`` are temporarily repointed at the ``_new`` directory and
    restored before returning, so the object ends up describing the old cluster
    again with ``self._new_data_dir`` pointing at the freshly initialized one.

    :param version: target PostgreSQL major version (string or number accepted by
                    ``self.set_bin_dir``/``float()``).
    :returns: ``True`` on success, ``False`` if initdb failed.
    """
    from spilo_commons import append_extentions

    # Mirror the old cluster's collation/encoding/checksum settings in the new one.
    locale = self.query('SHOW lc_collate').fetchone()[0]
    encoding = self.query('SHOW server_encoding').fetchone()[0]
    initdb_config = [{'locale': locale}, {'encoding': encoding}]
    if self.query("SELECT current_setting('data_checksums')::bool").fetchone()[0]:
        initdb_config.append('data-checksums')

    logger.info('initdb config: %s', initdb_config)

    # Lay out <pgdata>, <pgdata>_old and <pgdata>_new; work happens in _new.
    self._new_data_dir = os.path.abspath(self._data_dir)
    self._old_data_dir = self._new_data_dir + '_old'
    self._data_dir = self._new_data_dir + '_new'
    self.remove_new_data(self._data_dir)

    # Temporarily point config/version bookkeeping at the _new directory;
    # the originals are restored after initdb below.
    old_postgresql_conf = self.config._postgresql_conf
    self.config._postgresql_conf = os.path.join(self._data_dir, 'postgresql.conf')
    old_version_file = self._version_file
    self._version_file = os.path.join(self._data_dir, 'PG_VERSION')
    self.set_bin_dir(version)

    # shared_preload_libraries for the old cluster, cleaned from incompatible/missing libs
    old_shared_preload_libraries = self.config.get('parameters').get('shared_preload_libraries')

    # restore original values of archive_mode and shared_preload_libraries
    if getattr(self, '_old_config_values', None):
        for name, value in self._old_config_values.items():
            if value is None:
                self.config.get('parameters').pop(name)
            else:
                self.config.get('parameters')[name] = value

    # for the new version we maybe need to add some libs to the shared_preload_libraries
    shared_preload_libraries = self.config.get('parameters').get('shared_preload_libraries')
    if shared_preload_libraries:
        # Chained assignment: remember the adjusted value and apply it in one go.
        self._old_shared_preload_libraries = self.config.get('parameters')['shared_preload_libraries'] =\
            append_extentions(shared_preload_libraries, float(version))

    self.no_bg_mon()
    if not self.bootstrap._initdb(initdb_config):
        return False
    self.bootstrap._running_custom_bootstrap = False

    # Copy old configs. XXX: some parameters might be incompatible!
    for f in os.listdir(self._new_data_dir):
        if f.startswith('postgresql.') or f.startswith('pg_hba.conf') or f == 'patroni.dynamic.json':
            shutil.copy(os.path.join(self._new_data_dir, f), os.path.join(self._data_dir, f))
    self.config.write_postgresql_conf()

    # Undo the temporary repointing: _data_dir describes the old cluster again,
    # _new_data_dir the freshly initialized one.
    self._new_data_dir, self._data_dir = self._data_dir, self._new_data_dir
    self.config._postgresql_conf = old_postgresql_conf
    self._version_file = old_version_file
    if old_shared_preload_libraries:
        self.config.get('parameters')['shared_preload_libraries'] = old_shared_preload_libraries
    self.no_bg_mon()
    self.configure_server_parameters()
    return True
def update_configs(new_version):
    """Rewrite the Patroni config (and wal-e/wal-g envdir files) for *new_version*.

    Updates ``bin_dir``, ``shared_preload_libraries`` and ``extwlist.extensions``
    in the persisted Patroni configuration, then — if ``restore_command`` is an
    ``envdir``-wrapped command — patches every ``WALE_*_PREFIX``/``WALG_*_PREFIX``
    file in that envdir via ``patch_wale_prefix``.

    :param new_version: target PostgreSQL major version as a string.
    :returns: the envdir path when the envdir files were processed successfully,
              otherwise ``None`` (implicitly).
    """
    from spilo_commons import append_extentions, get_bin_dir, get_patroni_config, write_file, write_patroni_config

    config = get_patroni_config()
    config['postgresql']['bin_dir'] = get_bin_dir(new_version)

    version = float(new_version)
    shared_preload_libraries = config['postgresql'].get(
        'parameters', {}).get('shared_preload_libraries')
    if shared_preload_libraries is not None:
        config['postgresql']['parameters']['shared_preload_libraries'] =\
            append_extentions(shared_preload_libraries, version)

    extwlist_extensions = config['postgresql'].get(
        'parameters', {}).get('extwlist.extensions')
    if extwlist_extensions is not None:
        config['postgresql']['parameters']['extwlist.extensions'] =\
            append_extentions(extwlist_extensions, version, True)

    write_patroni_config(config, True)

    # update wal-e/wal-g envdir files
    restore_command = shlex.split(config['postgresql'].get(
        'recovery_conf', {}).get('restore_command', ''))
    if len(restore_command) > 4 and restore_command[0] == 'envdir':
        envdir = restore_command[1]

        try:
            for name in os.listdir(envdir):
                # len('WALE__PREFIX') = 12
                if len(name) > 12 and name.endswith(
                        '_PREFIX') and name[:5] in ('WALE_', 'WALG_'):
                    name = os.path.join(envdir, name)
                    try:
                        with open(name) as f:
                            value = f.read().strip()
                        new_value = patch_wale_prefix(value, new_version)
                        if new_value != value:
                            write_file(new_value, name, True)
                    except Exception as e:
                        # Best effort per file: log and keep patching the rest.
                        logger.error('Failed to process %s: %r', name, e)
        except Exception:
            # Listing the envdir failed entirely — give up silently (best effort).
            pass
        else:
            # NOTE: try/else — envdir is returned only when listdir raised nothing.
            return envdir
def main():
    """Render the Spilo configuration and write out the requested sections.

    Builds the effective Patroni/Spilo configuration from the built-in template,
    environment placeholders and user-supplied SPILO_CONFIGURATION /
    PATRONI_CONFIGURATION overrides, then materializes each section given in
    ``args['sections']`` (patroni config, runit services, wal-e env, certs, …).

    Exits with a non-zero status when WAL-E is not in use (see comment at the
    bottom); raises ValueError when the user-supplied configuration is not a dict.
    """
    debug = os.environ.get('DEBUG', '') in ['1', 'true', 'TRUE', 'on', 'ON']
    args = parse_args()

    logging.basicConfig(format='%(asctime)s - bootstrapping - %(levelname)s - %(message)s',
                        level=('DEBUG' if debug else (args.get('loglevel') or 'INFO').upper()))

    provider = get_provider()
    placeholders = get_placeholders(provider)
    # Fixed grammar in the log message ("your running" -> "you're running").
    logging.info("Looks like you're running %s", provider)

    # NOTE(review): yaml.load without an explicit Loader is unsafe on untrusted
    # input and deprecated in PyYAML >= 5.1; the template is generated locally,
    # but consider yaml.safe_load here — confirm no custom YAML tags are needed.
    config = yaml.load(pystache_render(TEMPLATE, placeholders))
    config.update(get_dcs_config(config, placeholders))

    user_config = yaml.load(os.environ.get('SPILO_CONFIGURATION',
                                           os.environ.get('PATRONI_CONFIGURATION', ''))) or {}
    if not isinstance(user_config, dict):
        config_var_name = 'SPILO_CONFIGURATION' if 'SPILO_CONFIGURATION' in os.environ else 'PATRONI_CONFIGURATION'
        raise ValueError('{0} should contain a dict, yet it is a {1}'.format(config_var_name, type(user_config)))

    # User overrides win over the generated template values.
    user_config_copy = deepcopy(user_config)
    config = deep_update(user_config_copy, config)

    # No DCS configured at all -> fall back to a local etcd.
    if provider == PROVIDER_LOCAL and not any(
            1 for key in config.keys() if key == 'kubernetes' or key in PATRONI_DCS):
        link_runit_service(placeholders, 'etcd')
        config['etcd'] = {'host': '127.0.0.1:2379'}

    pgdata = config['postgresql']['data_dir']
    version_file = os.path.join(pgdata, 'PG_VERSION')
    # if PG_VERSION file exists stick to it and build respective bin_dir
    if os.path.exists(version_file):
        with open(version_file) as f:
            version = f.read().strip()
        if is_valid_pg_version(version):
            config['postgresql']['bin_dir'] = get_bin_dir(version)

    # try to build bin_dir from PGVERSION if bin_dir is not set in SPILO_CONFIGURATION and PGDATA is empty
    if not config['postgresql'].get('bin_dir'):
        version = os.environ.get('PGVERSION', '')
        if not is_valid_pg_version(version):
            version = get_binary_version('')
        config['postgresql']['bin_dir'] = get_bin_dir(version)

    placeholders['PGVERSION'] = get_binary_version(config['postgresql'].get('bin_dir'))
    version = float(placeholders['PGVERSION'])

    # Only extend libraries/extensions the user did not set explicitly.
    if 'shared_preload_libraries' not in user_config.get('postgresql', {}).get('parameters', {}):
        config['postgresql']['parameters']['shared_preload_libraries'] =\
            append_extentions(config['postgresql']['parameters']['shared_preload_libraries'], version)
    if 'extwlist.extensions' not in user_config.get('postgresql', {}).get('parameters', {}):
        config['postgresql']['parameters']['extwlist.extensions'] =\
            append_extentions(config['postgresql']['parameters']['extwlist.extensions'], version, True)

    # Ensure replication is available
    if 'pg_hba' in config['bootstrap'] and not any(
            ['replication' in i for i in config['bootstrap']['pg_hba']]):
        rep_hba = 'hostssl replication {} all md5'.\
            format(config['postgresql']['authentication']['replication']['username'])
        config['bootstrap']['pg_hba'].insert(0, rep_hba)

    for section in args['sections']:
        logging.info('Configuring %s', section)

        if section == 'patroni':
            write_patroni_config(config, args['force'])
            adjust_owner(placeholders, PATRONI_CONFIG_FILE, gid=-1)
            link_runit_service(placeholders, 'patroni')
            pg_socket_dir = '/run/postgresql'
            if not os.path.exists(pg_socket_dir):
                os.makedirs(pg_socket_dir)
                os.chmod(pg_socket_dir, 0o2775)
                adjust_owner(placeholders, pg_socket_dir)

            # It is a recurring and very annoying problem with crashes (host/pod/container)
            # while the backup is taken in the exclusive mode which leaves the backup_label
            # in the PGDATA. Having the backup_label file in the PGDATA makes postgres think
            # that we are restoring from the backup and it puts this information into the
            # pg_control. Effectively it makes it impossible to start postgres in recovery
            # with such PGDATA because the recovery never-ever-ever-ever finishes.
            #
            # As a workaround we will remove the backup_label file from PGDATA if we know
            # for sure that the Postgres was already running with exactly this PGDATA.
            # As proof that the postgres was running we will use the presence of postmaster.pid
            # in the PGDATA, because it is 100% known that neither pg_basebackup nor
            # wal-e/wal-g are backing up/restoring this file.
            #
            # We are not doing such trick in the Patroni (removing backup_label) because
            # we have absolutely no idea what software people use for backup/recovery.
            # In case of some home-grown solution they might end up in copying postmaster.pid...
            postmaster_pid = os.path.join(pgdata, 'postmaster.pid')
            backup_label = os.path.join(pgdata, 'backup_label')
            if os.path.isfile(postmaster_pid) and os.path.isfile(backup_label):
                os.unlink(backup_label)
        elif section == 'pgqd':
            link_runit_service(placeholders, 'pgqd')
        elif section == 'log':
            if bool(placeholders.get('LOG_S3_BUCKET')):
                write_log_environment(placeholders)
        elif section == 'wal-e':
            if placeholders['USE_WALE']:
                write_wale_environment(placeholders, '', args['force'])
        elif section == 'certificate':
            write_certificates(placeholders, args['force'])
        elif section == 'crontab':
            write_crontab(placeholders, args['force'])
        elif section == 'pam-oauth2':
            write_pam_oauth2_configuration(placeholders, args['force'])
        elif section == 'pgbouncer':
            write_pgbouncer_configuration(placeholders, args['force'])
        elif section == 'bootstrap':
            if placeholders['CLONE_WITH_WALE']:
                update_and_write_wale_configuration(placeholders, 'CLONE_', args['force'])
            if placeholders['CLONE_WITH_BASEBACKUP']:
                write_clone_pgpass(placeholders, args['force'])
        elif section == 'standby-cluster':
            if placeholders['STANDBY_WITH_WALE']:
                update_and_write_wale_configuration(placeholders, 'STANDBY_', args['force'])
        else:
            raise Exception('Unknown section: {}'.format(section))

    # We will abuse non zero exit code as an indicator for the launch.sh that it should not even try to create a backup
    sys.exit(int(not placeholders['USE_WALE']))