def main():
    """Upgrade the local PostgreSQL data directory to the installed binary version.

    Expects the path to the Patroni configuration file as ``sys.argv[1]``.

    Returns 0 immediately when the data directory already matches the binary
    version; otherwise runs the full cycle: start old cluster, wait for end of
    recovery, run post-bootstrap, initdb the new PGDATA, pg_upgrade, start the
    new cluster, update extensions, and ANALYZE.

    Raises:
        Exception: when any step of the upgrade fails, or when the cluster
            version is not strictly older than the binary version.
    """
    from pg_upgrade import PostgresqlUpgrade
    from patroni.config import Config
    from patroni.utils import polling_loop
    from spilo_commons import get_binary_version

    config = Config(sys.argv[1])
    upgrade = PostgresqlUpgrade(config)

    bin_version = get_binary_version(upgrade.pgcommand(''))
    cluster_version = upgrade.get_cluster_version()
    if cluster_version == bin_version:
        # nothing to do -- the data directory already matches the binaries
        return 0

    logger.info('Cluster version: %s, bin version: %s', cluster_version, bin_version)

    # This used to be an `assert`, which is silently stripped under `python -O`;
    # an attempted downgrade must always abort loudly, so raise explicitly.
    if not float(cluster_version) < float(bin_version):
        raise Exception('Cluster version {0} is not older than bin version {1}'.format(cluster_version, bin_version))

    logger.info('Trying to start the cluster with old postgres')
    if not upgrade.start_old_cluster(config['bootstrap'], cluster_version):
        raise Exception('Failed to start the cluster with old postgres')

    # wait (bounded by pg_ctl_timeout, polling every 10s) until the old
    # cluster finished recovery and accepts writes
    for _ in polling_loop(upgrade.config.get('pg_ctl_timeout'), 10):
        upgrade.reset_cluster_info_state()
        if upgrade.is_leader():
            break
        logger.info('waiting for end of recovery of the old cluster')

    if not upgrade.bootstrap.call_post_bootstrap(config['bootstrap']):
        upgrade.stop(block_callbacks=True, checkpoint=False)
        raise Exception('Failed to run bootstrap.post_init')

    if not upgrade.prepare_new_pgdata(bin_version):
        raise Exception('initdb failed')

    try:
        upgrade.drop_possibly_incompatible_objects()
    except Exception:
        # make sure the old postgres is not left running before propagating
        upgrade.stop(block_callbacks=True, checkpoint=False)
        raise

    logger.info('Doing a clean shutdown of the cluster before pg_upgrade')
    if not upgrade.stop(block_callbacks=True, checkpoint=False):
        raise Exception('Failed to stop the cluster with old postgres')

    if not upgrade.do_upgrade():
        raise Exception('Failed to upgrade cluster from {0} to {1}'.format(
            cluster_version, bin_version))

    logger.info('Starting the cluster with new postgres after upgrade')
    if not upgrade.start():
        raise Exception('Failed to start the cluster with new postgres')

    try:
        upgrade.update_extensions()
    except Exception as e:
        # extension upgrade failures are non-fatal: the cluster is already up
        logger.error('Failed to update extensions: %r', e)

    upgrade.analyze()
def get_possible_versions():
    """Return installed PostgreSQL versions usable for an upgrade source.

    Scans ``LIB_DIR`` for version directories, keeps only those whose binary
    version does not exceed the version configured in the Patroni config, and
    returns the version strings newest-first (e.g. 12, 11, 10, 9.6, ...).
    Directories that do not resolve to a valid binary version are skipped.
    """
    from spilo_commons import LIB_DIR, get_binary_version, get_bin_dir, get_patroni_config

    patroni_config = get_patroni_config()
    ceiling = float(get_binary_version(patroni_config.get('postgresql', {}).get('bin_dir')))

    candidates = {}
    for entry in os.listdir(LIB_DIR):
        try:
            version = get_binary_version(get_bin_dir(entry))
            numeric = float(version)
        except Exception:
            # not a valid version directory -- best-effort scan, skip it
            continue
        if numeric <= ceiling:
            candidates[numeric] = version

    # newest first, i.e. 12, 11, 10, 9.6, and so on
    return [candidates[key] for key in sorted(candidates, reverse=True)]
def main():
    """Bootstrap via PITR and upgrade the cluster if the binaries are newer.

    Expects the path to the Patroni configuration file as ``sys.argv[1]``.
    Always performs point-in-time recovery first (``perform_pitr``); returns 0
    if the recovered cluster already matches the binary version, otherwise
    continues with post-bootstrap, initdb of the new PGDATA, pg_upgrade,
    startup of the new cluster, extension updates, and ANALYZE.

    Raises:
        Exception: when any step fails, or when the cluster version is newer
            than the binary version (downgrade is impossible).
    """
    from pg_upgrade import PostgresqlUpgrade
    from patroni.config import Config
    from spilo_commons import get_binary_version

    config = Config(sys.argv[1])
    upgrade = PostgresqlUpgrade(config)

    bin_version = get_binary_version(upgrade.pgcommand(''))
    cluster_version = upgrade.get_cluster_version()

    logger.info('Cluster version: %s, bin version: %s', cluster_version, bin_version)

    # This used to be an `assert`, which is silently stripped under `python -O`;
    # an attempted downgrade must always abort loudly, so raise explicitly.
    if not float(cluster_version) <= float(bin_version):
        raise Exception('Cluster version {0} is newer than bin version {1}'.format(cluster_version, bin_version))

    perform_pitr(upgrade, cluster_version, bin_version, config['bootstrap'])

    if cluster_version == bin_version:
        # recovery finished and no major upgrade is required
        return 0

    if not upgrade.bootstrap.call_post_bootstrap(config['bootstrap']):
        upgrade.stop(block_callbacks=True, checkpoint=False)
        raise Exception('Failed to run bootstrap.post_init')

    if not upgrade.prepare_new_pgdata(bin_version):
        raise Exception('initdb failed')

    try:
        upgrade.drop_possibly_incompatible_objects()
    except Exception:
        # make sure the old postgres is not left running before propagating
        upgrade.stop(block_callbacks=True, checkpoint=False)
        raise

    logger.info('Doing a clean shutdown of the cluster before pg_upgrade')
    if not upgrade.stop(block_callbacks=True, checkpoint=False):
        raise Exception('Failed to stop the cluster with old postgres')

    if not upgrade.do_upgrade():
        raise Exception('Failed to upgrade cluster from {0} to {1}'.format(
            cluster_version, bin_version))

    logger.info('Starting the cluster with new postgres after upgrade')
    if not upgrade.start():
        raise Exception('Failed to start the cluster with new postgres')

    try:
        upgrade.update_extensions()
    except Exception as e:
        # extension upgrade failures are non-fatal: the cluster is already up
        logger.error('Failed to update extensions: %r', e)

    upgrade.analyze()
def get_desired_version():
    """Determine which PostgreSQL version Spilo should be running.

    Preference order for the bin_dir:
      1. ``postgresql.bin_dir`` from the ``SPILO_CONFIGURATION`` env var (YAML)
      2. a bin_dir derived from the ``PGVERSION`` env var
      3. whatever default ``get_binary_version`` uses for an empty bin_dir

    Returns the version string reported by the binaries in the chosen bin_dir.
    """
    from spilo_commons import get_bin_dir, get_binary_version

    try:
        # safe_load returns None for an empty/absent SPILO_CONFIGURATION;
        # the original relied on the resulting AttributeError being swallowed
        # below -- make the empty case explicit instead.
        spilo_configuration = yaml.safe_load(os.environ.get('SPILO_CONFIGURATION', '')) or {}
        bin_dir = spilo_configuration.get('postgresql', {}).get('bin_dir')
    except Exception:
        # SPILO_CONFIGURATION was not valid YAML or not a mapping -- fall back
        bin_dir = None

    if not bin_dir and os.environ.get('PGVERSION'):
        bin_dir = get_bin_dir(os.environ['PGVERSION'])

    return get_binary_version(bin_dir)
def main():
    """Render and write the complete Spilo/Patroni node configuration.

    Builds the effective config from the built-in template, provider-specific
    placeholders, DCS settings and user-supplied SPILO_CONFIGURATION /
    PATRONI_CONFIGURATION overrides, then writes out each requested
    section (patroni, wal-e, certificates, crontab, pgbouncer, ...).

    Exits the process: exit code 0 means WAL-E is in use (launch.sh may take
    a backup), non-zero means it must not.
    """
    debug = os.environ.get('DEBUG', '') in ['1', 'true', 'TRUE', 'on', 'ON']
    args = parse_args()

    logging.basicConfig(format='%(asctime)s - bootstrapping - %(levelname)s - %(message)s',
                        level=('DEBUG' if debug else (args.get('loglevel') or 'INFO').upper()))

    provider = get_provider()
    placeholders = get_placeholders(provider)
    # NOTE(review): "your running" should read "you're running" -- message text
    # left unchanged here because log output is runtime behavior.
    logging.info('Looks like your running %s', provider)

    # Base configuration: rendered template first, then DCS settings on top.
    # NOTE(review): yaml.load without an explicit Loader is deprecated/unsafe
    # in PyYAML >= 5.1 -- TEMPLATE is trusted, but SPILO_CONFIGURATION below
    # comes from the environment; consider yaml.safe_load. Not changed here.
    config = yaml.load(pystache_render(TEMPLATE, placeholders))
    config.update(get_dcs_config(config, placeholders))

    user_config = yaml.load(os.environ.get('SPILO_CONFIGURATION',
                                           os.environ.get('PATRONI_CONFIGURATION', ''))) or {}
    if not isinstance(user_config, dict):
        config_var_name = 'SPILO_CONFIGURATION' if 'SPILO_CONFIGURATION' in os.environ else 'PATRONI_CONFIGURATION'
        raise ValueError('{0} should contain a dict, yet it is a {1}'.format(
            config_var_name, type(user_config)))

    # user-supplied settings win over the generated ones
    user_config_copy = deepcopy(user_config)
    config = deep_update(user_config_copy, config)

    if provider == PROVIDER_LOCAL and not any(
            1 for key in config.keys() if key == 'kubernetes' or key in PATRONI_DCS):
        # running locally with no DCS configured -- fall back to a local etcd
        link_runit_service(placeholders, 'etcd')
        config['etcd'] = {'host': '127.0.0.1:2379'}

    pgdata = config['postgresql']['data_dir']
    version_file = os.path.join(pgdata, 'PG_VERSION')
    # if PG_VERSION file exists stick to it and build respective bin_dir
    if os.path.exists(version_file):
        with open(version_file) as f:
            version = f.read().strip()
            if is_valid_pg_version(version):
                config['postgresql']['bin_dir'] = get_bin_dir(version)

    # try to build bin_dir from PGVERSION if bin_dir is not set in SPILO_CONFIGURATION and PGDATA is empty
    if not config['postgresql'].get('bin_dir'):
        version = os.environ.get('PGVERSION', '')
        if not is_valid_pg_version(version):
            version = get_binary_version('')
        config['postgresql']['bin_dir'] = get_bin_dir(version)

    placeholders['PGVERSION'] = get_binary_version(config['postgresql'].get('bin_dir'))
    version = float(placeholders['PGVERSION'])

    # extend version-dependent parameters only when the user did not set them
    if 'shared_preload_libraries' not in user_config.get('postgresql', {}).get('parameters', {}):
        config['postgresql']['parameters']['shared_preload_libraries'] = \
            append_extentions(config['postgresql']['parameters']['shared_preload_libraries'], version)
    if 'extwlist.extensions' not in user_config.get('postgresql', {}).get('parameters', {}):
        config['postgresql']['parameters']['extwlist.extensions'] = \
            append_extentions(config['postgresql']['parameters']['extwlist.extensions'], version, True)

    # Ensure replication is available
    if 'pg_hba' in config['bootstrap'] and not any(
            ['replication' in i for i in config['bootstrap']['pg_hba']]):
        rep_hba = 'hostssl replication {} all md5'.\
            format(config['postgresql']['authentication']['replication']['username'])
        config['bootstrap']['pg_hba'].insert(0, rep_hba)

    for section in args['sections']:
        logging.info('Configuring %s', section)
        if section == 'patroni':
            write_patroni_config(config, args['force'])
            adjust_owner(placeholders, PATRONI_CONFIG_FILE, gid=-1)
            link_runit_service(placeholders, 'patroni')
            # postgres unix-socket directory must exist and be group-writable
            # (setgid bit keeps the group on files created inside)
            pg_socket_dir = '/run/postgresql'
            if not os.path.exists(pg_socket_dir):
                os.makedirs(pg_socket_dir)
            os.chmod(pg_socket_dir, 0o2775)
            adjust_owner(placeholders, pg_socket_dir)

            # It is a recurring and very annoying problem with crashes (host/pod/container)
            # while the backup is taken in the exclusive mode which leaves the backup_label
            # in the PGDATA. Having the backup_label file in the PGDATA makes postgres think
            # that we are restoring from the backup and it puts this information into the
            # pg_control. Effectively it makes it impossible to start postgres in recovery
            # with such PGDATA because the recovery never-ever-ever-ever finishes.
            #
            # As a workaround we will remove the backup_label file from PGDATA if we know
            # for sure that the Postgres was already running with exactly this PGDATA.
            # As proof that the postgres was running we will use the presence of postmaster.pid
            # in the PGDATA, because it is 100% known that neither pg_basebackup nor
            # wal-e/wal-g are backing up/restoring this file.
            #
            # We are not doing such trick in the Patroni (removing backup_label) because
            # we have absolutely no idea what software people use for backup/recovery.
            # In case of some home-grown solution they might end up in copying postmaster.pid...
            postmaster_pid = os.path.join(pgdata, 'postmaster.pid')
            backup_label = os.path.join(pgdata, 'backup_label')
            if os.path.isfile(postmaster_pid) and os.path.isfile(backup_label):
                os.unlink(backup_label)
        elif section == 'pgqd':
            link_runit_service(placeholders, 'pgqd')
        elif section == 'log':
            if bool(placeholders.get('LOG_S3_BUCKET')):
                write_log_environment(placeholders)
        elif section == 'wal-e':
            if placeholders['USE_WALE']:
                write_wale_environment(placeholders, '', args['force'])
        elif section == 'certificate':
            write_certificates(placeholders, args['force'])
        elif section == 'crontab':
            write_crontab(placeholders, args['force'])
        elif section == 'pam-oauth2':
            write_pam_oauth2_configuration(placeholders, args['force'])
        elif section == 'pgbouncer':
            write_pgbouncer_configuration(placeholders, args['force'])
        elif section == 'bootstrap':
            if placeholders['CLONE_WITH_WALE']:
                update_and_write_wale_configuration(placeholders, 'CLONE_', args['force'])
            if placeholders['CLONE_WITH_BASEBACKUP']:
                write_clone_pgpass(placeholders, args['force'])
        elif section == 'standby-cluster':
            if placeholders['STANDBY_WITH_WALE']:
                update_and_write_wale_configuration(placeholders, 'STANDBY_', args['force'])
        else:
            raise Exception('Unknown section: {}'.format(section))

    # We will abuse non zero exit code as an indicator for the launch.sh that it should not even try to create a backup
    sys.exit(int(not placeholders['USE_WALE']))