def install_administrative_scripts():
    """Install the administrative and backup helper scripts on the unit.

    Copies the database backup script and the idle-connection reaper
    into the scripts directory, renders the per-database backup cron
    wrapper, and ensures the backups/logs directories and backups.log
    file exist with the expected ownership and permissions.
    """
    scripts_dir = helpers.scripts_dir()
    logs_dir = helpers.logs_dir()
    helpers.makedirs(scripts_dir, mode=0o755)

    # The database backup script. Most of this is redundant now.
    source = os.path.join(hookenv.charm_dir(), 'scripts', 'pgbackup.py')
    destination = os.path.join(scripts_dir, 'dump-pg-db')
    with open(source, 'r') as f:
        helpers.write(destination, f.read(), mode=0o755)

    backups_dir = helpers.backups_dir()
    helpers.makedirs(backups_dir, mode=0o750, user='******', group='postgres')

    # Generate a wrapper that invokes the backup script for each
    # database.
    data = dict(logs_dir=logs_dir,
                scripts_dir=scripts_dir,
                # backups_dir probably should be deprecated in favour of
                # a juju storage mount.
                backups_dir=backups_dir)
    destination = os.path.join(helpers.scripts_dir(), 'pg_backup_job')
    templating.render('pg_backup_job.tmpl', destination, data,
                      owner='root', group='postgres', perms=0o755)

    # Install the reaper scripts.
    script = 'pgkillidle.py'
    source = os.path.join(hookenv.charm_dir(), 'scripts', script)
    destination = os.path.join(scripts_dir, script)
    # BUG FIX: this previously tested os.path.exists(source). The source
    # ships inside the charm and always exists, so the copy was skipped
    # whenever any_file_changed() returned False — even when the
    # installed copy was missing. Test the destination instead.
    if (reactive.helpers.any_file_changed([source]) or
            not os.path.exists(destination)):
        with open(source, 'r') as f:
            helpers.write(destination, f.read(), mode=0o755)

    if not os.path.exists(logs_dir):
        helpers.makedirs(logs_dir, mode=0o755,
                         user='******', group='postgres')

    # Create the backups.log file used by the backup wrapper if it
    # does not exist, in order to trigger spurious alerts when a
    # unit is installed, per Bug #1329816.
    helpers.write(helpers.backups_log_path(), '', mode=0o644,
                  user='******', group='postgres')
def install_administrative_scripts():
    """Install backup and maintenance helper scripts onto the unit.

    Installs the database dump script and the idle-connection reaper,
    renders the backup cron wrapper, prepares the backup/log
    directories and the backups.log file, then records completion via
    the "postgresql.cluster.support-scripts" reactive state.
    """
    scripts_dir = helpers.scripts_dir()
    logs_dir = helpers.logs_dir()
    helpers.makedirs(scripts_dir, mode=0o755)

    def _install_script(src_path, dest_path):
        # Route the copy through helpers.write so mode is applied
        # consistently with the rest of the charm.
        with open(src_path, "r") as fh:
            helpers.write(dest_path, fh.read(), mode=0o755)

    # The database backup script. Most of this is redundant now.
    _install_script(
        os.path.join(hookenv.charm_dir(), "scripts", "pgbackup.py"),
        os.path.join(scripts_dir, "dump-pg-db"),
    )

    backups_dir = helpers.backups_dir()
    helpers.makedirs(backups_dir, mode=0o750, user="******", group="postgres")

    # Generate a wrapper that invokes the backup script for each
    # database.
    context = {
        "logs_dir": logs_dir,
        "scripts_dir": scripts_dir,
        # backups_dir probably should be deprecated in favour of
        # a juju storage mount.
        "backups_dir": backups_dir,
    }
    templating.render(
        "pg_backup_job.tmpl",
        os.path.join(helpers.scripts_dir(), "pg_backup_job"),
        context,
        owner="root",
        group="postgres",
        perms=0o755,
    )

    # Install the reaper scripts.
    script = "pgkillidle.py"
    source = os.path.join(hookenv.charm_dir(), "scripts", script)
    destination = os.path.join(scripts_dir, script)
    # Reinstall when the charm's copy changed, or when the installed
    # copy has gone missing.
    needs_install = reactive.helpers.any_file_changed([source])
    if needs_install or not os.path.exists(destination):
        _install_script(source, destination)

    if not os.path.exists(logs_dir):
        helpers.makedirs(logs_dir, mode=0o755, user="******", group="postgres")

    # Create the backups.log file used by the backup wrapper if it
    # does not exist, in order to trigger spurious alerts when a
    # unit is installed, per Bug #1329816.
    helpers.write(
        helpers.backups_log_path(),
        "",
        mode=0o644,
        user="******",
        group="postgres",
    )

    reactive.set_state("postgresql.cluster.support-scripts")
def update_postgresql_crontab():
    """Render the unit's PostgreSQL crontab from the charm config.

    Builds the template context from the charm configuration plus
    master/primary status, enabling the WAL-E backup and prune jobs
    only when a wal_e_storage_uri is configured.
    """
    config = hookenv.config()

    context = dict(config)
    context['scripts_dir'] = helpers.scripts_dir()
    context['is_master'] = replication.is_master()
    context['is_primary'] = postgresql.is_primary()

    wal_e_enabled = True if config['wal_e_storage_uri'] else False
    context['wal_e_enabled'] = wal_e_enabled
    if wal_e_enabled:
        context['wal_e_backup_command'] = wal_e.wal_e_backup_command()
        context['wal_e_prune_command'] = wal_e.wal_e_prune_command()

    templating.render('postgres.cron.tmpl',
                      os.path.join(helpers.cron_dir(), 'juju-postgresql'),
                      context,
                      owner='root', group='postgres', perms=0o640)
def write_metrics_cronjob():
    """Install, update or remove the statsd metrics cron job.

    When metrics_target is unset the cron job is removed. Otherwise
    the postgres_to_statsd.py collector script is installed and a
    cron.d entry is rendered to run it at the configured interval.
    """
    config = hookenv.config()
    path = os.path.join(helpers.cron_dir(), 'juju-postgresql-metrics')

    # Validated in preflight.block_on_invalid_config()
    metrics_target = config['metrics_target'].strip()
    metrics_sample_interval = config['metrics_sample_interval']

    reactive.remove_state('postgresql.metrics.needs_update')

    if not metrics_target:
        if os.path.exists(path):
            hookenv.log('Turning off metrics cronjob')
            os.unlink(path)
        return

    charm_dir = hookenv.charm_dir()
    statsd_host, statsd_port = metrics_target.split(':', 1)
    metrics_prefix = config['metrics_prefix'].strip()
    # "$UNIT" expands to a statsd-safe form of the unit name.
    metrics_prefix = metrics_prefix.replace(
        "$UNIT", hookenv.local_unit().replace('.', '-').replace('/', '-'))

    # ensure script installed
    charm_script = os.path.join(charm_dir, 'files', 'metrics',
                                'postgres_to_statsd.py')
    script_path = os.path.join(helpers.scripts_dir(),
                               'postgres_to_statsd.py')
    with open(charm_script, 'r') as f:
        helpers.write(script_path, f.read(), mode=0o755)

    # write the crontab
    data = dict(interval=config['metrics_sample_interval'],
                script_path=script_path,
                metrics_prefix=metrics_prefix,
                metrics_sample_interval=metrics_sample_interval,
                statsd_host=statsd_host,
                statsd_port=statsd_port)
    # BUG FIX: previously rendered to charm_script, which overwrote the
    # charm's copy of the metrics script and never installed the cron
    # job. The crontab belongs at `path` (the same file the disable
    # branch above unlinks).
    templating.render('metrics_cronjob.template', path, data,
                      perms=0o644)
def write_metrics_cronjob():
    """Install, update or remove the statsd metrics cron job.

    When metrics_target is unset the cron job is removed. Otherwise
    the postgres_to_statsd.py collector script is installed and a
    cron.d entry is rendered to run it at the configured interval.
    """
    config = hookenv.config()
    path = os.path.join(helpers.cron_dir(), "juju-postgresql-metrics")

    # Validated in preflight.block_on_invalid_config()
    metrics_target = config["metrics_target"].strip()
    metrics_sample_interval = config["metrics_sample_interval"]

    reactive.remove_state("postgresql.metrics.needs_update")

    if not metrics_target:
        if os.path.exists(path):
            hookenv.log("Turning off metrics cronjob")
            os.unlink(path)
        return

    charm_dir = hookenv.charm_dir()
    statsd_host, statsd_port = metrics_target.split(":", 1)
    metrics_prefix = config["metrics_prefix"].strip()
    # "$UNIT" expands to a statsd-safe form of the unit name.
    metrics_prefix = metrics_prefix.replace(
        "$UNIT", hookenv.local_unit().replace(".", "-").replace("/", "-"))

    # ensure script installed
    charm_script = os.path.join(charm_dir, "files", "metrics",
                                "postgres_to_statsd.py")
    script_path = os.path.join(helpers.scripts_dir(), "postgres_to_statsd.py")
    with open(charm_script, "r") as f:
        helpers.write(script_path, f.read(), mode=0o755)

    # write the crontab
    data = dict(
        interval=config["metrics_sample_interval"],
        script_path=script_path,
        metrics_prefix=metrics_prefix,
        metrics_sample_interval=metrics_sample_interval,
        statsd_host=statsd_host,
        statsd_port=statsd_port,
    )
    # BUG FIX: previously rendered to charm_script, which overwrote the
    # charm's copy of the metrics script and never installed the cron
    # job. The crontab belongs at `path` (the same file the disable
    # branch above unlinks).
    templating.render("metrics_cronjob.template", path, data, perms=0o644)
def update_postgresql_crontab():
    """Render /etc/cron.d/juju-postgresql from the charm config.

    The template context is the full charm configuration augmented
    with paths and replication status; the WAL-E backup and prune
    commands are included only when wal_e_storage_uri is set.
    """
    config = hookenv.config()

    data = dict(config)
    data.update(
        scripts_dir=helpers.scripts_dir(),
        is_master=replication.is_master(),
        is_primary=postgresql.is_primary(),
    )

    if config["wal_e_storage_uri"]:
        data.update(
            wal_e_enabled=True,
            wal_e_backup_command=wal_e.wal_e_backup_command(),
            wal_e_prune_command=wal_e.wal_e_prune_command(),
        )
    else:
        data["wal_e_enabled"] = False

    destination = os.path.join(helpers.cron_dir(), "juju-postgresql")
    templating.render(
        "postgres.cron.tmpl",
        destination,
        data,
        owner="root",
        group="postgres",
        perms=0o640,
    )
def update_nrpe_config():
    """Write the NRPE (Nagios) check definitions for this unit.

    Registers the basic pgsql connectivity check, installs the
    stale-WAL monitoring script and its cron job (when thresholds are
    configured), and defines the backup-age check — a dummy check on
    standbys, which do not take backups.
    """
    update_nagios_pgpass()
    nrpe = NRPE()

    user = nagios_username()
    port = postgresql.port()
    # Basic connectivity check against the local server.
    nrpe.add_check(
        shortname="pgsql",
        description="Check pgsql",
        check_cmd="check_pgsql -P {} -l {}".format(port, user),
    )

    # copy the check script which will run cronned as postgres user
    with open("scripts/find_latest_ready_wal.py") as fh:
        check_script = fh.read()
    check_script_path = "{}/{}".format(helpers.scripts_dir(), "find_latest_ready_wal.py")
    helpers.write(check_script_path, check_script, mode=0o755)

    # create an (empty) file with appropriate permissions for the above
    check_output_path = "/var/lib/nagios/postgres-wal-max-age.txt"
    if not os.path.exists(check_output_path):
        # Seeded with b"0\n" so the plugin has something to read before
        # the cron job first runs.
        helpers.write(check_output_path, b"0\n", mode=0o644, user="******", group="postgres")

    # retrieve the threshold values from the charm config
    config = hookenv.config()
    check_warn_threshold = config["wal_archive_warn_threshold"] or 0
    check_crit_threshold = config["wal_archive_crit_threshold"] or 0

    check_cron_path = "/etc/cron.d/postgres-wal-archive-check"
    # Both thresholds must be set for the stale-WAL check to be
    # installed; otherwise any pre-existing cron file is left in place.
    if check_warn_threshold and check_crit_threshold:
        # create the cron job to run the above
        check_cron = "*/2 * * * * postgres {}".format(check_script_path)
        helpers.write(check_cron_path, check_cron, mode=0o644)
        # copy the nagios plugin which will check the cronned output
        with open("scripts/check_latest_ready_wal.py") as fh:
            check_script = fh.read()
        # NOTE: check_script_path is rebound here to the plugin location
        # used by the nrpe check below.
        check_script_path = "{}/{}".format("/usr/local/lib/nagios/plugins", "check_latest_ready_wal.py")
        helpers.write(check_script_path, check_script, mode=0o755)
        # write the nagios check definition
        nrpe.add_check(
            shortname="pgsql_stale_wal",
            description="Check for stale WAL backups",
            check_cmd="{} {} {}".format(check_script_path, check_warn_threshold, check_crit_threshold),
        )

    if reactive.is_state("postgresql.replication.is_master"):
        # TODO: These should be calculated from the backup schedule,
        # which is difficult since that is specified in crontab format.
        # Ages are in seconds (48h warn, 54h crit).
        warn_age = 172800
        crit_age = 194400
        backups_log = helpers.backups_log_path()
        nrpe.add_check(
            shortname="pgsql_backups",
            description="Check pgsql backups",
            check_cmd=("check_file_age -w {} -c {} -f {}" "".format(warn_age, crit_age, backups_log)),
        )
    else:
        # Standbys don't do backups. We still generate a check though,
        # to ensure alerts get through to monitoring after a failover.
        nrpe.add_check(
            shortname="pgsql_backups",
            description="Check pgsql backups",
            check_cmd=r"check_dummy 0 standby_does_not_backup",
        )

    nrpe.write()
    reactive.remove_state("postgresql.nagios.needs_update")