def get_or_create_site_storage(self, site):
    """Return the object-storage transfer for *site*, creating and caching it on first use."""
    storage = self.site_transfers.get(site)
    if storage:
        return storage
    storage = get_transfer(get_object_storage_config(self.config, site))
    self.site_transfers[site] = storage
    return storage
def _test_create_basebackup(self, capsys, db, pghoard, mode, replica=False, active_backup_mode='archive_command'):
    """Create a basebackup in the given *mode* and verify it is listed with the expected metadata.

    Runs the backup synchronously (result is read from the callback queue), then checks
    both the CLI ``list-basebackups`` output and the raw object-storage metadata.
    ``end-wal-segment``/``end-time`` are only produced by local-tar backups, and
    ``end-wal-segment`` only on a primary (not when *replica* is True).
    """
    pghoard.create_backup_site_paths(pghoard.test_site)
    basebackup_path = os.path.join(pghoard.config["backup_location"], pghoard.test_site, "basebackup")
    q = Queue()
    # Mutate the live site config so create_basebackup picks up the requested modes
    pghoard.config["backup_sites"][pghoard.test_site]["basebackup_mode"] = mode
    pghoard.config["backup_sites"][pghoard.test_site]["active_backup_mode"] = active_backup_mode
    pghoard.create_basebackup(pghoard.test_site, db.user, basebackup_path, q)
    result = q.get(timeout=60)
    assert result["success"]
    # make sure it shows on the list
    Restore().run([
        "list-basebackups",
        "--config", pghoard.config_path,
        "--site", pghoard.test_site,
        "--verbose",
    ])
    out, _ = capsys.readouterr()
    assert pghoard.test_site in out
    assert "pg-version" in out
    assert "start-wal-segment" in out
    if mode == "local-tar":
        assert "end-time" in out
        if replica is False:
            assert "end-wal-segment" in out
    # Verify the metadata stored alongside the backup objects directly in object storage
    storage_config = common.get_object_storage_config(pghoard.config, pghoard.test_site)
    storage = get_transfer(storage_config)
    backups = storage.list_path(os.path.join(pghoard.config["backup_sites"][pghoard.test_site]["prefix"], "basebackup"))
    for backup in backups:
        assert "start-wal-segment" in backup["metadata"]
        assert "start-time" in backup["metadata"]
        # start-time must be timezone-aware
        assert dateutil.parser.parse(backup["metadata"]["start-time"]).tzinfo  # pylint: disable=no-member
        if mode == "local-tar":
            if replica is False:
                assert "end-wal-segment" in backup["metadata"]
            assert "end-time" in backup["metadata"]
            # end-time must be timezone-aware as well
            assert dateutil.parser.parse(backup["metadata"]["end-time"]).tzinfo  # pylint: disable=no-member
def _test_restore_basebackup(self, db, pghoard, tmpdir, active_backup_mode="archive_command"):
    """Restore the previously-created basebackup and verify overwrite semantics and metadata.

    Checks that restoring into an empty directory succeeds, restoring on top of an
    existing $PGDATA fails without ``--overwrite``, and that the restored
    ``backup_label`` matches the metadata stored in object storage.
    """
    backup_out = tmpdir.join("test-restore").strpath
    # Restoring to empty directory works
    os.makedirs(backup_out)
    Restore().run([
        "get-basebackup",
        "--config", pghoard.config_path,
        "--site", pghoard.test_site,
        "--target-dir", backup_out,
    ])
    # Restoring on top of another $PGDATA doesn't
    with pytest.raises(RestoreError) as excinfo:
        Restore().run([
            "get-basebackup",
            "--config", pghoard.config_path,
            "--site", pghoard.test_site,
            "--target-dir", backup_out,
        ])
    assert "--overwrite not specified" in str(excinfo.value)
    # Until we use the --overwrite flag
    Restore().run([
        "get-basebackup",
        "--config", pghoard.config_path,
        "--site", pghoard.test_site,
        "--target-dir", backup_out,
        "--overwrite",
    ])
    check_call([os.path.join(db.pgbin, "pg_controldata"), backup_out])
    # TODO: check that the backup is valid

    # there should only be a single backup so lets compare what was in the metadata with what
    # was in the backup label
    storage_config = common.get_object_storage_config(pghoard.config, pghoard.test_site)
    storage = get_transfer(storage_config)
    backups = storage.list_path(os.path.join(pghoard.config["backup_sites"][pghoard.test_site]["prefix"], "basebackup"))

    # lets grab the backup label details for what we restored
    pgb = PGBaseBackup(config=None, site="foosite", connection_info=None,
                       basebackup_path=None, compression_queue=None, transfer_queue=None,
                       metrics=metrics.Metrics(statsd={}))
    path = os.path.join(backup_out, "backup_label")
    with open(path, "r") as myfile:
        data = myfile.read()

    start_wal_segment, start_time = pgb.parse_backup_label(data)
    assert start_wal_segment == backups[0]['metadata']['start-wal-segment']
    assert start_time == backups[0]['metadata']['start-time']

    # for a standalone hot backup, the start wal file will be in the pg_xlog / pg_wal directory
    # BUGFIX: compare the integer major version instead of float(db.pgver) --
    # float("9.6.2") raises ValueError and float comparison is fragile for
    # multi-part version strings.  PostgreSQL switched to single-number major
    # versions in v10, where the WAL directory was renamed pg_xlog -> pg_wal.
    wal_dir = "pg_xlog"
    if int(db.pgver.split(".")[0]) >= 10:
        wal_dir = "pg_wal"
    path = os.path.join(backup_out, wal_dir, backups[0]['metadata']['start-wal-segment'])
    if active_backup_mode == "standalone_hot_backup":
        assert os.path.isfile(path) is True
    else:
        assert os.path.isfile(path) is False
def set_config(self, config_file, site):
    """Load the JSON configuration, resolve the target site and set up its object storage."""
    self.config = config.read_json_config_file(config_file, check_commands=False)
    self.site = config.get_site_from_config(self.config, site)
    self.backup_site = self.config["backup_sites"][self.site]
    # Build the transfer object straight from the site's storage configuration
    self.storage = get_transfer(common.get_object_storage_config(self.config, self.site))
def get_object_storage(self, site_name):
    """Return the transfer object for *site_name*, creating and caching it on first use."""
    cached = self.site_transfers.get(site_name)
    if cached:
        return cached
    storage_type, storage_config = get_object_storage_config(self.config, site_name)
    transfer = get_transfer(storage_type, storage_config)
    self.site_transfers[site_name] = transfer
    return transfer
def test_storage_config(tmpdir):
    """Exercise get_object_storage_config: missing config, local default, and error paths."""
    config = {
        "backup_location": None,
    }
    # No backup_location and no site -> no storage config
    assert get_object_storage_config(config, "default") is None
    site_conf = config.setdefault("backup_sites", {}).setdefault("default", {})
    # Site exists but backup_location is still unset -> still no storage config
    assert get_object_storage_config(config, "default") is None

    # With a backup_location a "local" storage config is synthesized
    config["backup_location"] = tmpdir.strpath
    expected_local = {"directory": tmpdir.strpath, "storage_type": "local"}
    assert get_object_storage_config(config, "default") == expected_local

    # An explicit object_storage without storage_type is rejected
    site_conf["object_storage"] = {}
    with pytest.raises(errors.InvalidConfigurationError) as err:
        get_object_storage_config(config, "default")
    assert "storage_type not defined in site 'default'" in str(err.value)

    # An unknown storage_type passes config extraction but fails in get_transfer
    site_conf["object_storage"] = {"storage_type": "foo", "other": "bar"}
    foo_conf = get_object_storage_config(config, "default")
    assert foo_conf == {"storage_type": "foo", "other": "bar"}
    with pytest.raises(errors.InvalidConfigurationError) as err:
        get_transfer(foo_conf)
    assert "unsupported storage type 'foo'" in str(err.value)
def _test_create_basebackup(self, capsys, db, pghoard, mode, replica=False, active_backup_mode="archive_command"):
    """Create a basebackup in the given *mode* with scheduling metadata and verify the result.

    Runs the backup synchronously (result is read from the callback queue), then checks
    the CLI ``list-basebackups`` output and the metadata stored in object storage,
    including the backup-reason/decision-time fields passed in via *metadata*.
    ``end-wal-segment``/``end-time`` are only produced by local-tar/delta backups, and
    ``end-wal-segment`` only on a primary (not when *replica* is True).
    """
    pghoard.create_backup_site_paths(pghoard.test_site)
    basebackup_path = os.path.join(pghoard.config["backup_location"], pghoard.test_site, "basebackup")
    q = Queue()
    # Mutate the live site config so create_basebackup picks up the requested modes
    pghoard.config["backup_sites"][pghoard.test_site]["basebackup_mode"] = mode
    pghoard.config["backup_sites"][pghoard.test_site]["active_backup_mode"] = active_backup_mode
    now = datetime.datetime.now(datetime.timezone.utc)
    metadata = {
        "backup-reason": "scheduled",
        "backup-decision-time": now.isoformat(),
        "normalized-backup-time": now.isoformat(),
    }
    pghoard.create_basebackup(pghoard.test_site, db.user, basebackup_path, q, metadata)
    result = q.get(timeout=60)
    assert result["success"]
    # make sure it shows on the list
    Restore().run([
        "list-basebackups",
        "--config", pghoard.config_path,
        "--site", pghoard.test_site,
        "--verbose",
    ])
    out, _ = capsys.readouterr()
    assert pghoard.test_site in out
    assert "pg-version" in out
    assert "start-wal-segment" in out
    if mode in {BaseBackupMode.local_tar, BaseBackupMode.delta}:
        assert "end-time" in out
        if replica is False:
            assert "end-wal-segment" in out
    # Verify the metadata stored alongside the backup objects directly in object storage
    storage_config = common.get_object_storage_config(pghoard.config, pghoard.test_site)
    storage = get_transfer(storage_config)
    backups = storage.list_path(os.path.join(pghoard.config["backup_sites"][pghoard.test_site]["prefix"], "basebackup"))
    for backup in backups:
        assert "start-wal-segment" in backup["metadata"]
        assert "start-time" in backup["metadata"]
        # start-time must be timezone-aware
        assert dateutil.parser.parse(backup["metadata"]["start-time"]).tzinfo  # pylint: disable=no-member
        # The scheduling metadata passed to create_basebackup must round-trip intact
        assert backup["metadata"]["backup-reason"] == "scheduled"
        assert backup["metadata"]["backup-decision-time"] == now.isoformat()
        assert backup["metadata"]["normalized-backup-time"] == now.isoformat()
        if mode in {BaseBackupMode.local_tar, BaseBackupMode.delta}:
            if replica is False:
                assert "end-wal-segment" in backup["metadata"]
            assert "end-time" in backup["metadata"]
            # end-time must be timezone-aware as well
            assert dateutil.parser.parse(backup["metadata"]["end-time"]).tzinfo  # pylint: disable=no-member
def get_remote_basebackups_info(self, site):
    """List the basebackups stored for *site*, sorted by ascending start time.

    Entry names are stripped to their basename and the start-time metadata is
    converted from string to datetime in place.
    """
    storage = self.site_transfers.get(site)
    if not storage:
        storage = get_transfer(get_object_storage_config(self.config, site))
        self.site_transfers[site] = storage
    basebackup_path = os.path.join(self.config["path_prefix"], site, "basebackup")
    results = storage.list_path(basebackup_path)
    for backup in results:
        # drop path from resulting list and convert timestamps
        backup["name"] = os.path.basename(backup["name"])
        backup["metadata"]["start-time"] = dateutil.parser.parse(backup["metadata"]["start-time"])
    return sorted(results, key=lambda backup: backup["metadata"]["start-time"])
def get_remote_basebackups_info(self, site):
    """List the basebackups stored for *site*, sorted by ascending start time.

    Entry names are stripped to their basename and the start-time metadata is
    converted from string to datetime in place.
    """
    storage = self.site_transfers.get(site)
    if not storage:
        storage = get_transfer(get_object_storage_config(self.config, site))
        self.site_transfers[site] = storage
    basebackup_path = os.path.join(self.config["backup_sites"][site]["prefix"], "basebackup")
    results = storage.list_path(basebackup_path)
    for backup in results:
        # drop path from resulting list and convert timestamps
        backup["name"] = os.path.basename(backup["name"])
        backup["metadata"]["start-time"] = dates.parse_timestamp(backup["metadata"]["start-time"])
    return sorted(results, key=lambda backup: backup["metadata"]["start-time"])
def get_remote_basebackups_info(self, site):
    """List the basebackups stored for *site*, sorted by ascending start time.

    Each entry is normalized in place via patch_basebackup_info before sorting.
    """
    storage = self.site_transfers.get(site)
    if not storage:
        storage = get_transfer(get_object_storage_config(self.config, site))
        self.site_transfers[site] = storage
    site_config = self.config["backup_sites"][site]
    results = storage.list_path(os.path.join(site_config["prefix"], "basebackup"))
    for entry in results:
        self.patch_basebackup_info(entry=entry, site_config=site_config)
    return sorted(results, key=lambda entry: entry["metadata"]["start-time"])
def _remote_file_fetch_loop(app_config, task_queue, result_queue):
    """Worker loop: fetch remote files for queued (site, key, target_path) tasks.

    Runs until a falsy sentinel is received from *task_queue*.  Success results
    are reported as (task, file_size, metadata); failures as (task, exception)
    so the consumer can decide how to handle them.
    """
    transfers = {}
    while True:
        task = task_queue.get()
        if not task:
            # Falsy sentinel -> shut down this worker
            return
        try:
            site, key, target_path = task
            transfer = transfers.get(site)
            if not transfer:
                # Lazily create and cache one transfer per site
                transfers[site] = transfer = get_transfer(get_object_storage_config(app_config, site))
            file_size, metadata = FileFetcher(app_config, transfer).fetch(site, key, target_path)
            result_queue.put((task, file_size, metadata))
        except Exception as e:  # pylint: disable=broad-except
            # Deliberately broad: forward any failure to the consumer instead of dying
            result_queue.put((task, e))
def _test_create_basebackup(self, capsys, db, pghoard, mode, replica=False, active_backup_mode='archive_command'):
    """Create a basebackup in the given *mode* and verify it is listed with the expected metadata.

    Runs the backup synchronously (result is read from the callback queue), then checks
    both the CLI ``list-basebackups`` output and the raw object-storage metadata.
    ``end-wal-segment``/``end-time`` are only produced by local-tar backups, and
    ``end-wal-segment`` only on a primary (not when *replica* is True).
    """
    pghoard.create_backup_site_paths(pghoard.test_site)
    basebackup_path = os.path.join(pghoard.config["backup_location"], pghoard.test_site, "basebackup")
    q = Queue()
    # Mutate the live site config so create_basebackup picks up the requested modes
    pghoard.config["backup_sites"][pghoard.test_site]["basebackup_mode"] = mode
    pghoard.config["backup_sites"][pghoard.test_site]["active_backup_mode"] = active_backup_mode
    pghoard.create_basebackup(pghoard.test_site, db.user, basebackup_path, q)
    result = q.get(timeout=60)
    assert result["success"]
    # make sure it shows on the list
    Restore().run([
        "list-basebackups",
        "--config", pghoard.config_path,
        "--site", pghoard.test_site,
        "--verbose",
    ])
    out, _ = capsys.readouterr()
    assert pghoard.test_site in out
    assert "pg-version" in out
    assert "start-wal-segment" in out
    if mode == "local-tar":
        assert "end-time" in out
        if replica is False:
            assert "end-wal-segment" in out
    # Verify the metadata stored alongside the backup objects directly in object storage
    storage_config = common.get_object_storage_config(pghoard.config, pghoard.test_site)
    storage = get_transfer(storage_config)
    backups = storage.list_path(os.path.join(pghoard.config["backup_sites"][pghoard.test_site]["prefix"], "basebackup"))
    for backup in backups:
        assert "start-wal-segment" in backup["metadata"]
        assert "start-time" in backup["metadata"]
        # start-time must be timezone-aware
        assert dateutil.parser.parse(backup["metadata"]["start-time"]).tzinfo  # pylint: disable=no-member
        if mode == "local-tar":
            if replica is False:
                assert "end-wal-segment" in backup["metadata"]
            assert "end-time" in backup["metadata"]
            # end-time must be timezone-aware as well
            assert dateutil.parser.parse(backup["metadata"]["end-time"]).tzinfo  # pylint: disable=no-member
def test_storage_config(tmpdir):
    """Exercise get_object_storage_config: missing config, local default, and error paths."""
    config = {}
    # Empty config -> no storage config
    assert get_object_storage_config(config, "default") is None
    site_conf = config.setdefault("backup_sites", {}).setdefault("default", {})
    # Site exists but backup_location is still unset -> still no storage config
    assert get_object_storage_config(config, "default") is None

    # With a backup_location a "local" storage config is synthesized
    config["backup_location"] = tmpdir.strpath
    expected_local = {"directory": tmpdir.strpath, "storage_type": "local"}
    assert get_object_storage_config(config, "default") == expected_local

    # An explicit object_storage without storage_type is rejected
    site_conf["object_storage"] = {}
    with pytest.raises(errors.InvalidConfigurationError) as err:
        get_object_storage_config(config, "default")
    assert "storage_type not defined in site 'default'" in str(err.value)

    # An unknown storage_type passes config extraction but fails in get_transfer
    site_conf["object_storage"] = {"storage_type": "foo", "other": "bar"}
    foo_conf = get_object_storage_config(config, "default")
    assert foo_conf == {"storage_type": "foo", "other": "bar"}
    with pytest.raises(errors.InvalidConfigurationError) as err:
        get_transfer(foo_conf)
    assert "unsupported storage type 'foo'" in str(err.value)
def _get_object_storage(self, site, pgdata):
    """Build an ObjectStore for *site* rooted at the configured path prefix."""
    transfer = get_transfer(get_object_storage_config(self.config, site))
    return ObjectStore(transfer, self.config["path_prefix"], site, pgdata)
def _test_restore_basebackup(self, db, pghoard, tmpdir, active_backup_mode="archive_command"):
    """Restore the previously-created basebackup and verify overwrite semantics and metadata.

    Checks that restoring into an empty directory succeeds, restoring on top of an
    existing $PGDATA fails without ``--overwrite``, and that the restored
    ``backup_label`` matches the metadata stored in object storage.
    """
    backup_out = tmpdir.join("test-restore").strpath
    # Restoring to empty directory works
    os.makedirs(backup_out)
    Restore().run([
        "get-basebackup",
        "--config", pghoard.config_path,
        "--site", pghoard.test_site,
        "--target-dir", backup_out,
    ])
    # Restoring on top of another $PGDATA doesn't
    with pytest.raises(RestoreError) as excinfo:
        Restore().run([
            "get-basebackup",
            "--config", pghoard.config_path,
            "--site", pghoard.test_site,
            "--target-dir", backup_out,
        ])
    assert "--overwrite not specified" in str(excinfo.value)
    # Until we use the --overwrite flag
    Restore().run([
        "get-basebackup",
        "--config", pghoard.config_path,
        "--site", pghoard.test_site,
        "--target-dir", backup_out,
        "--overwrite",
    ])
    check_call([os.path.join(db.pgbin, "pg_controldata"), backup_out])
    # TODO: check that the backup is valid

    # there should only be a single backup so lets compare what was in the metadata with what
    # was in the backup label
    storage_config = common.get_object_storage_config(pghoard.config, pghoard.test_site)
    storage = get_transfer(storage_config)
    backups = storage.list_path(os.path.join(pghoard.config["backup_sites"][pghoard.test_site]["prefix"], "basebackup"))

    # lets grab the backup label details for what we restored
    pgb = PGBaseBackup(config=None, site="foosite", connection_info=None,
                       basebackup_path=None, compression_queue=None, transfer_queue=None,
                       metrics=metrics.Metrics(statsd={}))
    path = os.path.join(backup_out, "backup_label")
    with open(path, "r") as myfile:
        data = myfile.read()

    start_wal_segment, start_time = pgb.parse_backup_label(data)
    assert start_wal_segment == backups[0]['metadata']['start-wal-segment']
    assert start_time == backups[0]['metadata']['start-time']

    # for a standalone hot backup, the start wal file will be in the pg_xlog / pg_wal directory
    # BUGFIX: compare the integer major version instead of float(db.pgver) --
    # float("9.6.2") raises ValueError and float comparison is fragile for
    # multi-part version strings.  PostgreSQL switched to single-number major
    # versions in v10, where the WAL directory was renamed pg_xlog -> pg_wal.
    wal_dir = "pg_xlog"
    if int(db.pgver.split(".")[0]) >= 10:
        wal_dir = "pg_wal"
    path = os.path.join(backup_out, wal_dir, backups[0]['metadata']['start-wal-segment'])
    if active_backup_mode == "standalone_hot_backup":
        assert os.path.isfile(path) is True
    else:
        assert os.path.isfile(path) is False