async def determine_kept_backups(self, *, retention, backups):
    """Decide which backups survive the given retention policy.

    The newest backups are always preferred. Criteria are applied to the
    oldest remaining manifest, one at a time, until no criterion removes
    anything:

    1. maximum_backups: drop oldest until at most this many remain.
    2. minimum_backups: once at/below this count, stop deleting entirely.
    3. keep_days: drop the oldest if it ended more than keep_days days ago.

    Returns the set of manifest filenames to keep.

    NOTE(review): when minimum_backups already covers all of `backups`, the
    input `backups` collection is returned as-is rather than a set of
    filenames — presumably `backups` is already a collection of filenames;
    verify against callers before relying on the return type.
    """
    # Fast path: fewer (or equal) backups than the required minimum — keep all.
    if retention.minimum_backups is not None and retention.minimum_backups >= len(backups):
        return backups
    now = utils.now()
    # Newest first, so the oldest candidate for deletion is always at the end.
    manifests = sorted(
        await self._download_backup_manifests(backups),
        key=lambda manifest: manifest.start,
        reverse=True,
    )
    while manifests:
        # Criterion 1: hard cap on the number of backups.
        if retention.maximum_backups is not None:
            if retention.maximum_backups < len(manifests):
                manifests.pop()
                continue
        # Ok, so now we have at most <maximum_backups> (if set) backups
        # Do we have too _few_ backups to delete any more?
        if retention.minimum_backups is not None:
            if retention.minimum_backups >= len(manifests):
                break
        # Criterion 3: age-based expiry of the oldest remaining backup.
        if retention.keep_days is not None:
            manifest = manifests[-1]
            if (now - manifest.end).days > retention.keep_days:
                manifests.pop()
                continue
        # We don't have any other criteria to filter the backup manifests with
        break
    return {manifest.filename for manifest in manifests}
def _ssresults(*kwarg_list):
    """Build one SnapshotResult per kwargs dict, with hostnames numbered from 1.

    Each positional argument is a dict of extra keyword arguments merged into
    the corresponding ``ipc.SnapshotResult``; results share ``_progress_done``
    and get a fresh ``start`` timestamp.
    """
    return [
        # BUGFIX: hostname was the literal "host-{i}" (missing f-prefix);
        # enumerate's counter was never interpolated.
        ipc.SnapshotResult(progress=_progress_done, hostname=f"host-{i}", start=utils.now(), **kw)
        for i, kw in enumerate(kwarg_list, 1)
    ]
def test_node_to_backup_index(node_azlist, backup_azlist, expected_index, exception):
    """Verify node-to-backup index resolution for the given AZ layouts.

    Builds coordinator nodes from node_azlist and a manifest whose snapshot
    results follow backup_azlist, then asserts the computed index (or that
    the expected exception is raised).
    """
    coordinator_nodes = []
    for az in node_azlist:
        coordinator_nodes.append(CoordinatorNode(url="unused", az=az))
    snapshot_results = [ipc.SnapshotResult(az=az) for az in backup_azlist]
    manifest = ipc.BackupManifest(
        start=utils.now(),
        attempt=1,
        snapshot_results=snapshot_results,
        upload_results=[],
        plugin="files",
    )
    op = DummyRestoreOp(coordinator_nodes, manifest)
    with exception:
        op.assert_node_to_backup_index_is(expected_index)
def snapshot(self):
    """Run a snapshot under the shared snapshotter lock and record its outcome.

    Stores the snapshot state, per-file hashes, file count, total size and
    end timestamp on self.result, then marks the progress as done.
    """
    # 'snapshotter' is global; ensure we have sole access to it
    with self.snapshotter.lock:
        self.check_op_id()
        self.snapshotter.snapshot(progress=self.result.progress)
        state = self.snapshotter.get_snapshot_state()
        self.result.state = state
        hashes = []
        for ssfile in state.files:
            if ssfile.hexdigest:
                hashes.append(ipc.SnapshotHash(hexdigest=ssfile.hexdigest, size=ssfile.file_size))
        self.result.hashes = hashes
        self.result.files = len(state.files)
        self.result.total_size = sum(ssfile.file_size for ssfile in state.files)
        self.result.end = utils.now()
        self.result.progress.done()
def test_partial_node_to_backup_index(partial_node_spec, expected_index, exception):
    """Verify node-to-backup index resolution for a partial-restore request.

    Builds three nodes and a matching manifest, applies the partial-restore
    node spec, and asserts the computed index (or the expected exception).
    """
    num_nodes = 3
    nodes = []
    snapshot_results = []
    for i in range(num_nodes):
        nodes.append(CoordinatorNode(url=f"url{i}"))
        snapshot_results.append(ipc.SnapshotResult(hostname=f"host{i}"))
    manifest = ipc.BackupManifest(
        start=utils.now(),
        attempt=1,
        snapshot_results=snapshot_results,
        upload_results=[],
        plugin="files",
    )
    op = DummyRestoreOp(nodes, manifest)
    with exception:
        # parse_obj may itself raise, so it stays inside the context manager.
        partial_nodes = [ipc.PartialRestoreRequestNode.parse_obj(partial_node_spec)]
        op.req.partial_restore_nodes = partial_nodes
        op.assert_node_to_backup_index_is(expected_index)
async def run_attempts(self, attempts):
    """Run self.try_run() up to `attempts` times, timing each attempt.

    Returns as soon as try_run() reports success. A TransientException
    lets the loop retry with the next attempt; a PermanentException
    aborts the loop immediately. If no attempt succeeds, the operation
    is marked failed via set_status_fail().

    Args:
        attempts: maximum number of attempts (attempt numbers start at 1).
    """
    name = self.__class__.__name__
    try:
        for attempt in range(1, attempts + 1):
            logger.debug("%s - attempt #%d/%d", name, attempt, attempts)
            self.attempt = attempt
            self.attempt_start = utils.now()
            async with self.stats.async_timing_manager(
                "astacus_attempt_duration", {"op": name, "attempt": str(attempt)}
            ):
                try:
                    if await self.try_run():
                        return
                except exceptions.TransientException as ex:
                    # BUGFIX: log message typo "trasient" -> "transient".
                    logger.info("%s - transient failure: %r", name, ex)
    except exceptions.PermanentException as ex:
        logger.info("%s - permanent failure: %r", name, ex)
    # Reached on exhausted attempts or permanent failure.
    self.set_status_fail()