def fixate_removal_date(self, datasets, task):
    """Persist a removal date onto snapshots produced by `task`.

    For every snapshot in `datasets` (mapping of dataset name -> list of
    snapshot names) that parses under the task's naming schema, compute
    `destroy_at = creation datetime + task lifetime` and write it to the
    ZFS user property returned by `pool.snapshottask.removal_date_property`,
    caching the value in `self.removal_dates` so an equal-or-later date is
    not re-set.

    :param datasets: dict mapping dataset name to an iterable of snapshot names
    :param task: periodic snapshot task dict (must contain `naming_schema`)
    """
    property_name = self.middleware.call_sync("pool.snapshottask.removal_date_property")
    zettarepl_task = PeriodicSnapshotTask.from_data(None, self.middleware.call_sync(
        "zettarepl.periodic_snapshot_task_definition", task,
    ))
    for dataset, snapshots in datasets.items():
        for snapshot in snapshots:
            try:
                parsed_snapshot_name = parse_snapshot_name(snapshot, task["naming_schema"])
            except ValueError as e:
                self.middleware.logger.error(
                    "Unexpected error parsing snapshot name %r with naming schema %r: %r",
                    snapshot, task["naming_schema"], e,
                )
            else:
                destroy_at = parsed_snapshot_name.datetime + zettarepl_task.lifetime
                # Cache is keyed by pool name, then by full snapshot name
                k1 = dataset.split("/")[0]
                k2 = f"{dataset}@{snapshot}"
                existing_destroy_at = self.removal_dates.get(k1, {}).get(k2)
                if existing_destroy_at is not None and existing_destroy_at >= destroy_at:
                    # Already tagged with the same or a later removal date
                    continue
                try:
                    subprocess.run(
                        ["zfs", "set", f"{property_name}={destroy_at.isoformat()}", f"{dataset}@{snapshot}"],
                        check=True, capture_output=True, encoding="utf-8", errors="ignore",
                    )
                except subprocess.CalledProcessError as e:
                    self.middleware.logger.warning(
                        "Error setting snapshot %s@%s removal date: %r", dataset, snapshot, e.stderr,
                    )
                else:
                    # BUGFIX: original did `self.removal_dates[k1][k2] = destroy_at`,
                    # which raises KeyError the first time a pool is seen (the read
                    # above defends with `.get(k1, {})`, so `k1` may be absent).
                    self.removal_dates.setdefault(k1, {})[k2] = destroy_at
def periodic_snapshot_task_snapshots(self, task):
    """Return the set of snapshot names (`dataset@name`) owned by `task`.

    A snapshot belongs to the task when its dataset is covered by the task,
    its name parses under the task's naming schema, and the zettarepl
    ownership check accepts the parsed name.

    :param task: periodic snapshot task dict
    :return: set of full snapshot name strings
    """
    all_snapshots = list_snapshots(LocalShell(), task["dataset"], task["recursive"])
    zettarepl_task = PeriodicSnapshotTask.from_data(None, self.middleware.call_sync(
        "zettarepl.periodic_snapshot_task_definition", task,
    ))
    owner = PeriodicSnapshotTaskSnapshotOwner(datetime.utcnow(), zettarepl_task)

    owned = set()
    for snap in all_snapshots:
        if not owner.owns_dataset(snap.dataset):
            continue
        try:
            parsed = parse_snapshot_name(snap.name, task["naming_schema"])
        except ValueError:
            # Name does not match this task's schema; not ours
            continue
        if owner.owns_snapshot(snap.dataset, parsed):
            owned.add(str(snap))
    return owned
def _run_periodic_snapshot_tasks(self, now, tasks):
    """Create the snapshots for all periodic snapshot tasks due at `now`.

    Phases:
      1. Compute each task's snapshot name for `now`; tasks whose generated
         name cannot be parsed back are skipped with an observer error.
      2. Sort tasks so identical snapshot names collapse predictably
         (recursive before non-recursive, no-exclude before exclude).
      3. Create each snapshot once; duplicate (dataset, name) pairs are
         reported as success without re-creating.
      4. Destroy snapshots that came out empty, per task settings.

    :param now: datetime used to render the snapshot names
    :param tasks: iterable of periodic snapshot task objects
    """
    scheduled_tasks = []
    for task in tasks:
        snapshot_name = get_snapshot_name(now, task.naming_schema)

        # Round-trip the generated name through the parser; a schema that
        # cannot be parsed back would later break retention handling, so
        # the task is skipped and the observer notified.
        try:
            parsed_snapshot_name = parse_snapshot_name(
                snapshot_name, task.naming_schema)
        except ValueError as e:
            logger.warning(
                "Unable to parse snapshot name %r with naming schema %r: %s. Skipping task %r",
                snapshot_name,
                task.naming_schema,
                str(e),
                task,
            )
            notify(
                self.observer,
                PeriodicSnapshotTaskError(
                    task.id,
                    "Unable to parse snapshot name %r: %s" % (
                        snapshot_name,
                        str(e),
                    )))
            continue

        scheduled_tasks.append(
            ScheduledPeriodicSnapshotTask(
                task,
                snapshot_name,
                parsed_snapshot_name,
            ))

    scheduled_tasks = sorted(
        scheduled_tasks,
        key=lambda scheduled_task: (
            # Common sorting order
            parsed_snapshot_sort_key(scheduled_task.parsed_snapshot_name),
            # Recursive snapshot with same name as non-recursive should go first
            0 if scheduled_task.task.recursive else 1,
            # Recursive snapshots without exclude should go first
            0 if not scheduled_task.task.exclude else 1,
        ))

    tasks_with_snapshot_names = [
        (scheduled_task.task, scheduled_task.snapshot_name)
        for scheduled_task in scheduled_tasks]

    created_snapshots = set()
    for task, snapshot_name in tasks_with_snapshot_names:
        snapshot = Snapshot(task.dataset, snapshot_name)

        # A broader (recursive) task sorted earlier may already have created
        # this exact snapshot; report success without creating it again.
        if snapshot in created_snapshots:
            notify(self.observer, PeriodicSnapshotTaskSuccess(task.id))
            continue

        # The observer supplies per-run options (e.g. extra properties).
        options = notify(self.observer, PeriodicSnapshotTaskStart(task.id))
        try:
            create_snapshot(self.local_shell, snapshot, task.recursive, task.exclude, options.properties)
        except CreateSnapshotError as e:
            logger.warning("Error creating %r: %r", snapshot, e)
            notify(self.observer, PeriodicSnapshotTaskError(task.id, str(e)))
        else:
            logger.info("Created %r", snapshot)
            created_snapshots.add(snapshot)
            notify(self.observer, PeriodicSnapshotTaskSuccess(task.id))

    # Clean up snapshots that contain no data, per each task's settings.
    empty_snapshots = get_empty_snapshots_for_deletion(
        self.local_shell, tasks_with_snapshot_names)
    if empty_snapshots:
        logger.info("Destroying empty snapshots: %r",
                    empty_snapshots)
        destroy_snapshots(self.local_shell, empty_snapshots)
def annotate_snapshots(self, snapshots):
    """Attach a `retention` entry to every snapshot dict in `snapshots`.

    Retention is resolved from two sources:
      * the latest expiration implied by any enabled periodic snapshot task
        that owns the snapshot (creation time + task lifetime), and
      * the removal-date ZFS property, if present and parseable.
    When both exist, the later date wins; on a tie the task wins. Snapshots
    with neither source get `retention = None`.

    :param snapshots: list of snapshot dicts (mutated in place)
    :return: the same `snapshots` list
    """
    property_name = self.middleware.call_sync("pool.snapshottask.removal_date_property")

    enabled_tasks = self.middleware.call_sync(
        "pool.snapshottask.query", [["enabled", "=", True]])
    zettarepl_tasks = [
        PeriodicSnapshotTask.from_data(task["id"], self.middleware.call_sync(
            "zettarepl.periodic_snapshot_task_definition", task,
        ))
        for task in enabled_tasks
    ]
    owners = [
        PeriodicSnapshotTaskSnapshotOwner(datetime.utcnow(), zt)
        for zt in zettarepl_tasks
    ]

    for snap in snapshots:
        # Latest task-implied expiration among all owning tasks.
        task_expiry = None
        task_expiry_id = None
        for owner in owners:
            if not owner.owns_dataset(snap["dataset"]):
                continue
            try:
                parsed = parse_snapshot_name(
                    snap["snapshot_name"],
                    owner.periodic_snapshot_task.naming_schema,
                )
            except ValueError:
                continue
            if not owner.owns_snapshot(snap["dataset"], parsed):
                continue
            candidate = parsed.datetime + owner.periodic_snapshot_task.lifetime
            if task_expiry is None or task_expiry < candidate:
                task_expiry = candidate
                task_expiry_id = owner.periodic_snapshot_task.id

        # Expiration recorded on the snapshot itself via the removal property.
        property_expiry = None
        if property_name in snap["properties"]:
            try:
                property_expiry = isodate.parse_datetime(
                    snap["properties"][property_name]["value"])
            except Exception as e:
                self.middleware.logger.warning(
                    "Error parsing snapshot %r %s: %r", snap["name"], property_name, e)

        # Later source wins; a tie is attributed to the task.
        if task_expiry is not None and property_expiry is not None:
            if task_expiry < property_expiry:
                task_expiry = None
            else:
                property_expiry = None

        if task_expiry is not None:
            snap["retention"] = {
                "datetime": task_expiry,
                "source": "periodic_snapshot_task",
                "periodic_snapshot_task_id": task_expiry_id,
            }
        elif property_expiry is not None:
            snap["retention"] = {
                "datetime": property_expiry,
                "source": "property",
            }
        else:
            snap["retention"] = None

    return snapshots