def do_prune(self, args):
    """Prune repository archives according to specified rules"""
    repository = self.open_repository(args.repository, exclusive=True)
    manifest, key = Manifest.load(repository)
    cache = Cache(repository, key, manifest)
    archives = list(sorted(Archive.list_archives(repository, key, manifest, cache),
                           key=attrgetter('ts'), reverse=True))
    if args.hourly + args.daily + args.weekly + args.monthly + args.yearly == 0 and args.within is None:
        self.print_error('At least one of the "within", "hourly", "daily", "weekly", "monthly" or "yearly" '
                         'settings must be specified')
        return 1
    if args.prefix:
        archives = [archive for archive in archives if archive.name.startswith(args.prefix)]
    keep = []
    if args.within:
        keep += prune_within(archives, args.within)
    if args.hourly:
        keep += prune_split(archives, '%Y-%m-%d %H', args.hourly, keep)
    if args.daily:
        keep += prune_split(archives, '%Y-%m-%d', args.daily, keep)
    if args.weekly:
        keep += prune_split(archives, '%G-%V', args.weekly, keep)
    if args.monthly:
        keep += prune_split(archives, '%Y-%m', args.monthly, keep)
    if args.yearly:
        keep += prune_split(archives, '%Y', args.yearly, keep)
    keep.sort(key=attrgetter('ts'), reverse=True)
    to_delete = [a for a in archives if a not in keep]
    stats = Statistics()
    for archive in keep:
        self.print_verbose('Keeping archive: %s' % format_archive(archive))
    for archive in to_delete:
        if args.dry_run:
            self.print_verbose('Would prune: %s' % format_archive(archive))
        else:
            self.print_verbose('Pruning archive: %s' % format_archive(archive))
            archive.delete(stats)
    if to_delete and not args.dry_run:
        manifest.write()
        repository.commit()
        cache.commit()
    if args.stats:
        stats.print_('Deleted data:', cache)
    return self.exit_code
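
# For reference, a minimal sketch of the bucketing rule that prune_split (a
# helper defined elsewhere in this codebase) is assumed to implement: archives
# are grouped by a strftime() key such as '%Y-%m-%d', and the newest archive in
# each of the n most recent groups is kept, skipping archives already kept by
# an earlier rule. (prune_within is assumed to simply keep every archive whose
# timestamp falls within a duration such as '2d' of now.) The name
# prune_split_sketch and the omission of local-time conversion are this
# sketch's own simplifications, not the real helper.
def prune_split_sketch(archives, format, n, skip=()):
    if n <= 0:
        return []
    keep, last = [], None
    for a in sorted(archives, key=attrgetter('ts'), reverse=True):
        period = a.ts.strftime(format)
        if period != last:  # newest archive of a not-yet-seen period bucket
            last = period
            if a not in skip:
                keep.append(a)
                if len(keep) == n:
                    break
    return keep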
def do_delete(self, args):
    """Delete an existing repository or archive"""
    repository = self.open_repository(args.target, exclusive=True)
    manifest, key = Manifest.load(repository)
    cache = Cache(repository, key, manifest, do_files=args.cache_files)
    if args.target.archive:
        archive = Archive(repository, key, manifest, args.target.archive, cache=cache)
        stats = Statistics()
        archive.delete(stats)
        manifest.write()
        repository.commit()
        cache.commit()
        if args.stats:
            stats.print_('Deleted data:', cache)
    else:
        print("You requested to completely DELETE the repository *including* all archives it contains:")
        for archive in sorted(Archive.list_archives(repository, key, manifest), key=attrgetter('ts')):
            print(format_archive(archive))
        print("""Type "YES" if you understand this and want to continue.\n""")
        if input('Do you want to continue? ') == 'YES':
            repository.destroy()
            cache.destroy()
            print("Repository and corresponding cache were deleted.")
    return self.exit_code
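
# For context, this handler serves the `delete` subcommand, assuming the
# Attic-style REPOSITORY::ARCHIVE address syntax (paths are illustrative):
#
#     attic delete /mnt/backup::monday    # delete a single archive
#     attic delete /mnt/backup            # delete the whole repository (prompts for "YES")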
class Archive:

    class DoesNotExist(Error):
        """Archive {} does not exist"""

    class AlreadyExists(Error):
        """Archive {} already exists"""

    class IncompatibleFilesystemEncodingError(Error):
        """Failed to encode filename "{}" into file system encoding "{}". Consider configuring the LANG environment variable."""

    def __init__(self, repository, key, manifest, name, cache=None, create=False,
                 checkpoint_interval=300, numeric_owner=False, progress=False):
        self.cwd = os.getcwd()
        self.key = key
        self.repository = repository
        self.cache = cache
        self.manifest = manifest
        self.hard_links = {}
        self.stats = Statistics()
        self.show_progress = progress
        self.last_progress = time.time()
        self.name = name
        self.checkpoint_interval = checkpoint_interval
        self.numeric_owner = numeric_owner
        self.pipeline = DownloadPipeline(self.repository, self.key)
        if create:
            self.items_buffer = CacheChunkBuffer(self.cache, self.key, self.stats)
            self.chunker = Chunker(WINDOW_SIZE, CHUNK_MASK, CHUNK_MIN, CHUNK_MAX, self.key.chunk_seed)
            if name in manifest.archives:
                raise self.AlreadyExists(name)
            self.last_checkpoint = time.time()
            # Pick the first unused checkpoint name: name.checkpoint,
            # name.checkpoint.1, name.checkpoint.2, ...
            i = 0
            while True:
                self.checkpoint_name = '%s.checkpoint%s' % (name, i and ('.%d' % i) or '')
                if self.checkpoint_name not in manifest.archives:
                    break
                i += 1
        else:
            if name not in self.manifest.archives:
                raise self.DoesNotExist(name)
            info = self.manifest.archives[name]
            self.load(info[b'id'])

    def _load_meta(self, id):
        data = self.key.decrypt(id, self.repository.get(id))
        metadata = msgpack.unpackb(data)
        if metadata[b'version'] != 1:
            raise Exception('Unknown archive metadata version')
        return metadata

    def load(self, id):
        self.id = id
        self.metadata = self._load_meta(self.id)
        decode_dict(self.metadata, (b'name', b'hostname', b'username', b'time'))
        self.metadata[b'cmdline'] = [arg.decode('utf-8', 'surrogateescape') for arg in self.metadata[b'cmdline']]
        self.name = self.metadata[b'name']

    @property
    def ts(self):
        """Timestamp of archive creation in UTC"""
        return parse_timestamp(self.metadata[b'time'])

    def __repr__(self):
        return 'Archive(%r)' % self.name

    def iter_items(self, filter=None, preload=False):
        for item in self.pipeline.unpack_many(self.metadata[b'items'], filter=filter, preload=preload):
            yield item

    def add_item(self, item):
        if self.show_progress and time.time() - self.last_progress > 0.2:
            self.stats.show_progress(item=item)
            self.last_progress = time.time()
        self.items_buffer.add(item)
        if time.time() - self.last_checkpoint > self.checkpoint_interval:
            self.write_checkpoint()
            self.last_checkpoint = time.time()

    def write_checkpoint(self):
        self.save(self.checkpoint_name)
        del self.manifest.archives[self.checkpoint_name]
        self.cache.chunk_decref(self.id, self.stats)

    def save(self, name=None, timestamp=None):
        name = name or self.name
        if name in self.manifest.archives:
            raise self.AlreadyExists(name)
        self.items_buffer.flush(flush=True)
        if timestamp is None:
            timestamp = datetime.utcnow()
        metadata = StableDict({
            'version': 1,
            'name': name,
            'items': self.items_buffer.chunks,
            'cmdline': sys.argv,
            'hostname': socket.gethostname(),
            'username': getuser(),
            'time': timestamp.isoformat(),
        })
        data = msgpack.packb(metadata, unicode_errors='surrogateescape')
        self.id = self.key.id_hash(data)
        self.cache.add_chunk(self.id, data, self.stats)
        self.manifest.archives[name] = {'id': self.id, 'time': metadata['time']}
        self.manifest.write()
        self.repository.commit()
        self.cache.commit()

    def calc_stats(self, cache):
        def add(id):
            count, size, csize = cache.chunks[id]
            stats.update(size, csize, count == 1)
            cache.chunks[id] = count - 1, size, csize

        def add_file_chunks(chunks):
            for id, _, _ in chunks:
                add(id)

        # This function is a bit evil since it abuses the cache to calculate
        # the stats. The cache transaction must be rolled back afterwards.
        unpacker = msgpack.Unpacker(use_list=False)
        cache.begin_txn()
        stats = Statistics()
        add(self.id)
        for id, chunk in zip(self.metadata[b'items'], self.repository.get_many(self.metadata[b'items'])):
            add(id)
            unpacker.feed(self.key.decrypt(id, chunk))
            for item in unpacker:
                if b'chunks' in item:
                    stats.nfiles += 1
                    add_file_chunks(item[b'chunks'])
        cache.rollback()
        return stats

    def extract_item(self, item, restore_attrs=True, dry_run=False, stdout=False, sparse=False):
        if dry_run or stdout:
            if b'chunks' in item:
                for data in self.pipeline.fetch_many([c[0] for c in item[b'chunks']], is_preloaded=True):
                    if stdout:
                        sys.stdout.buffer.write(data)
                if stdout:
                    sys.stdout.buffer.flush()
            return

        dest = self.cwd
        if item[b'path'].startswith('/') or item[b'path'].startswith('..'):
            raise Exception('Path should be relative and local')
        path = os.path.join(dest, item[b'path'])
        # Attempt to remove existing files, ignore errors on failure
        try:
            st = os.lstat(path)
            if stat.S_ISDIR(st.st_mode):
                os.rmdir(path)
            else:
                os.unlink(path)
        except UnicodeEncodeError:
            raise self.IncompatibleFilesystemEncodingError(path, sys.getfilesystemencoding())
        except OSError:
            pass
        mode = item[b'mode']
        if stat.S_ISDIR(mode):
            if not os.path.exists(path):
                os.makedirs(path)
            if restore_attrs:
                self.restore_attrs(path, item)
        elif stat.S_ISREG(mode):
            if not os.path.exists(os.path.dirname(path)):
                os.makedirs(os.path.dirname(path))
            # Hard link?
            if b'source' in item:
                source = os.path.join(dest, item[b'source'])
                if os.path.exists(path):
                    os.unlink(path)
                os.link(source, path)
            else:
                with open(path, 'wb') as fd:
                    ids = [c[0] for c in item[b'chunks']]
                    for data in self.pipeline.fetch_many(ids, is_preloaded=True):
                        if sparse and ZEROS.startswith(data):
                            # all-zero chunk: create a hole in a sparse file
                            fd.seek(len(data), 1)
                        else:
                            fd.write(data)
                    pos = fd.tell()
                    fd.truncate(pos)
                    fd.flush()
                    self.restore_attrs(path, item, fd=fd.fileno())
        elif stat.S_ISFIFO(mode):
            if not os.path.exists(os.path.dirname(path)):
                os.makedirs(os.path.dirname(path))
            os.mkfifo(path)
            self.restore_attrs(path, item)
        elif stat.S_ISLNK(mode):
            if not os.path.exists(os.path.dirname(path)):
                os.makedirs(os.path.dirname(path))
            source = item[b'source']
            if os.path.exists(path):
                os.unlink(path)
            os.symlink(source, path)
            self.restore_attrs(path, item, symlink=True)
        elif stat.S_ISCHR(mode) or stat.S_ISBLK(mode):
            os.mknod(path, item[b'mode'], item[b'rdev'])
            self.restore_attrs(path, item)
        else:
            raise Exception('Unknown archive item type %r' % item[b'mode'])

    def restore_attrs(self, path, item, symlink=False, fd=None):
        xattrs = item.get(b'xattrs')
        if xattrs:
            for k, v in xattrs.items():
                try:
                    xattr.setxattr(fd or path, k, v, follow_symlinks=False)
                except OSError as e:
                    if e.errno != errno.ENOTSUP:
                        raise
        uid = gid = None
        if not self.numeric_owner:
            uid = user2uid(item[b'user'])
            gid = group2gid(item[b'group'])
        uid = item[b'uid'] if uid is None else uid
        gid = item[b'gid'] if gid is None else gid
        # This code is a bit of a mess due to os specific differences
        try:
            if fd:
                os.fchown(fd, uid, gid)
            else:
                os.lchown(path, uid, gid)
        except OSError:
            pass
        if fd:
            os.fchmod(fd, item[b'mode'])
        elif not symlink:
            os.chmod(path, item[b'mode'])
        elif has_lchmod:  # Not available on Linux
            os.lchmod(path, item[b'mode'])
        mtime = bigint_to_int(item[b'mtime'])
        if fd and utime_supports_fd:  # Python >= 3.3
            os.utime(fd, None, ns=(mtime, mtime))
        elif utime_supports_follow_symlinks:  # Python >= 3.3
            os.utime(path, None, ns=(mtime, mtime), follow_symlinks=False)
        elif not symlink:
            os.utime(path, (mtime / 1e9, mtime / 1e9))
        acl_set(path, item, self.numeric_owner)
        # Only available on OS X and FreeBSD
        if has_lchflags and b'bsdflags' in item:
            try:
                os.lchflags(path, item[b'bsdflags'])
            except OSError:
                pass

    def rename(self, name):
        if name in self.manifest.archives:
            raise self.AlreadyExists(name)
        metadata = StableDict(self._load_meta(self.id))
        metadata[b'name'] = name
        data = msgpack.packb(metadata, unicode_errors='surrogateescape')
        new_id = self.key.id_hash(data)
        self.cache.add_chunk(new_id, data, self.stats)
        self.manifest.archives[name] = {'id': new_id, 'time': metadata[b'time']}
        self.cache.chunk_decref(self.id, self.stats)
        del self.manifest.archives[self.name]

    def delete(self, stats):
        unpacker = msgpack.Unpacker(use_list=False)
        for items_id, data in zip(self.metadata[b'items'], self.repository.get_many(self.metadata[b'items'])):
            unpacker.feed(self.key.decrypt(items_id, data))
            self.cache.chunk_decref(items_id, stats)
            for item in unpacker:
                if b'chunks' in item:
                    for chunk_id, size, csize in item[b'chunks']:
                        self.cache.chunk_decref(chunk_id, stats)
        self.cache.chunk_decref(self.id, stats)
        del self.manifest.archives[self.name]

    def stat_attrs(self, st, path):
        item = {
            b'mode': st.st_mode,
            b'uid': st.st_uid, b'user': uid2user(st.st_uid),
            b'gid': st.st_gid, b'group': gid2group(st.st_gid),
            b'mtime': int_to_bigint(st_mtime_ns(st)),
        }
        if self.numeric_owner:
            item[b'user'] = item[b'group'] = None
        xattrs = xattr.get_all(path, follow_symlinks=False)
        if xattrs:
            item[b'xattrs'] = StableDict(xattrs)
        if has_lchflags and st.st_flags:
            item[b'bsdflags'] = st.st_flags
        acl_get(path, item, st, self.numeric_owner)
        return item

    def process_dir(self, path, st):
        item = {b'path': make_path_safe(path)}
        item.update(self.stat_attrs(st, path))
        self.add_item(item)
        return 'd'  # directory

    def process_fifo(self, path, st):
        item = {b'path': make_path_safe(path)}
        item.update(self.stat_attrs(st, path))
        self.add_item(item)
        return 'f'  # fifo

    def process_dev(self, path, st):
        item = {b'path': make_path_safe(path), b'rdev': st.st_rdev}
        item.update(self.stat_attrs(st, path))
        self.add_item(item)
        if stat.S_ISCHR(st.st_mode):
            return 'c'  # char device
        elif stat.S_ISBLK(st.st_mode):
            return 'b'  # block device

    def process_symlink(self, path, st):
        source = os.readlink(path)
        item = {b'path': make_path_safe(path), b'source': source}
        item.update(self.stat_attrs(st, path))
        self.add_item(item)
        return 's'  # symlink

    def process_stdin(self, path, cache):
        uid, gid = 0, 0
        fd = sys.stdin.buffer  # binary
        chunks = []
        for chunk in self.chunker.chunkify(fd):
            chunks.append(cache.add_chunk(self.key.id_hash(chunk), chunk, self.stats))
        self.stats.nfiles += 1
        item = {
            b'path': path,
            b'chunks': chunks,
            b'mode': 0o100660,  # regular file, ug=rw
            b'uid': uid, b'user': uid2user(uid),
            b'gid': gid, b'group': gid2group(gid),
            b'mtime': int_to_bigint(int(time.time()) * 1000000000),
        }
        self.add_item(item)

    def process_file(self, path, st, cache):
        status = None
        safe_path = make_path_safe(path)
        # Is it a hard link?
        if st.st_nlink > 1:
            source = self.hard_links.get((st.st_ino, st.st_dev))
            if (st.st_ino, st.st_dev) in self.hard_links:
                item = self.stat_attrs(st, path)
                item.update({b'path': safe_path, b'source': source})
                self.add_item(item)
                status = 'h'  # regular file, hardlink (to already seen inodes)
                return status
            else:
                self.hard_links[st.st_ino, st.st_dev] = safe_path
        path_hash = self.key.id_hash(os.path.join(self.cwd, path).encode('utf-8', 'surrogateescape'))
        ids = cache.file_known_and_unchanged(path_hash, st)
        chunks = None
        if ids is not None:
            # Make sure all ids are available
            for id_ in ids:
                if not cache.seen_chunk(id_):
                    break
            else:
                chunks = [cache.chunk_incref(id_, self.stats) for id_ in ids]
                status = 'U'  # regular file, unchanged
        else:
            status = 'A'  # regular file, added
        # Only chunkify the file if needed
        if chunks is None:
            fh = Archive._open_rb(path, st)
            with os.fdopen(fh, 'rb') as fd:
                chunks = []
                for chunk in self.chunker.chunkify(fd, fh):
                    chunks.append(cache.add_chunk(self.key.id_hash(chunk), chunk, self.stats))
            cache.memorize_file(path_hash, st, [c[0] for c in chunks])
            status = status or 'M'  # regular file, modified (if not 'A' already)
        item = {b'path': safe_path, b'chunks': chunks}
        item.update(self.stat_attrs(st, path))
        self.stats.nfiles += 1
        self.add_item(item)
        return status

    @staticmethod
    def list_archives(repository, key, manifest, cache=None):
        for name, info in manifest.archives.items():
            yield Archive(repository, key, manifest, name, cache=cache)

    @staticmethod
    def _open_rb(path, st):
        flags_normal = os.O_RDONLY | getattr(os, 'O_BINARY', 0)
        flags_noatime = flags_normal | getattr(os, 'O_NOATIME', 0)
        euid = None

        def open_simple(p, s):
            return os.open(p, flags_normal)

        def open_noatime(p, s):
            return os.open(p, flags_noatime)

        def open_noatime_if_owner(p, s):
            if euid == 0 or s.st_uid == euid:
                # we are root or owner of file
                return open_noatime(p, s)
            else:
                return open_simple(p, s)

        def open_noatime_with_fallback(p, s):
            try:
                fd = os.open(p, flags_noatime)
            except PermissionError:
                # Was this EPERM due to the O_NOATIME flag?
                fd = os.open(p, flags_normal)
                # Yes, it was -- otherwise the above line would have thrown
                # another exception.
                nonlocal euid
                euid = os.geteuid()
                # So in future, let's check whether the file is owned by us
                # before attempting to use O_NOATIME.
                Archive._open_rb = open_noatime_if_owner
            return fd

        if flags_noatime != flags_normal:
            # Always use O_NOATIME version.
            Archive._open_rb = open_noatime_with_fallback
        else:
            # Always use non-O_NOATIME version.
            Archive._open_rb = open_simple
        return Archive._open_rb(path, st)
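
# A hedged sketch of how the Archive pieces above fit together when creating a
# new archive. The function name is this sketch's own; `repository`, `key`,
# `manifest` and `cache` stand in for objects obtained elsewhere (e.g. via
# Manifest.load() and Cache(), as in the do_prune/do_delete handlers above),
# and error handling is omitted.
def create_archive_sketch(repository, key, manifest, cache, name, paths):
    archive = Archive(repository, key, manifest, name, cache=cache, create=True)
    for path in paths:
        st = os.lstat(path)
        if stat.S_ISREG(st.st_mode):
            archive.process_file(path, st, cache)  # chunks, deduplicates, records item
        elif stat.S_ISDIR(st.st_mode):
            archive.process_dir(path, st)          # metadata-only item
        elif stat.S_ISLNK(st.st_mode):
            archive.process_symlink(path, st)
    archive.save()  # packs the metadata, commits repository and cache
    return archive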