def list_subvolume(options, _fuse):
    """
    Print out the known subvolumes, optionally with usage statistics.

    @param options: Commandline options
    @type  options: object

    @param _fuse: FUSE wrapper
    @type  _fuse: dedupsqlfs.fuse.dedupfs.DedupFS
    """
    # Listing must not mutate storage: switch off every garbage-collection
    # hook and open the database read-only.
    for gc_opt in ("gc_umount_enabled", "gc_vacuum_enabled", "gc_enabled"):
        _fuse.setOption(gc_opt, False)
    _fuse.setReadonly(True)

    from dedupsqlfs.fuse.subvolume import Subvolume

    subvol = Subvolume(_fuse.operations)
    subvol.list(_fuse.getOption("subvol_list_with_stats"))

    _fuse.operations.destroy()
    return
def count_snapshot_created_today(options, _fuse):
    """
    Report how many subvolumes (snapshots) were created today.

    @param options: Commandline options
    @type  options: object

    @param _fuse: FUSE wrapper
    @type  _fuse: dedupsqlfs.fuse.dedupfs.DedupFS
    """
    # Counting is a pure query: disable garbage collection and go read-only.
    for gc_opt in ("gc_umount_enabled", "gc_vacuum_enabled", "gc_enabled"):
        _fuse.setOption(gc_opt, False)
    _fuse.setReadonly(True)

    from dedupsqlfs.fuse.subvolume import Subvolume

    subvol = Subvolume(_fuse.operations)
    subvol.count_today_created_subvols(True)

    _fuse.operations.destroy()
    return
def compress_subvol_tables(options, _fuse):
    """
    Compress all non-root subvolume tables.

    @param options: Commandline options
    @type  options: object

    @param _fuse: FUSE wrapper
    @type  _fuse: dedupsqlfs.fuse.dedupfs.DedupFS
    """
    # No GC while compacting tables; storage itself is opened read-only.
    for gc_opt in ("gc_umount_enabled", "gc_vacuum_enabled", "gc_enabled"):
        _fuse.setOption(gc_opt, False)
    _fuse.setReadonly(True)

    from dedupsqlfs.fuse.subvolume import Subvolume

    subvol = Subvolume(_fuse.operations)
    subvol.compress_all_non_root_tables()

    _fuse.operations.destroy()
    return
def calc_subvol_diff(options, _fuse):
    """
    Calculate the difference between the root and the subvolume named in
    ``options.subvol_diff``.

    @param options: Commandline options
    @type  options: object

    @param _fuse: FUSE wrapper
    @type  _fuse: dedupsqlfs.fuse.dedupfs.DedupFS
    """
    # Diffing is a pure query: disable garbage collection and go read-only.
    for gc_opt in ("gc_umount_enabled", "gc_vacuum_enabled", "gc_enabled"):
        _fuse.setOption(gc_opt, False)
    _fuse.setReadonly(True)

    from dedupsqlfs.fuse.subvolume import Subvolume

    subvol = Subvolume(_fuse.operations)
    subvol.get_root_diff(options.subvol_diff.encode('utf8'))

    _fuse.operations.destroy()
    return
def remove_subvolume(options, _fuse):
    """
    Remove the subvolume named in ``options.subvol_remove``.

    @param options: Commandline options
    @type  options: object

    @param _fuse: FUSE wrapper
    @type  _fuse: dedupsqlfs.fuse.dedupfs.DedupFS
    """
    # Removal writes to storage: GC stays off, but transactions are enabled
    # and the database is opened writable.
    for gc_opt in ("gc_umount_enabled", "gc_vacuum_enabled", "gc_enabled"):
        _fuse.setOption(gc_opt, False)
    _fuse.setOption("use_transactions", True)
    _fuse.setReadonly(False)

    from dedupsqlfs.fuse.subvolume import Subvolume

    subvol = Subvolume(_fuse.operations)
    subvol.remove(options.subvol_remove.encode('utf8'))

    _fuse.operations.destroy()
    return
def report_disk_usage(self):  # {{{3
    """
    Collect and log disk-usage statistics for the whole filesystem.

    Sums the on-disk size of every database table (shared tables plus each
    subvolume's private tables), the apparent size of all subvolumes and the
    unique / deduplicated / compressed data sizes, then writes a report
    through the logger.  Read-only apart from the log output.

    Fixes over the previous version:
    - compression methods that happened to share the same block count were
      silently dropped from the "Compression by types" report (the mapping
      was keyed by count);
    - a fully sparse filesystem (apparent size > 0 but no stored blocks)
      raised ZeroDivisionError on the uniqueSize ratios.
    """
    from dedupsqlfs.fuse.subvolume import Subvolume
    subv = Subvolume(self.operations)
    manager = self.operations.getManager()

    self.getLogger().info("--" * 39)

    tableSubvol = manager.getTable("subvolume", True)

    # On-disk footprint of the shared (root) tables.
    disk_usage = 0
    disk_usage += manager.getTable("hash", True).getFileSize()
    disk_usage += manager.getTable("hash_compression_type", True).getFileSize()
    disk_usage += manager.getTable("hash_sizes", True).getFileSize()
    disk_usage += manager.getTable("compression_type", True).getFileSize()
    disk_usage += manager.getTable("name", True).getFileSize()
    disk_usage += manager.getTable("name_pattern_option", True).getFileSize()
    disk_usage += manager.getTable("option", True).getFileSize()
    disk_usage += manager.getTable("subvolume", True).getFileSize()
    disk_usage += manager.getTable("block", True).getFileSize()

    apparentSize = 0
    dataSize = 0
    uniqueSize = 0
    compressedSize = 0
    compressedUniqueSize = 0
    compMethods = {}

    # Add each subvolume's apparent size and its private tables' file sizes.
    for subvol_id in tableSubvol.get_ids():
        subvol = tableSubvol.get(subvol_id)

        apparentSize += subv.get_apparent_size_fast(subvol["name"])

        disk_usage += manager.getTable("inode_" + subvol["hash"], True).getFileSize()
        disk_usage += manager.getTable("inode_option_" + subvol["hash"], True).getFileSize()
        disk_usage += manager.getTable("inode_hash_block_" + subvol["hash"], True).getFileSize()
        disk_usage += manager.getTable("link_" + subvol["hash"], True).getFileSize()
        disk_usage += manager.getTable("xattr_" + subvol["hash"], True).getFileSize()
        disk_usage += manager.getTable("tree_" + subvol["hash"], True).getFileSize()

    tableHCT = manager.getTable('hash_compression_type', True)
    tableHS = manager.getTable('hash_sizes', True)

    # hash_id -> number of blocks referencing that hash.
    hashCount = subv.prepareIndexHashIdCount()

    # Walk the hash index in pages so the id-list queries stay bounded.
    hashIds = tuple(hashCount.keys())
    current = 0
    pageSize = 20000
    while True:
        items = hashIds[current:current + pageSize]
        if not len(items):
            break
        current += pageSize

        hash_ids = ",".join(set(str(item) for item in items))
        hashTypes = tableHCT.get_types_by_hash_ids(hash_ids)
        hashSizes = tableHS.get_sizes_by_hash_ids(hash_ids)

        for hash_id in items:
            hash_cnt = hashCount[hash_id]

            method = self.operations.getCompressionTypeName(hashTypes[hash_id])
            compMethods[method] = compMethods.get(method, 0) + 1

            # hszItem: (uncompressed size, compressed size) — presumably;
            # TODO confirm against hash_sizes table schema.
            hszItem = hashSizes[hash_id]

            uniqueSize += hszItem[0]
            compressedUniqueSize += hszItem[1]
            dataSize += hszItem[0] * hash_cnt
            compressedSize += hszItem[1] * hash_cnt

    sparseSize = apparentSize - dataSize
    dedupSize = dataSize - uniqueSize

    count_all = sum(compMethods.values())

    if apparentSize:
        self.getLogger().info("Apparent size is %s, unique %s.",
                              format_size(apparentSize), format_size(uniqueSize))

        self.getLogger().info("Deduped size is %s, ratio is %.2f%%.",
                              format_size(dedupSize), 100.0 * dedupSize / apparentSize)

        self.getLogger().info("Sparse size is %s, ratio is %.2f%%.",
                              format_size(sparseSize), 100.0 * sparseSize / apparentSize)

        # uniqueSize may be 0 when the data is entirely sparse — guard the
        # ratios instead of crashing with ZeroDivisionError.
        self.getLogger().info("Databases take up %s, ratio is %.2f%%.",
                              format_size(disk_usage),
                              (100.0 * disk_usage / uniqueSize) if uniqueSize else 0.0)

        self.getLogger().info(
            "Compressed data take up %s:\n- unique %s,\n- saved apparent space is %.2f%%,\n- use of database space: %.2f%%).",
            format_size(compressedSize), format_size(compressedUniqueSize),
            100.0 * (apparentSize - compressedSize) / apparentSize,
            100.0 * compressedUniqueSize / disk_usage,
        )
        self.getLogger().info(
            "Meta data and indexes take up %s:\n- ratio over apparent is %.2f%%,\n- use of database space: %.2f%%).",
            format_size(disk_usage - compressedUniqueSize),
            (100.0 * (disk_usage - compressedUniqueSize) / uniqueSize) if uniqueSize else 0.0,
            100.0 * (disk_usage - compressedUniqueSize) / disk_usage,
        )
    else:
        self.getLogger().info("Apparent size is %s.", format_size(apparentSize))
        self.getLogger().info("Compressed size is %s.", format_size(compressedSize))
        self.getLogger().info("Databases take up %s.", format_size(disk_usage))

    if compressedSize:
        self.getLogger().info("--" * 39)
        self.getLogger().info("Compression by types:")

        # Sort (count, method) pairs, most used first.  The old code used a
        # {count: method} dict, which dropped every method that shared a
        # block count with another one.
        for cnt, compression in sorted(((c, m) for m, c in compMethods.items()),
                                       reverse=True):
            self.getLogger().info(" %8s used by %.2f%% blocks",
                                  compression, 100.0 * cnt / count_all)
    return