def main():
    """Print total and used bytes for each space info entry of a filesystem.

    Unit selection flags are applied in a fixed order, so options checked
    later override earlier ones.
    """
    parser = arg_parser()
    args = parser.parse_args()
    # Defaults: automatic human readable formatting, IEC (base 1024) units.
    unit = None
    binary = True
    if args.raw:
        unit = ''
    if args.human_readable:
        unit = None
    if args.human_si:
        unit = None
        binary = False
    if args.iec:
        binary = True
    if args.si:
        binary = False
    if args.kbytes:
        unit = 'K'
    if args.mbytes:
        unit = 'M'
    if args.gbytes:
        unit = 'G'
    if args.tbytes:
        unit = 'T'
    # Leftover positional arguments are an error: show usage and stop.
    if len(args.junk) > 0:
        parser.print_help(sys.stderr)
        return
    with btrfs.FileSystem(args.path[0]) as fs:
        for space in fs.space_info():
            space_type = btrfs.utils.space_type_description(space.flags)
            profile = btrfs.utils.space_profile_description(space.flags)
            total = btrfs.utils.pretty_size(space.total_bytes, unit, binary)
            used = btrfs.utils.pretty_size(space.used_bytes, unit, binary)
            print("{}, {}: total={}, used={}".format(space_type, profile,
                                                     total, used))
def __init__(self, host, path, mode, dryrun): """ Initialize. host is ignored. path is the file system location of the read-only subvolumes. """ # Don't lose a trailing slash -- it's significant path = os.path.abspath(path) + ("/" if path.endswith("/") else "") super(ButterStore, self).__init__(host, path, mode, dryrun) if not os.path.isdir(self.userPath): raise Exception("'%s' is not an existing directory" % (self.userPath)) self.isDiffStore = True self.butter = Butter.Butter( dryrun) # subprocess command-line interface self.btrfs = btrfs.FileSystem(self.userPath) # ioctl interface # Dict of {uuid: <btrfs.Volume>} self.butterVolumes = {} # Volumes to be deleted using the '--delete' option. # Initialized to hold all volumes inside destination directory, # Then volumes in source are "kept", and removed from extraVolumes. self.extraVolumes = {}
def main():
    """Print total and used bytes for each space info entry of a filesystem.

    Unit selection flags are applied in a fixed order, so options checked
    later override earlier ones.
    """
    parser = arg_parser()
    args = parser.parse_args()
    unit = None  # None -> automatic human readable formatting
    binary = True  # True -> IEC (base 1024) units
    # Fixed: test truthiness directly instead of the unidiomatic `is True`
    # identity comparisons.
    if args.raw:
        unit = ''
    if args.human_readable:
        unit = None
    if args.human_si:
        unit = None
        binary = False
    if args.iec:
        binary = True
    if args.si:
        binary = False
    if args.kbytes:
        unit = 'K'
    if args.mbytes:
        unit = 'M'
    if args.gbytes:
        unit = 'G'
    if args.tbytes:
        unit = 'T'
    # Leftover positional arguments are an error: show usage and stop.
    if args.junk:
        parser.print_help(sys.stderr)
        return
    fs = btrfs.FileSystem(args.path[0])
    for space in fs.space_info():
        print("{0}, {1}: total={2}, used={3}".format(
            btrfs.utils.block_group_type_str(space.flags),
            btrfs.utils.block_group_profile_str(space.flags),
            btrfs.utils.pretty_size(space.total_bytes, unit, binary),
            btrfs.utils.pretty_size(space.used_bytes, unit, binary)))
def main():
    """Generate a PNG heatmap of btrfs space usage.

    Walks either the whole filesystem (sorted physically by device extent
    or virtually by chunk) or a single block group, then writes the
    resulting grid to a PNG file.

    Raises:
        HeatmapError: for all anticipated failure modes, so the caller can
            present a clean error message.
    """
    args = parse_args()
    path = args.mountpoint
    # Combine --verbose and --quiet into one signed verbosity level.
    verbose = 0
    if args.verbose is not None:
        verbose += args.verbose
    if args.quiet is not None:
        verbose -= args.quiet
    try:
        fs = btrfs.FileSystem(path)
        # Components of the auto-generated output filename; extended below.
        filename_parts = ['fsid', fs.fsid]
        if args.curve != 'hilbert':
            filename_parts.append(args.curve)
        bg_vaddr = args.blockgroup
        if bg_vaddr is None:
            # Whole-filesystem scope.
            if args.sort == 'physical':
                grid = walk_dev_extents(fs, order=args.order, size=args.size,
                                        verbose=verbose, curve=args.curve)
            elif args.sort == 'virtual':
                filename_parts.append('chunks')
                grid = walk_chunks(fs, order=args.order, size=args.size,
                                   verbose=verbose, curve=args.curve)
            else:
                raise HeatmapError("Invalid sort option {}".format(args.sort))
        else:
            # Single block group scope.
            try:
                block_group = fs.block_group(bg_vaddr)
            except IndexError:
                raise HeatmapError(
                    "No block group at vaddr {}!".format(bg_vaddr))
            grid = walk_extents(fs, [block_group], order=args.order,
                                size=args.size, verbose=verbose,
                                curve=args.curve)
            filename_parts.extend(['blockgroup', block_group.vaddr])
    except OSError as e:
        # Translate the two most common kernel API failures into hints.
        if e.errno == errno.EPERM:
            # Fixed: removed a stray no-op .format(e) call on a string
            # literal that contains no replacement fields.
            raise HeatmapError(
                "Insufficient permissions to use the btrfs kernel API. "
                "Hint: Try running the script as root user.")
        elif e.errno == errno.ENOTTY:
            raise HeatmapError(
                "Unable to retrieve data. Hint: Not a btrfs file system?")
        raise
    try:
        filename = generate_png_file_name(args.output, filename_parts)
        grid.write_png(filename)
    except Exception as e:
        raise HeatmapError("Unable to write output file {}: {}".format(
            filename, e))
def main():
    """Balance up to sys.argv[1] block groups of the filesystem at sys.argv[2].

    Each iteration balances the block group chosen by analyze_block_groups;
    stops early once no candidate is returned.
    """
    number_of_blocks_to_balance = int(sys.argv[1])
    fs = btrfs.FileSystem(sys.argv[2])
    # Fixed: idiomatic range(n) instead of range(0, n), `_` for the unused
    # index, and `is None` instead of `!= None`.
    for _ in range(number_of_blocks_to_balance):
        min_size_block = analyze_block_groups(fs)
        if min_size_block is None:
            break
        balance_block_group(fs, min_size_block)
def main():
    """Balance the least-used block group for as long as a balance is needed.

    Raises:
        NameError: when a balance is wanted but no candidate block group is
            found (kept as-is for backward compatibility with callers,
            although RuntimeError would be the conventional choice).
    """
    fs = btrfs.FileSystem(sys.argv[1])
    while balance_needed(fs):
        min_size_block = analyze_block_groups(fs)
        # Fixed: `is None` instead of `!= None`; guard-style raise keeps the
        # happy path flat.
        if min_size_block is None:
            raise NameError("Balance wants to run, but no data will be moved!")
        balance_block_group(fs, min_size_block)
    else:
        # while/else: runs when the loop condition becomes false, i.e. no
        # (further) balance is needed -- not when the exception above fires.
        print("No balance is needed")
def main():
    """Keep balancing block groups until none remain at or below the limit."""
    usage_limit = int(sys.argv[1])
    fs = btrfs.FileSystem(sys.argv[2])
    least_used_pct, candidates = load_block_groups(fs, usage_limit)
    # Nothing qualifies: report the least used percentage and stop.
    if not candidates:
        print("Nothing to do, least used block group has used_pct {}".format(
            least_used_pct))
        return
    while candidates:
        balance_one_block_group(fs, candidates, usage_limit)
def generate_extents(path, tree):
    """Yield (header, path, tree) for each EXTENT_DATA item in a subvolume tree.

    Opens its own FileSystem handle so it can run inside worker processes.
    """
    fs = btrfs.FileSystem(path)
    # Fixed: close the file descriptor in a finally block so it is also
    # released when the consumer abandons the generator early
    # (GeneratorExit) or the ioctl search raises.
    try:
        min_key = btrfs.ctree.Key(0, btrfs.ctree.EXTENT_DATA_KEY, 0)
        for header, _ in btrfs.ioctl.search_v2(fs.fd, tree, min_key):
            if header.type == btrfs.ctree.EXTENT_DATA_KEY:
                yield header, path, tree
    finally:
        os.close(fs.fd)
def disk_parse(data_tree, path, tree):
    """Scan one subvolume tree and record every regular file extent.

    For each non-inline EXTENT_DATA item, adds a key identifying the
    on-disk extent together with the logical [offset, stop) range and the
    owning inode number to data_tree.
    """
    fs = btrfs.FileSystem(path)
    # Fixed: the file descriptor was previously leaked; close it like the
    # sibling helpers (generate_extents, disk_parse_pipe) do, and do so in
    # a finally block so errors cannot skip the cleanup.
    try:
        min_key = btrfs.ctree.Key(0, btrfs.ctree.EXTENT_DATA_KEY, 0)
        for header, data in btrfs.ioctl.search_v2(fs.fd, tree, min_key):
            if header.type != btrfs.ctree.EXTENT_DATA_KEY:
                continue
            datum = btrfs.ctree.FileExtentItem(header, data)
            inode = datum.key.objectid
            # Inline extents carry their data inside the metadata item and
            # have no separate on-disk extent to account for.
            if datum.type != btrfs.ctree.FILE_EXTENT_INLINE:
                key = unique_number(datum.disk_bytenr, datum.disk_num_bytes)
                stop = datum.offset + datum.num_bytes
                data_tree.add(key, datum.offset, stop, inode)
    finally:
        os.close(fs.fd)
def actual_extent_parsing(item):
    """Resolve one queued (header, path, tree) item back to its extent data.

    Returns (key, offset, stop, inode) for a non-inline extent, or None
    when the item turns out to be inline. Designed as a multiprocessing
    worker: it opens and closes its own FileSystem handle per call.
    """
    header, path, tree = item
    result = None
    fs = btrfs.FileSystem(path)
    # Fixed: close the fd in a finally block so it is not leaked when the
    # search or item decoding raises.
    try:
        key = btrfs.ctree.Key(header.objectid, btrfs.ctree.EXTENT_DATA_KEY,
                              header.offset)
        for header, data in btrfs.ioctl.search_v2(fs.fd, tree, key,
                                                  nr_items=1):
            datum = btrfs.ctree.FileExtentItem(header, data)
            if datum.type != btrfs.ctree.FILE_EXTENT_INLINE:
                key = unique_number(datum.disk_bytenr, datum.disk_num_bytes)
                stop = datum.offset + datum.num_bytes
                result = (key, datum.offset, stop, datum.key.objectid)
    finally:
        os.close(fs.fd)
    return result
def main():
    """Report potential and actual dedup/reflink savings for a filesystem.

    Prints aggregate numbers, and when -o is given writes a per-file report
    of files whose data is only partially unique.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-r", "--root", type=int, default=5,
        help="current active subvolume to analyze first, default is 5")
    parser.add_argument("path", type=str, help="path of the btrfs filesystem")
    parser.add_argument(
        "-o", '--output', nargs='?', type=argparse.FileType('w'),
        help="File to write results for all files that are impacted be dedup")
    args = parser.parse_args()
    data_tree = TreeWrapper()
    disk_parse(data_tree, args.path, args.root)
    data_tree.transform()
    total_unique_data, uniquedatainode = data_tree.find_unique()
    total_data = len(data_tree)
    print("Disk space gained by dedup/reflink:",
          btrfs.utils.pretty_size(total_data - total_unique_data))
    print("Disk space used only by one file:",
          btrfs.utils.pretty_size(total_unique_data))
    print("Total disk space used by files:",
          btrfs.utils.pretty_size(total_data))
    print("Percentage gained by dedup {:.2%}".format(
        (total_data - total_unique_data) / total_data))
    # Fixed: `is not None` instead of `!= None`.
    if args.output is not None:
        fs = btrfs.FileSystem(args.path)
        for inode, unique_size in uniquedatainode.items():
            # Fixed: narrowed the bare `except:` to Exception; still
            # best-effort, skipping inodes that cannot be resolved to a
            # name any more. Also dropped the dead `file = None` priming.
            try:
                file = btrfs.ioctl.ino_lookup(fs.fd, args.root, inode)
            except Exception:
                continue
            filename = args.path + "/" + file.name_bytes.decode('utf-8')
            # NOTE(review): the looked-up name appears to carry a trailing
            # separator character that is stripped here -- confirm against
            # ino_lookup's return format.
            filename = filename[:-1]
            full_size = os.path.getsize(filename)
            percentage = unique_size / full_size
            # Only report files that are not fully unique.
            if percentage < 1.0:
                line = "{:>9} {:>9} {:>7.2%} {}\n".format(
                    btrfs.utils.pretty_size(unique_size),
                    btrfs.utils.pretty_size(full_size), percentage, filename)
                args.output.write(line)
def disk_parse_pipe(pipe, path, tree):
    """Scan one subvolume tree and stream extent records into a pipe.

    Sends (key, offset, stop, inode) tuples for every non-inline
    EXTENT_DATA item, then a final None sentinel, and closes the pipe.
    Intended to run as a child process.
    """
    print("Parsing subvolume:", tree)
    fs = btrfs.FileSystem(path)
    # Fixed: release the pipe and the file descriptor in a finally block so
    # they are also cleaned up when the scan raises; previously an error
    # could leave the parent blocked on a half-open pipe and leak the fd.
    try:
        min_key = btrfs.ctree.Key(0, btrfs.ctree.EXTENT_DATA_KEY, 0)
        for header, data in btrfs.ioctl.search_v2(fs.fd, tree, min_key):
            if header.type != btrfs.ctree.EXTENT_DATA_KEY:
                continue
            datum = btrfs.ctree.FileExtentItem(header, data)
            if datum.type != btrfs.ctree.FILE_EXTENT_INLINE:
                key = unique_number(datum.disk_bytenr, datum.disk_num_bytes)
                stop = datum.offset + datum.num_bytes
                pipe.send((key, datum.offset, stop, datum.key.objectid))
        # Sentinel telling the consumer the scan finished successfully.
        pipe.send(None)
    finally:
        pipe.close()
        os.close(fs.fd)
def main():
    """Translate a (devid, physical address) pair into a logical address."""
    if len(sys.argv) != 4:
        print(f"""Usage: {sys.argv[0]} <btrfs_dev_id> <physical_address> <btrfs_mount_point>
The devid can be gotten from `btrfs fi show` command""")
        exit(1)
    devid = int(sys.argv[1])
    paddr = int(sys.argv[2])
    mountpoint = sys.argv[3]
    with btrfs.FileSystem(mountpoint) as fs:
        # Locate the device extent that contains the given physical address.
        extent = get_dev_extent(fs, devid, paddr, mountpoint)
        print(f"INF: devid {devid} paddr {paddr} is part of {extent}")
        print("NOTE: this won't work for RAID56!")
        offset_in_extent = paddr - extent.paddr
        print(f"paddr is {offset_in_extent} bytes into that dev extent")
        print(f"logical/virtual address is {extent.chunk_offset + offset_in_extent}")
def main(): """ Main program. """ args = command.parse_args() with btrfs.FileSystem(args.dir) as mount: # mount.rescanSizes() fInfo = mount.FS_INFO() pprint.pprint(fInfo) vols = mount.subvolumes # for dev in mount.devices: # pprint.pprint(dev) for vol in vols: print(vol) return 0
def __init__(self, host, path, mode, dryrun): """ Initialize. host is ignored. path is the file system location of the read-only subvolumes. """ # Don't lose a trailing slash -- it's significant path = os.path.abspath(path) + ("/" if path.endswith("/") else "") super(ButterStore, self).__init__(host, path, mode, dryrun) if not os.path.isdir(self.userPath): raise Exception("'%s' is not an existing directory" % (self.userPath)) self.butter = Butter.Butter( dryrun) # subprocess command-line interface self.btrfs = btrfs.FileSystem(self.userPath) # ioctl interface self.butterVolumes = {} # Dict of {uuid: <btrfs.Volume>} self.extraVolumes = {}
def main():
    """Analyze shared/unique extent usage across the subvolumes of a filesystem.

    Parses the selected subvolume trees in parallel, then prints the unique
    extent size per subvolume and (unless -u is given) the extent sizes
    added on top of the previous and the current subvolume.
    """
    multiprocessing.set_start_method('spawn')
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-u", "--unique", action='store_true',
        help="calculate only unique data, -r argument makes no sense if -u is active")
    parser.add_argument("-f", "--files", action='store_true',
                        help="find filenames that exist in unique extents")
    parser.add_argument("path", type=str, help="path of the btrfs filesystem")
    parser.add_argument(
        "-r", "--root", type=int, default=5,
        help="current active subvolume to analyze first, default is 5")
    group = parser.add_mutually_exclusive_group()
    group.add_argument('-i', '--ignore', action='store_true',
                       help="Do not analyze the specified subvolumes")
    group.add_argument('-o', '--only', action='store_true',
                       help="Analyze only the specified subvolumes")
    # Fixed: typo "ingore" -> "ignore" in the user-visible help text.
    parser.add_argument('subvolume', nargs='*', type=int,
                        help='Subvolumes to ignore or analyze')
    args = parser.parse_args()
    # Find subvolumes to parse; make sure the -r subvolume stays first.
    parse_trees = [5]
    if args.root != 5:
        parse_trees = [args.root, 5]
    fs = btrfs.FileSystem(args.path)
    for subvol in fs.subvolumes():
        if subvol.key.objectid != args.root:
            parse_trees.append(subvol.key.objectid)
    # Subvolumes named by the user: either the only ones analyzed (-o), or
    # the ones excluded from analysis (-i).
    special_subvolumes = set(args.subvolume)
    # Fixed: idiomatic boolean test instead of `== False` comparisons.
    # Default to ignore-mode when neither -i nor -o was given.
    if not args.ignore and not args.only:
        args.ignore = True
    # Remove the unneeded subvolumes.
    if args.ignore:
        for item in special_subvolumes:
            # Fixed: catch only ValueError (raised when the id is absent)
            # instead of a bare except that would hide unrelated bugs.
            try:
                parse_trees.remove(item)
            except ValueError:
                pass
    else:
        for tree in parse_trees[:]:
            if tree not in special_subvolumes:
                parse_trees.remove(tree)
    data_tree = TreeWrapper()
    # Move the root subvolume to the end: older subvolumes must come first.
    changed_snapshots = deque(parse_trees)
    changed_snapshots.rotate(-1)
    parse_trees = list(changed_snapshots)
    data_tree.add_snapshots(parse_trees)
    # Parse the trees from newer to older.
    parse_trees = list(reversed(parse_trees))
    pool = multiprocessing.Pool(processes=4)
    print("Subvolumes to parse:", parse_trees)
    for tree in parse_trees:
        disk_parse_parallel(pool, data_tree, args.path, tree, args.files)
    pool.close()
    pool.join()
    data_tree.transform()
    unique_sum = 0
    unique_data, files = data_tree.find_unique(fs, args.files)
    # If only unique analysis is wanted, skip the snapshot difference passes.
    if args.unique:
        current_data = Counter()
        previous_data = Counter()
    else:
        current_data = data_tree.find_snapshot_size_to_current()
        previous_data = data_tree.find_snapshot_size_to_previous()
    print(" Unique File Extents Extents added ontop Extents added ontop of")
    print(" per subvolume of previous subvolume current(act) subvolume")
    print("---------------------|---------------------|----------------------")
    print("SubvolumId Size Size Size")
    for snapshot in parse_trees:
        print("{:>10} {:>10} {:>10} {:>10}".format(
            snapshot,
            btrfs.utils.pretty_size(unique_data[snapshot]),
            btrfs.utils.pretty_size(previous_data[snapshot]),
            btrfs.utils.pretty_size(current_data[snapshot])))
        unique_sum += unique_data[snapshot]
    total_data = len(data_tree)
    print("Unique Data size of subvolumes:",
          btrfs.utils.pretty_size(unique_sum),
          "Total size:", btrfs.utils.pretty_size(total_data),
          "Volatility:", "{:.2%}".format(unique_sum / total_data))
    if args.files:
        print()
        print("Possible Unique Files:")
        for file, myset in files.items():
            print(file, ":", myset)
def __init__(self, device, mount):
    """Remember the device and mount point; open a btrfs handle on the mount."""
    self.btrfs = btrfs.FileSystem(mount)
    self.device = device
    self.mount = mount
print("{} is not the start of a subvolume (inum {} != 256)".format(subvol_path, inum)) sys.exit(1) subvol_fd = os.open(subvol_path, os.O_RDONLY) tree, _ = btrfs.ioctl.ino_lookup(subvol_fd) def print_subvol_info(root): print(" subvol_id: {}".format(root.key.objectid)) print(" received_uuid: {}".format(root.received_uuid)) print(" stime: {}".format(root.stime)) print(" stransid: {}".format(root.stransid)) print(" rtime: {}".format(root.rtime)) print(" rtransid: {}".format(root.rtransid)) print() with btrfs.FileSystem(subvol_path) as fs: print("Current subvolume information:") root = list(fs.subvolumes(min_id=tree, max_id=tree))[0] print_subvol_info(root) print("Setting received subvolume...") print() rtransid, rtime = btrfs.ioctl.set_received_subvol(subvol_fd, received_uuid, stransid, stime) os.close(subvol_fd) print("Resulting subvolume information:") root = list(fs.subvolumes(min_id=tree, max_id=tree))[0] print_subvol_info(root)
#!/usr/bin/python from __future__ import print_function import btrfs import sys for space in btrfs.FileSystem(sys.argv[1]).space_info(): print(space)
tree = int(sys.argv[1]) except ValueError: tree = { 'root': btrfs.ctree.ROOT_TREE_OBJECTID, 'extent': btrfs.ctree.EXTENT_TREE_OBJECTID, 'chunk': btrfs.ctree.CHUNK_TREE_OBJECTID, 'dev': btrfs.ctree.DEV_TREE_OBJECTID, 'fs': btrfs.ctree.FS_TREE_OBJECTID, 'csum': btrfs.ctree.CSUM_TREE_OBJECTID, 'quota': btrfs.ctree.QUOTA_TREE_OBJECTID, 'uuid': btrfs.ctree.UUID_TREE_OBJECTID, 'free_space': btrfs.ctree.FREE_SPACE_TREE_OBJECTID, 'tree_log': btrfs.ctree.TREE_LOG_OBJECTID, 'tree_log_fixup': btrfs.ctree.TREE_LOG_FIXUP_OBJECTID, 'tree_reloc': btrfs.ctree.TREE_RELOC_OBJECTID, 'data_reloc': btrfs.ctree.DATA_RELOC_TREE_OBJECTID, }.get(sys.argv[1].lower(), None) if tree is None: print( "ERROR: specify tree number or short name (e.g. root, extent, fs)") sys.exit(1) with btrfs.FileSystem(sys.argv[2]) as fs: try: btrfs.utils.pretty_print( (btrfs.ctree.classify(header, data) for header, data in btrfs.ioctl.search_v2(fs.fd, tree))) except FileNotFoundError: print("ERROR: tree {} does not exist".format(tree)) sys.exit(1)
def main():
    """Open the filesystem given on the command line and inspect it."""
    mountpoint = sys.argv[1]
    with btrfs.FileSystem(mountpoint) as fs:
        results = inspect_from(fs)
import btrfs


def subvolumes_inside(fs, parent_tree):
    """Yield (tree id, name) for every direct child subvolume of parent_tree.

    The name includes the directory path of the subvolume inside its
    parent, as resolved via INO_LOOKUP.
    """
    min_key = btrfs.ctree.Key(parent_tree, btrfs.ctree.ROOT_REF_KEY, 0)
    max_key = btrfs.ctree.Key(parent_tree, btrfs.ctree.ROOT_REF_KEY + 1, 0) - 1
    # Fixed: replaced the magic tree number 1 with the named constant for
    # the root tree, which is where ROOT_REF items live.
    for header, data in btrfs.ioctl.search_v2(
            fs.fd, btrfs.ctree.ROOT_TREE_OBJECTID, min_key, max_key):
        ref = btrfs.ctree.RootRef(header, data)
        # Directory the subvolume lives in, plus the subvolume's own name.
        path = (btrfs.ioctl.ino_lookup(
            fs.fd, ref.parent_tree, ref.dirid).name_bytes + ref.name).decode()
        yield ref.tree, path


def print_subvolumes_inside(fs, parent_tree=btrfs.ctree.FS_TREE_OBJECTID,
                            parent_path=""):
    """Recursively print id, parent id and full path of nested subvolumes."""
    for tree, path in subvolumes_inside(fs, parent_tree):
        sub_path = "{}/{}".format(parent_path, path)
        print("ID {} parent {} path {}".format(tree, parent_tree, sub_path))
        print_subvolumes_inside(fs, tree, sub_path)


with btrfs.FileSystem('/home/fgervais/btrfs') as fs:
    print_subvolumes_inside(fs)
#!/usr/bin/python3
import btrfs
import sys

# Require exactly one argument: the mount point of the filesystem.
if len(sys.argv) < 2:
    print("Usage: {} <mountpoint>".format(sys.argv[0]))
    sys.exit(1)

fs = btrfs.FileSystem(sys.argv[1])
print(fs.fs_info())
# Per-device information and error statistics.
for device in fs.devices():
    print(fs.dev_info(device.devid))
    print(fs.dev_stats(device.devid))
def main():
    """Render a space usage heatmap PNG of a filesystem or one block group.

    Determines scope, curve order, image size and output filename from the
    command line arguments (computing defaults where omitted), walks the
    corresponding extents into a Grid, and saves the result as a PNG.
    """
    args = parse_args()
    path = args.mountpoint
    order = args.order  # space-filling curve order; None -> computed below
    bg_vaddr = args.blockgroup
    # A given block group vaddr switches to single-block-group scope.
    scope = 'filesystem' if bg_vaddr is None else 'blockgroup'
    pngfile = args.pngfile
    # If the png argument is an existing directory, auto-generate the file
    # name inside it; otherwise treat it as the output file itself.
    if pngfile is not None and os.path.isdir(pngfile):
        pngdir = pngfile
        pngfile = None
    else:
        pngdir = None
    fs = btrfs.FileSystem(path)
    fs_info = fs.fs_info()
    print(fs_info)
    if scope == 'filesystem':
        total_bytes, dev_offset = device_size_offsets(fs)
        if order is None:
            import math
            # Default order derived from total size, capped at 10.
            order = min(
                10,
                int(
                    math.ceil(
                        math.log(math.sqrt(total_bytes / (32 * 1048576)),
                                 2))))
        if pngfile is None:
            import time
            pngfile = "fsid_{0}_at_{1}.png".format(fs.fsid, int(time.time()))
    elif scope == 'blockgroup':
        try:
            block_group = fs.block_group(bg_vaddr)
        except IndexError:
            print("Error: no block group at vaddr {0}!".format(bg_vaddr),
                  file=sys.stderr)
            sys.exit(1)
        total_bytes = block_group.length
        if order is None:
            import math
            # Default order derived from block group length and sector size.
            order = int(
                math.ceil(
                    math.log(
                        math.sqrt(block_group.length / fs_info.sectorsize),
                        2)))
        if pngfile is None:
            import time
            pngfile = "fsid_{0}_blockgroup_{1}_at_{2}.png".format(
                fs.fsid, block_group.vaddr, int(time.time()))
    else:
        raise Exception("Scope {0} not implemented!".format(scope))
    if pngdir is not None:
        pngfile = os.path.join(pngdir, pngfile)
    size = args.size
    if size is None:
        size = 10
    elif size < order:
        # size must be at least the curve order: silently shrink an order
        # that was defaulted, but reject an explicitly requested one.
        if args.order is None:
            order = size
        else:
            # NOTE(review): "as bit as" looks like a typo for "as big as"
            # in the message below.
            print("Error: size {0} needs to be at least as bit as order {1}".
                  format(size, order),
                  file=sys.stderr)
            sys.exit(1)
    verbose = args.verbose if args.verbose is not None else 0
    curve_type = args.curve
    # Lazy imports: only pull in the curve module that is actually used.
    if curve_type == 'hilbert':
        import hilbert
        curve = hilbert.curve(order)
    elif curve_type == 'linear':
        import linear
        curve = linear.notsocurvy(order)
    else:
        raise Exception(
            "Space filling curve type {0} not implemented!".format(curve_type))
    print("scope {0} curve {1} order {2} size {3} pngfile {4}".format(
        scope, curve_type, order, size, pngfile))
    grid = Grid(curve, total_bytes, verbose)
    if scope == 'filesystem':
        walk_dev_extents(fs, total_bytes, dev_offset, grid, verbose)
    elif scope == 'blockgroup':
        print(block_group)
        walk_extents(fs, block_group, grid, verbose)
    # Scale the grid up when the requested output size exceeds the order.
    if size > order:
        scale = 2**(size - order)
        png_grid = grid.grid(int(grid.height * scale), int(grid.width * scale))
    else:
        png_grid = grid.grid()
    png.from_array(png_grid, 'L').save(pngfile)