def load(self):
    """Make self.path available as a (possibly loop-backed) block device.

    If self.path is a regular file (``file`` does not report "block special"),
    attach it to the next free loop device and remember it in ``self.lodev``;
    otherwise probe the path directly.  Finishes by syncing /dev and
    rebuilding the device map.

    Returns:
        bool: True on success, False if any step raised (logged).
    """
    log.debug("loading mapable drive {0}".format(self.path))
    try:
        if not re.search(r"block special", str(sh.file(self.path).stdout, 'utf8'), flags=re.IGNORECASE):
            # Regular file: attach to the first free loop device.
            self.lodev = sh.losetup("-f").split()[0]
            sh.losetup(self.lodev, self.path)
            sh.blkid(self.lodev)
            try:
                sh.partprobe(self.lodev)
            except Exception:
                # Best-effort: partprobe can fail on partitionless images.
                # (was a bare `except:` — that also swallowed KeyboardInterrupt/SystemExit)
                pass
        else:
            # Already a block device: probe it in place.
            sh.blkid(self.path)
            try:
                sh.partprobe(self.path)
            except Exception:
                pass
        sh.sync("/dev/")
        self.process_devicemap()
    except Exception as e:
        log.exception(e)
        return False
    return True
def losetup_ctxmgr():
    """Context-manager generator: attach IMAGE to a free loop device and yield it.

    Yields the loop device path printed by ``losetup --show``.  The device is
    detached in a ``finally`` block so it is released even when the caller's
    ``with`` body raises — the original leaked the device in that case.
    """
    args = [
        "-f",
        "--show",
        "--offset", "0",
        "--sizelimit", "50000000000",
        IMAGE,
    ]
    device = losetup(*args).stdout.decode("utf8").strip()
    try:
        yield device
    finally:
        # Always detach, even if the with-block raised.
        losetup("-d", device)
def DeleteLoopDev(self, filepath):
    """Detach the loop device currently backing *filepath*.

    Raises:
        RuntimeError: if *filepath* is not an existing regular file, or no
            loop device is registered for it in ``self.containers``.
    """
    # Validate BEFORE touching self.containers[filepath]: the original logged
    # self.containers[filepath].loop_dev first, so a missing entry raised
    # KeyError instead of the intended RuntimeError.
    if not os.path.isfile(filepath):
        raise RuntimeError('Path to file [{0}] does not exist or is not a file!'.format(filepath))
    if filepath not in self.containers:
        raise RuntimeError('No loop device currently active for file [{0}]'.format(filepath))
    self.logger.info('Deleting loopback device [{0}] for file [{1}]'.format(self.containers[filepath].loop_dev, filepath))
    sh.losetup('-d', self.containers[filepath].loop_dev)
    self.logger.info('Deleted loopback device [{0}] for file [{1}]'.format(self.containers[filepath].loop_dev, filepath))
def CreateLoopDev(self, filepath):
    """Attach *filepath* to a free loop device and record it in self.containers.

    Raises:
        RuntimeError: if *filepath* is not an existing regular file, or no
            loop device is available.
    """
    # Validate the input file first — no point querying losetup for a path
    # we are going to reject anyway.  (Message typo "does not exists" fixed.)
    if not os.path.isfile(filepath):
        raise RuntimeError('Path to file [{0}] does not exist or is not a file!'.format(filepath))
    loop_dev = sh.losetup('-f').strip()
    if not loop_dev:
        raise RuntimeError('No available loop devices')
    self.logger.info('Creating loopback device [{0}] for file [{1}]'.format(loop_dev, filepath))
    # NOTE(review): assumes self.containers[filepath] already exists — verify
    # against the caller that registers the container entry.
    self.containers[filepath].loop_dev = loop_dev
    sh.losetup(loop_dev, filepath)
    self.logger.info('Created loopback device [{0}] for file [{1}]'.format(loop_dev, filepath))
def unload(self):
    """Detach the loop device attached by load() and clean up partition nodes.

    Returns:
        bool: True on success, False if losetup failed.  /dev is synced on
        both paths.
    """
    import glob  # local: only needed for partition-node cleanup
    try:
        if self.lodev:
            sh.losetup("-d", self.lodev)
            try:
                sh.partprobe(self.lodev)
                # The original did sh.rm("{0}p*".format(self.lodev)): sh runs
                # commands without a shell, so the glob never expanded and rm
                # always failed (silently).  Expand it ourselves.
                for node in glob.glob("{0}p*".format(self.lodev)):
                    sh.rm(node)
            except Exception:
                # Best-effort cleanup; was a bare `except:`.
                pass
        self.children = []
        sh.sync("/dev/")
        return True
    except sh.ErrorReturnCode:
        sh.sync("/dev/")
        return False
def _setup_image_file(self, prefix_image_path: str) -> None:
    """Create a 10G BTRFS image file at *prefix_image_path* if absent.

    When the file already exists, nothing is done.  Creation truncates a
    sparse 10G file, attaches it to a free loop device (via sudo) and runs
    mkfs.btrfs on that device.
    """
    # Guard clause: existing image needs no work.
    if os.path.exists(prefix_image_path):
        print('Use existing BTRFS-image...')
        return

    print('Creating BTRFS-image...')

    # Sparse image file.
    print(' > Create image-file')
    sh.truncate(prefix_image_path, s='10G')

    # Attach and format.
    print(' > Install BTRFS')
    captured = io.StringIO()
    with sh.contrib.sudo:
        sh.losetup('--show', '--find', prefix_image_path, _out=captured)
    device = captured.getvalue().strip()
    print(f' > Using device "{device}"')
    with sh.contrib.sudo:
        sh.Command('mkfs.btrfs')(f=device)
def losetup_context_manager(image, offset=None, size=None):
    """Context-manager generator: attach *image* with retries, yield the device.

    Args:
        image: path to the image file to attach.
        offset: optional byte offset passed as ``--offset``.
        size: optional byte limit passed as ``--sizelimit``.

    Yields the loop device path.  Detach happens in a ``finally`` block so the
    device is released even when the caller's ``with`` body raises — the
    original leaked the device in that case.
    """
    args = ["-f", "--show"]
    if offset is not None:
        args.extend(["--offset", offset])
    if size is not None:
        args.extend(["--sizelimit", size])
    args.append(image)
    hint = """\
Hint: If using a Virtual Machine, consider increasing the number of processors.
If using Docker Desktop for Windows or macOS, it may require restarting."""
    lo_wrap, lo_wrap_key = retry_counter.wrap(losetup, hint, *args, **SH_OPTS)
    # In the case of slow hardware the kernel might be in the middle of
    # tearing down internal structure, so retry with backoff.
    device = retry_call(lo_wrap, fargs=args, tries=10, delay=3, max_delay=30,
                        backoff=2).stdout.decode("utf8").strip()
    retry_counter.clear(lo_wrap_key)
    try:
        yield device
    finally:
        losetup("-d", device, **SH_OPTS)
def LicheePiImage(workdir, boot_files, kernel_files, rootfs_files):
    """Assemble a bootable LicheePi SD-card image (sdcard.img) in *workdir*.

    Creates a 300M blank image, partitions it (16M vfat boot + ext4 root) on
    a loop device, writes the bootstrap blob, then populates both partitions
    with boot/kernel/rootfs files mounted at /tmp/p1 and /tmp/p2.
    """
    mkdir('-p', workdir)
    image_path = str(Path(workdir).joinpath('sdcard.img'))

    # Blank 300 MiB image attached to a free loop device.
    dd('if=/dev/zero', f'of={image_path}', 'bs=1M', 'count=300')
    loop_dev = str(losetup('-f')).split()[0]
    losetup(loop_dev, image_path)

    # Partition table: 16M FAT boot partition, remainder Linux.
    sfdisk(cat(_in='1M,16M,c\n,,L'), loop_dev)
    partx('-u', loop_dev)
    mkfsvfat(f'{loop_dev}p1')
    mkfsext4(f'{loop_dev}p2')

    # Zero the gap before the partition, then write the bootstrap at 8K.
    dd('if=/dev/zero', f'of={loop_dev}', 'bs=1K', 'seek=1', 'count=1023')
    dd(f"if={boot_files['bootstrap']}", f'of={loop_dev}', 'bs=1K', 'seek=8')
    sync()

    # Mount both partitions.
    mkdir('-p', '/tmp/p1')
    mkdir('-p', '/tmp/p2')
    mount(f'{loop_dev}p1', '/tmp/p1')
    mount(f'{loop_dev}p2', '/tmp/p2')

    # Boot partition: bootloader, kernel, device tree, boot script.
    cp(boot_files['bin'], '/tmp/p1/')
    cp(kernel_files['bin'], '/tmp/p1/')
    cp(kernel_files['dtb'], '/tmp/p1/')
    mkimage('-C', 'none', '-A', 'arm', '-T', 'script',
            '-d', './resources/boot.cmd', '/tmp/p1/boot.scr')

    # Root partition: rootfs plus the standard skeleton directories.
    rsync('-r', '--links', rootfs_files['rootdir'] + '/', '/tmp/p2/')
    for subdir in ('etc/init.d', 'proc', 'dev', 'sys', 'var'):
        mkdir('-p', f'/tmp/p2/{subdir}')
    touch('/tmp/p2/etc/init.d/rcS')
    chown('-R', 'root:root', '/tmp/p2/')
def _mount(self, file: str, mode: str, mount_point: str) -> None:
    """Attach *file* as a block device (per self.__lomode) and mount it
    (per self.__mntmode).

    In 'mount' lomode the file is mounted directly with an offset and we
    return early; otherwise a device is obtained via losetup or udisksctl
    and then mounted.  *mode* is currently unused here.

    Raises:
        ValueError: on an unrecognised lomode/mntmode.
    """
    if self.__lomode == 'mount':
        sh.sudo.mount(file, mount_point, o=f'offset={self.__offset}', _fg=True)
        return
    elif self.__lomode == 'losetup':
        # .strip() added: `losetup -f` output ends with a newline, which the
        # original passed straight into the device path.
        self.__dev = str(sh.losetup(f=True)).strip()
        sh.sudo.losetup(self.__dev, file, _fg=True)
    elif self.__lomode == 'udisksctl':
        self.__dev = udisksctl_losetup(file)
    else:
        raise ValueError(f'unknown lomode: {self.__lomode!r}')
    print('mount: ' + self.__dev)
    assert self.__dev
    if self.__mntmode == 'mount':
        sh.sudo.mount(self.__dev, mount_point, o=f'loop,offset={self.__offset}', _fg=True)
    elif self.__mntmode == 'udisksctl':
        # NOTE(review): rebinding the local only — the caller never sees this
        # mount point; confirm that is intended.
        mount_point = udisksctl_mount(self.__dev)
    else:
        raise ValueError(f'unknown mntmode: {self.__mntmode!r}')
def cleanup_loop_device(device):
    """Detach *device* with ``losetup -d`` and echo the command's output."""
    result = sh.losetup("-d", device)
    print(result)
def cli(
    ctx,
    destination_folder: Path,
    zpool_size_mb: int,
    recordsize: str,
    large_dnode: bool,
    no_acl: bool,
    verbose: Union[bool, int, float],
    verbose_inf: bool,
    loopback: bool,
    record_count: int,
    ipython: bool,
):
    """Create a throwaway ZFS pool (optionally loop-backed), fill it with
    empty directories until inodes run out, and report space/compression stats.

    Must run as root; requires zpool_size_mb >= 64.  `record_count` and the
    tty flag from tv() are currently unused — NOTE(review): confirm intent.
    """
    tty, verbose = tv(
        ctx=ctx,
        verbose=verbose,
        verbose_inf=verbose_inf,
    )
    if os.getuid() != 0:
        ic("must be root")
        sys.exit(1)
    if zpool_size_mb < 64:
        raise ValueError("minimum zpool size is 64MB")
    timestamp = str(time.time())
    if verbose:
        ic(timestamp)

    if loopback:
        free_loop = sh.losetup("--find").splitlines()
        loop = Path(free_loop[0])
        if not path_is_block_special(loop):
            raise ValueError(f"loop device path {loop} is not block special")
        loops_in_use = sh.losetup("-l").splitlines()
        # ic(loops_in_use)
        for line in loops_in_use:
            # FIX: original tested `loop in loops_in_use` — a Path against a
            # list of strings, which is always False (and ignored `line`), so
            # an already-attached device was never detected.
            if str(loop) in line:
                raise ValueError(f"loop device {loop} already in use")

    destination = Path(destination_folder) / Path(f"zfstester_{timestamp}")
    os.makedirs(destination)
    destination_pool_file = destination / Path(f"test_pool_{timestamp}")
    destination_pool_file = destination_pool_file.resolve()
    if verbose:
        ic(destination_pool_file)
    # Backing file for the pool: one zpool_size_mb-sized zeroed block.
    sh.dd(
        "if=/dev/zero",
        f"of={destination_pool_file.as_posix()}",
        f"bs={zpool_size_mb}M",
        "count=1",
    )
    # dd if=/dev/urandom of=temp_zfs_key bs=32 count=1 || exit 1
    # key_path=`readlink -f temp_zfs_key`
    if loopback:
        # FIX: original was sh.losetup(loop, destination_pool_file, loop) —
        # the trailing `loop` is a spurious third positional argument
        # (cf. CreateLoopDev's `sh.losetup(loop_dev, filepath)`).
        sh.losetup(loop, destination_pool_file)
        atexit.register(cleanup_loop_device, loop)
        if verbose:
            ic(sh.losetup("-l"))

    zpool_name = destination_pool_file.name
    if verbose:
        ic(zpool_name)
    zpool_create_command = sh.Command("zpool")
    zpool_create_command = zpool_create_command.bake(
        "create",
        "-O", "atime=off",
        "-O", "compression=lz4",
        "-O", "mountpoint=none",
        "-O", f"recordsize={recordsize}",
        zpool_name,
    )
    # vdev is the loop device when loop-backed, else the file itself.
    if loopback:
        zpool_create_command = zpool_create_command.bake(loop)
    else:
        zpool_create_command = zpool_create_command.bake(destination_pool_file)
    zpool_create_command_result = zpool_create_command().splitlines()
    ic(zpool_create_command_result)
    # run_command(zpool_create_command, verbose=True)
    # atexit.register(destroy_zfs_pool, zpool_name)

    zfs_mountpoint = Path(f"{destination_pool_file.as_posix()}_mountpoint")
    zfs_filesystem = f"{zpool_name}/spacetest"
    zfs_create_command = sh.Command("zfs")
    zfs_create_command = zfs_create_command.bake(
        "create",
        "-o", f"mountpoint={zfs_mountpoint.as_posix()}",
        "-o", f"recordsize={recordsize}",
    )
    if large_dnode:
        zfs_create_command = zfs_create_command.bake("-o", "dnodesize=auto")
    if no_acl:
        zfs_create_command = zfs_create_command.bake("-o", "acl=off")
    zfs_create_command = zfs_create_command.bake(zfs_filesystem)
    zfs_create_command_result = zfs_create_command().splitlines()
    ic(zfs_create_command_result)
    atexit.register(umount_zfs_filesystem, zfs_mountpoint)
    # atexit.register(destroy_zfs_filesystem, zfs_filesystem)  # disabled just for pure space tests
    # zfs create -o encryption=on -o keyformat=raw -o keylocation=file://"${key_path}" -o mountpoint=/"${destination_pool_file}"/spacetest_enc "${destination_pool_file}"/spacetest_enc || exit 1

    check_df(destination_pool_file)
    # Fill the filesystem with empty directories until it errors out.
    try:
        make_things(root=zfs_mountpoint, count=None, thing_function=os.makedirs)
    except Exception as e:
        ic(e)
    ic(sh.ls("-alh", zfs_mountpoint))
    check_df(destination_pool_file)
    sh.sync()
    pathstat_results = pathstat(path=zfs_mountpoint, verbose=verbose)
    display_results(pathstat_results, verbose=verbose)
    # 128K recordsize: 81266
    # 512 recordsize: 80894

    zfs_get_all_command_results_interesting_lines = []
    zfs_get_all_command = sh.Command("zfs")
    zfs_get_all_command = zfs_get_all_command.bake("get", "all")
    zfs_get_all_command_results = zfs_get_all_command().splitlines()
    interesting_fields = [
        "used",
        "available",
        "referenced",
        "compressratio",
        "recordsize",
        "checksum",
        "compression",
        "xattr",
        "copies",
        "version",
        "usedbysnapshots",
        "usedbydataset",
        "usedbychildren",
        "usedbyrefreservation",
        "dedup",
        "dnodesize",
        "refcompressratio",
        "written",
        "logicalused",
        "logicalreferenced",
        "acltype",
        "redundant_metadata",
        "encryption",
        "snapshot_count",
        "special_small_blocks",
    ]
    for line in zfs_get_all_command_results:
        if destination_pool_file.name in line:
            if line.split()[1] in interesting_fields:
                zfs_get_all_command_results_interesting_lines.append(line)
                print(line)
    print("\nInteresting lines from above:")
    for line in zfs_get_all_command_results_interesting_lines:
        print(line)

    df_inodes = str(sh.df("-i"))
    # ic(df_inodes)
    print()
    for index, line in enumerate(df_inodes.splitlines()):
        if index == 0:
            print(line)  # df -i header
        if destination_pool_file.name in line:
            df_line = line
            print(df_line)
    # NOTE(review): if no df line matched, df_line is unbound here — confirm
    # the pool always appears in `df -i` output at this point.
    Inodes, IUsed, IFree, IUse = df_line.split()[1:5]

    destination_pool_file_rzip = destination_pool_file.as_posix() + ".rz"
    sh.rzip("-k", "-9", "-o", destination_pool_file_rzip, destination_pool_file.as_posix())
    compressed_file_size = os.stat(destination_pool_file_rzip).st_size
    destination_pool_file_sparse_copy = Path(destination_pool_file.as_posix() + ".sparse")
    sh.cp(
        "-v",
        "-i",
        "--sparse=always",
        destination_pool_file,
        destination_pool_file_sparse_copy,
    )
    # st_blocks is in 512-byte units — actual allocated size of the sparse copy.
    destination_pool_file_sparse_copy_file_size = (
        os.stat(destination_pool_file_sparse_copy).st_blocks * 512)
    # ic(compressed_file_size)
    print("\nSummary:")
    # ic(pathstat_results)
    print("pool file:")
    os.system(" ".join(["/bin/ls", "-al", destination_pool_file.as_posix()]))
    bytes_in_names = pathstat_results["bytes_in_names"]
    objects_created = pathstat_results[4]
    print()
    print(
        f"The {zpool_size_mb}MB pool ran out of free inodes (there are {IFree} out of {Inodes} left) after {bytes_in_names} bytes were written by creating {objects_created} empty directories (with random uncompressable names, under the root).\nCompressed, the pool file takes {compressed_file_size} bytes."
    )
    compression_ratio = (compressed_file_size / (zpool_size_mb * 1024 * 1024)) * 100
    print("compresson ratio:", str(round(compression_ratio, 2)) + "x")
    print(
        f"A sparse copy of the pool file is {destination_pool_file_sparse_copy_file_size}B (~{int(destination_pool_file_sparse_copy_file_size/1024/1024)}MB)"
    )
    if ipython:
        import IPython
        IPython.embed()