def close_stream(self):
    """Close the mounted device and remove the temporary mount files."""
    if self.source_mount is None:
        # Nothing was ever mounted; nothing to tear down.
        return
    sh.sync()
    sh.umount(self.source_mount, self.destination_mount)
    sh.rm(self.mount_folder, '-rf')
def load(self):
    """Map the drive at ``self.path`` and populate the device map.

    A regular file is first attached to a free loop device; a block-special
    file is probed directly.  ``partprobe`` failures are tolerated as a
    best-effort partition-table re-read.

    Returns:
        bool: True on success, False if any step raised.
    """
    log.debug("loading mapable drive {0}".format(self.path))
    try:
        if not re.search(r"block special",
                         str(sh.file(self.path).stdout, 'utf8'),
                         flags=re.IGNORECASE):
            # Not a block device: attach the image file to a free loop dev.
            self.lodev = sh.losetup("-f").split()[0]
            sh.losetup(self.lodev, self.path)
            sh.blkid(self.lodev)
            try:
                sh.partprobe(self.lodev)
            except Exception:
                # Was a bare except; narrowed so Ctrl-C etc. still propagate.
                pass
        else:
            sh.blkid(self.path)
            try:
                sh.partprobe(self.path)
            except Exception:
                # Was a bare except; narrowed so Ctrl-C etc. still propagate.
                pass
        sh.sync("/dev/")
        self.process_devicemap()
    except Exception as e:
        log.exception(e)
        return False
    return True
def run(self):
    """Publish reboot-result events based on marker files left by reboot()."""
    try:
        if sh.test("-e", "%s/reboot-failed" % self.path_root).exit_code == 0:
            self.publish.event.put("/system/reboot", data={
                "code": "REBOOT_FAIL",
                "type": "event"
            })
            sh.rm("-rf", "%s/reboot-failed" % self.path_root)
    except sh.ErrorReturnCode_1:
        # test(1) exits 1 when the marker is absent — not an error.
        pass
    try:
        if sh.test("-e", "%s/rebooting" % self.path_root).exit_code == 0:
            _logger.info("Reboot success!")
            self.publish.event.put("/system/reboot", data={
                "code": "REBOOT_SUCCESS",
                "type": "event"
            })
            sh.rm("-rf", "%s/rebooting" % self.path_root)
    except sh.ErrorReturnCode_1:
        pass
    sh.sync()
def cleanup():
    """Wipe and recreate the DISK and NVM working directories, then sync."""
    for directory in (DISK_DIR, NVM_DIR):
        if os.path.exists(directory):
            shutil.rmtree(directory)
    os.makedirs(DISK_DIR)
    os.makedirs(NVM_DIR)
    sh.sync()
def __exit__(self, type, value, traceback):
    """Flush buffers and best-effort unmount the mount point on context exit."""
    sh.sync()
    # Recursive (-R) lazy (-l) unmount; any exit status is acceptable because
    # this is a cleanup path.  BUGFIX: range(256) covers every possible exit
    # code — the original range(255) excluded status 255, so that one exit
    # code would still have raised.
    sh.sudo.umount(self.mount_point, '-R', '-l', _ok_code=range(256), _fg=True)
    logger.debug('Unmount {}'.format(self.mount_point))
def try_unmount(mount_point, user=False):
    """Best-effort unmount of ``mount_point``; a no-op when it is not mounted.

    When ``user`` is true a FUSE unmount (no sudo) is attempted, otherwise a
    recursive lazy sudo umount.  All command failures are swallowed.
    """
    sh.sync()
    try:
        # -q: quiet check that the path really is a mountpoint.
        sh.mountpoint(mount_point, '-q')
    except sh.ErrorReturnCode:
        # Not a mountpoint — nothing to do.
        return
    try:
        if user:
            sh.fusermount('-quz', mount_point, _fg=True)
        else:
            sh.sudo.umount(mount_point, '-R', '-l', _fg=True)
    except sh.ErrorReturnCode:
        pass
def system_shutdown():
    """Flush disks and halt the machine, reporting the outcome via flash().

    Returns:
        A redirect response to the host settings page in either case.
    """
    with sh.sudo:
        try:
            sh.sync()
            sh.halt()
        except sh.ErrorReturnCode:
            # BUGFIX: catch the ErrorReturnCode base class (any non-zero
            # exit), not just ErrorReturnCode_1 — otherwise a halt failing
            # with another status escapes and produces a 500 instead of the
            # friendly error message.
            flash('Unable to shutdown device!', 'error')
            return redirect(url_for('settings.host'))
        flash('Shutting device down!', 'success')
        return redirect(url_for('settings.host'))
def system_reboot():
    """Flush disks and reboot the machine, reporting the outcome via flash().

    Returns:
        A redirect response to the host settings page in either case.
    """
    with sh.sudo:
        try:
            sh.sync()
            sh.reboot()
        except sh.ErrorReturnCode:
            # BUGFIX: catch the ErrorReturnCode base class (any non-zero
            # exit), not just ErrorReturnCode_1, so every reboot failure is
            # reported instead of propagating out of the request handler.
            flash('Unable to reboot device!', 'error')
            return redirect(url_for('settings.host'))
        flash('Rebooting device!', 'success')
        return redirect(url_for('settings.host'))
def __exit__(self, type, value, traceback):
    """Flush buffers, then repack (CPIO) or unmount the image mount point,
    depending on ``self.image_type``."""
    sh.sync()
    if self.image_type == 'CPIO':
        # NOTE(review): paths are interpolated unquoted into a shell command;
        # they must not contain spaces or shell metacharacters.
        os.system(
            'cd {} && sudo find . | sudo cpio -H newc --quiet -o > "{}"'.
            format(self.mount_point, self.image_file))
        safely_clean_dir(self.mount_point)
    else:
        # Try to umount and ignore any errors.  BUGFIX: the original used
        # subprocess.Popen without waiting, leaving a zombie process and a
        # race with later cleanup; subprocess.run() waits for completion and
        # check=False keeps failures non-fatal as before.
        subprocess.run(
            'sudo umount -R -l {}'.format(self.mount_point).split(),
            check=False)
    logger.debug('Clean/Unmount {}'.format(self.mount_point))
def unload(self):
    """Detach the loop device (if any) and clear the cached children list.

    Returns:
        bool: True when detach succeeded (or nothing was mapped), False
        when ``losetup -d`` failed.
    """
    import glob
    try:
        if self.lodev:
            sh.losetup("-d", self.lodev)
            try:
                sh.partprobe(self.lodev)
                # BUGFIX: sh does not perform shell globbing, so the original
                # sh.rm("...p*") passed the literal pattern and never removed
                # anything; expand leftover partition nodes ourselves.
                leftovers = glob.glob("{0}p*".format(self.lodev))
                if leftovers:
                    sh.rm(*leftovers)
            except Exception:
                # Best-effort cleanup; was a bare except.
                pass
        self.children = []
        sh.sync("/dev/")
        return True
    except sh.ErrorReturnCode:
        sh.sync("/dev/")
        return False
def gphoto_backup(device):
    """Back up all attached PTP cameras and schedule a Lychee sync."""
    global next_lychee_sync
    if device is None:
        return
    copied = ptp_copy.rsync_all_cameras(BACKUP_PATH)
    if copied <= 0:
        # No file copied — nothing to flush or sync.
        return
    sh.sync()  # flush disk buffers
    # Schedule a lychee sync for now.
    next_lychee_sync = datetime.now()
def reboot(self):
    """Announce the reboot, drop a marker file, and invoke the reboot command."""
    # TODO: double check web notification with Zack*2
    self.publish.event.put(
        "/system/reboot",
        data={"code": "REBOOTING", "type": "event"})
    sh.touch("%s/rebooting" % self.path_root)
    sh.sync()
    # Waiting for web to log out
    # time.sleep(5)
    _logger.debug("Turn off the ready led.")
    subprocess.call(self.set_to_not_ready, shell=True)
    _logger.info("Rebooting...")
    if subprocess.call(self.call_reboot, shell=True) != 0:
        _logger.info("Reboot failed!")
        sh.touch("%s/reboot-failed" % self.path_root)
        sh.sync()
def sync_lychee(complete_sync=False):
    """Synchronize the backup tree into Lychee.

    A complete sync first deletes broken symlinks and runs in 'replace'
    exclusive mode; otherwise 'normal' mode is used.
    """
    log.info("Starting Lychee synchronization")
    exclusive_mode = 'normal'
    if complete_sync:
        # Delete broken links before a full replace-mode sync.
        log.info("Removing broken symlinks for Lychee")
        sh.find(LYCHEE_DATA_PATH, "-xtype", "l", "-delete")
        exclusive_mode = 'replace'
    try:
        perform_sync(False, exclusive_mode, True, False, True, False,
                     BACKUP_PATH, LYCHEE_DATA_PATH, LYCHEESYNC_CONF_FILE)
    except Exception:
        log.exception("Unable to perform Lychee synchronization")
    sh.sync()  # flush disk buffers
    log.info("Finished Lychee synchronization")
def reboot(self):
    """Notify clients, mark the reboot flag file, and run the reboot command."""
    # TODO: double check web notification with Zack*2
    payload = {"code": "REBOOTING", "type": "event"}
    self.publish.event.put("/system/reboot", data=payload)
    sh.touch("%s/rebooting" % self.path_root)
    sh.sync()
    # Waiting for web to log out
    # time.sleep(5)
    _logger.debug("Turn off the ready led.")
    subprocess.call(self.set_to_not_ready, shell=True)
    _logger.info("Rebooting...")
    returncode = subprocess.call(self.call_reboot, shell=True)
    if returncode != 0:
        # Reboot command failed: leave a marker so the watcher can report it.
        _logger.info("Reboot failed!")
        sh.touch("%s/reboot-failed" % self.path_root)
        sh.sync()
def mass_storage_backup(source_path):
    """Mirror a mounted mass-storage device into the backup area and
    schedule a Lychee sync."""
    global next_lychee_sync
    if source_path is None:
        return
    unique_id = get_unique_name(source_path)
    destination_path = os.path.join(BACKUP_PATH, unique_id) + os.sep
    # Create the destination folder (idempotent).
    os.makedirs(destination_path, exist_ok=True)
    log.info("Starting backup for %s to %s", source_path, destination_path)
    # Archive-mode rsync with normalized permissions.
    sh.rsync("-a", "--chmod=Du=rwx,Dgo=rwx,Fu=rw,Fog=rw",
             source_path + os.sep, destination_path)
    sh.sync()  # flush disk buffers
    log.info("Finished backup for %s", source_path)
    # Schedule a lychee sync for now.
    next_lychee_sync = datetime.now()
def make_or_wipe_server_side_subversion_repo(svn_parent_root, repo_name, compression, deltification, rep_sharing):
    """Recreate a server-side Subversion repository and tune its fsfs.conf.

    Any existing repository of the same name is deleted first.  The three
    boolean flags each uncomment/override the corresponding fsfs.conf option.
    """
    if not str(svn_parent_root).endswith("/"):
        svn_parent_root += "/"
    # Wipe the Subversion repo
    sh.rm("-rf", svn_parent_root + repo_name)
    if not os.path.exists(svn_parent_root):
        os.makedirs(svn_parent_root)
    sh.svnadmin("create", svn_parent_root + repo_name)
    sh.chown("-R", "www-data:www-data", svn_parent_root + repo_name)
    sh.chmod("-R", "755", svn_parent_root + repo_name)
    sh.sync()
    # inplace=True redirects print() into the file being edited.
    with fileinput.FileInput(svn_parent_root + repo_name + "/db/fsfs.conf",
                             inplace=True) as conf:
        for line in conf:
            if compression and "# compression-level" in line:
                print("compression-level = 0")
            elif deltification and "# max-deltification-walk" in line:
                print("max-deltification-walk = 0")
            elif rep_sharing and "# enable-rep-sharing" in line:
                print("enable-rep-sharing = false")
            else:
                # BUGFIX: fileinput lines keep their trailing newline, so the
                # original print(line) added a second one and double-spaced
                # every untouched line; suppress print's own newline.
                print(line, end="")
def run(self):
    """Check the reboot marker files and publish the matching result event."""
    fail_marker = "%s/reboot-failed" % self.path_root
    try:
        result = sh.test("-e", fail_marker)
        if result.exit_code == 0:
            self.publish.event.put(
                "/system/reboot",
                data={"code": "REBOOT_FAIL", "type": "event"})
            sh.rm("-rf", fail_marker)
    except sh.ErrorReturnCode_1:
        # test(1) exits 1 when the marker file is absent — expected.
        pass
    ok_marker = "%s/rebooting" % self.path_root
    try:
        result = sh.test("-e", ok_marker)
        if result.exit_code == 0:
            _logger.info("Reboot success!")
            self.publish.event.put(
                "/system/reboot",
                data={"code": "REBOOT_SUCCESS", "type": "event"})
            sh.rm("-rf", ok_marker)
    except sh.ErrorReturnCode_1:
        pass
    sh.sync()
def LicheePiImage(workdir, boot_files, kernel_files, rootfs_files):
    """Assemble a bootable LicheePi SD-card image in ``workdir``.

    boot_files:   dict with 'bootstrap' (SPL image) and 'bin' (u-boot binary).
    kernel_files: dict with 'bin' (kernel image) and 'dtb' (device tree).
    rootfs_files: dict with 'rootdir' (extracted root filesystem path).

    NOTE(review): the image and partitions are left mounted on /tmp/p1 and
    /tmp/p2 and the loop device stays attached — presumably cleaned up by the
    caller; confirm.
    """
    mkdir('-p', workdir)
    IMAGE_NAME = 'sdcard.img'
    IMAGE_PATH = str(Path(workdir).joinpath(IMAGE_NAME))
    # Create a 300 MB zero-filled image file.
    dd('if=/dev/zero', 'of={}'.format(IMAGE_PATH), 'bs=1M', 'count=300')
    # Attach the image to the first free loop device.
    loop_dev = str(losetup('-f')).split()[0]
    losetup(loop_dev, IMAGE_PATH)
    # Partition table: 16 MB FAT (type c) boot partition at 1 MB offset,
    # remainder as a Linux (L) partition.
    sfdisk(cat(_in='1M,16M,c\n,,L'), loop_dev)
    partx('-u', loop_dev)
    mkfsvfat('{}p1'.format(loop_dev))
    mkfsext4('{}p2'.format(loop_dev))
    # Zero the region between the MBR and the 1 MB mark.
    dd('if=/dev/zero', 'of={}'.format(loop_dev), 'bs=1K', 'seek=1',
       'count=1023')
    # Write the bootstrap/SPL at the 8 KB offset — presumably where the SoC
    # boot ROM expects it; confirm against the board documentation.
    dd('if={}'.format(boot_files['bootstrap']), 'of={}'.format(loop_dev),
       'bs=1K', 'seek=8')
    sync()
    mkdir('-p', '/tmp/p1')
    mkdir('-p', '/tmp/p2')
    mount('{}p1'.format(loop_dev), '/tmp/p1')
    mount('{}p2'.format(loop_dev), '/tmp/p2')
    # Boot partition contents: u-boot, kernel, device tree, boot script.
    cp(boot_files['bin'], '/tmp/p1/')
    cp(kernel_files['bin'], '/tmp/p1/')
    cp(kernel_files['dtb'], '/tmp/p1/')
    mkimage('-C', 'none', '-A', 'arm', '-T', 'script', '-d',
            './resources/boot.cmd', '/tmp/p1/boot.scr')
    # Root filesystem partition: copy the rootfs and ensure the standard
    # mount-point/init skeleton exists.
    rsync('-r', '--links', rootfs_files['rootdir'] + '/', '/tmp/p2/')
    mkdir('-p', '/tmp/p2/etc/init.d')
    mkdir('-p', '/tmp/p2/proc')
    mkdir('-p', '/tmp/p2/dev')
    mkdir('-p', '/tmp/p2/sys')
    mkdir('-p', '/tmp/p2/var')
    touch('/tmp/p2/etc/init.d/rcS')
    chown('-R', 'root:root', '/tmp/p2/')
def cli(
    ctx,
    destination_folder: Path,
    zpool_size_mb: int,
    recordsize: str,
    large_dnode: bool,
    no_acl: bool,
    verbose: Union[bool, int, float],
    verbose_inf: bool,
    loopback: bool,
    record_count: int,
    ipython: bool,
):
    """Create a throwaway file-backed ZFS pool, fill it with empty
    directories until it runs out of inodes, and report space / inode /
    compressibility statistics.

    Must run as root.  With ``loopback`` the pool file is first attached to
    a free loop device.  Cleanup handlers are registered with atexit.
    """
    tty, verbose = tv(
        ctx=ctx,
        verbose=verbose,
        verbose_inf=verbose_inf,
    )
    if os.getuid() != 0:
        ic("must be root")
        sys.exit(1)

    if zpool_size_mb < 64:
        raise ValueError("minimum zpool size is 64MB")
    timestamp = str(time.time())
    if verbose:
        ic(timestamp)

    if loopback:
        free_loop = sh.losetup("--find").splitlines()
        loop = Path(free_loop[0])
        if not path_is_block_special(loop):
            raise ValueError(f"loop device path {loop} is not block special")
        loops_in_use = sh.losetup("-l").splitlines()
        # ic(loops_in_use)
        for line in loops_in_use:
            # BUGFIX: the original tested `loop in loops_in_use`, comparing a
            # Path against the whole list (never true and loop-invariant);
            # check each output line for the chosen device path instead.
            if str(loop) in line:
                raise ValueError(f"loop device {loop} already in use")

    destination = Path(destination_folder) / Path(f"zfstester_{timestamp}")
    os.makedirs(destination)
    destination_pool_file = destination / Path(f"test_pool_{timestamp}")
    destination_pool_file = destination_pool_file.resolve()
    if verbose:
        ic(destination_pool_file)

    # Pre-allocate the pool's backing file.
    sh.dd(
        "if=/dev/zero",
        f"of={destination_pool_file.as_posix()}",
        f"bs={zpool_size_mb}M",
        "count=1",
    )
    # dd if=/dev/urandom of=temp_zfs_key bs=32 count=1 || exit 1
    # key_path=`readlink -f temp_zfs_key`

    if loopback:
        # NOTE(review): `loop` is passed both as the device and as a trailing
        # argument — looks unintentional but is preserved; confirm against
        # losetup usage.
        sh.losetup(loop, destination_pool_file, loop)
        atexit.register(cleanup_loop_device, loop)
        if verbose:
            ic(sh.losetup("-l"))

    zpool_name = destination_pool_file.name
    if verbose:
        ic(zpool_name)
    zpool_create_command = sh.Command("zpool")
    zpool_create_command = zpool_create_command.bake(
        "create",
        "-O", "atime=off",
        "-O", "compression=lz4",
        "-O", "mountpoint=none",
        "-O", f"recordsize={recordsize}",
        zpool_name,
    )
    if loopback:
        zpool_create_command = zpool_create_command.bake(loop)
    else:
        zpool_create_command = zpool_create_command.bake(destination_pool_file)
    zpool_create_command_result = zpool_create_command().splitlines()
    ic(zpool_create_command_result)
    # run_command(zpool_create_command, verbose=True)
    # atexit.register(destroy_zfs_pool, zpool_name)

    zfs_mountpoint = Path(f"{destination_pool_file.as_posix()}_mountpoint")
    zfs_filesystem = f"{zpool_name}/spacetest"
    zfs_create_command = sh.Command("zfs")
    zfs_create_command = zfs_create_command.bake(
        "create",
        "-o", f"mountpoint={zfs_mountpoint.as_posix()}",
        "-o", f"recordsize={recordsize}",
    )
    if large_dnode:
        zfs_create_command = zfs_create_command.bake("-o", "dnodesize=auto")
    if no_acl:
        zfs_create_command = zfs_create_command.bake("-o", "acl=off")
    zfs_create_command = zfs_create_command.bake(zfs_filesystem)
    zfs_create_command_result = zfs_create_command().splitlines()
    ic(zfs_create_command_result)
    atexit.register(umount_zfs_filesystem, zfs_mountpoint)
    # atexit.register(destroy_zfs_filesystem, zfs_filesystem)  # disabled just for pure space tests
    # zfs create -o encryption=on -o keyformat=raw -o keylocation=file://"${key_path}" -o mountpoint=/"${destination_pool_file}"/spacetest_enc "${destination_pool_file}"/spacetest_enc || exit 1

    check_df(destination_pool_file)
    try:
        # Create directories until the filesystem refuses; the exception is
        # expected (inode/space exhaustion) and only logged.
        make_things(root=zfs_mountpoint, count=None, thing_function=os.makedirs)
    except Exception as e:
        ic(e)
    ic(sh.ls("-alh", zfs_mountpoint))
    check_df(destination_pool_file)
    sh.sync()

    pathstat_results = pathstat(path=zfs_mountpoint, verbose=verbose)
    display_results(pathstat_results, verbose=verbose)
    # 128K recordsize: 81266
    # 512 recordsize: 80894

    zfs_get_all_command_results_interesting_lines = []
    zfs_get_all_command = sh.Command("zfs")
    zfs_get_all_command = zfs_get_all_command.bake("get", "all")
    zfs_get_all_command_results = zfs_get_all_command().splitlines()
    interesting_fields = [
        "used", "available", "referenced", "compressratio", "recordsize",
        "checksum", "compression", "xattr", "copies", "version",
        "usedbysnapshots", "usedbydataset", "usedbychildren",
        "usedbyrefreservation", "dedup", "dnodesize", "refcompressratio",
        "written", "logicalused", "logicalreferenced", "acltype",
        "redundant_metadata", "encryption", "snapshot_count",
        "special_small_blocks",
    ]
    for line in zfs_get_all_command_results:
        if destination_pool_file.name in line:
            if line.split()[1] in interesting_fields:
                zfs_get_all_command_results_interesting_lines.append(line)
                print(line)
    print("\nInteresting lines from above:")
    for line in zfs_get_all_command_results_interesting_lines:
        print(line)

    df_inodes = str(sh.df("-i"))
    # ic(df_inodes)
    print()
    for index, line in enumerate(df_inodes.splitlines()):
        if index == 0:
            print(line)  # df -i header
        if destination_pool_file.name in line:
            df_line = line
            print(df_line)
    Inodes, IUsed, IFree, IUse = df_line.split()[1:5]

    destination_pool_file_rzip = destination_pool_file.as_posix() + ".rz"
    sh.rzip("-k", "-9", "-o", destination_pool_file_rzip,
            destination_pool_file.as_posix())
    compressed_file_size = os.stat(destination_pool_file_rzip).st_size

    destination_pool_file_sparse_copy = Path(destination_pool_file.as_posix() + ".sparse")
    sh.cp(
        "-v",
        "-i",
        "--sparse=always",
        destination_pool_file,
        destination_pool_file_sparse_copy,
    )
    # st_blocks is in 512-byte units: actual allocated size of the sparse copy.
    destination_pool_file_sparse_copy_file_size = (
        os.stat(destination_pool_file_sparse_copy).st_blocks * 512)
    # ic(compressed_file_size)

    print("\nSummary:")
    # ic(pathstat_results)
    print("pool file:")
    os.system(" ".join(["/bin/ls", "-al", destination_pool_file.as_posix()]))
    bytes_in_names = pathstat_results["bytes_in_names"]
    # NOTE(review): pathstat_results is indexed both by string and by 4 —
    # presumably 4 is a depth/count key of pathstat(); confirm.
    objects_created = pathstat_results[4]
    print()
    print(
        f"The {zpool_size_mb}MB pool ran out of free inodes (there are {IFree} out of {Inodes} left) after {bytes_in_names} bytes were written by creating {objects_created} empty directories (with random uncompressable names, under the root).\nCompressed, the pool file takes {compressed_file_size} bytes."
    )
    compression_ratio = (compressed_file_size / (zpool_size_mb * 1024 * 1024)) * 100
    # BUGFIX: "compresson" -> "compression" in the user-facing label.
    print("compression ratio:", str(round(compression_ratio, 2)) + "x")
    print(
        f"A sparse copy of the pool file is {destination_pool_file_sparse_copy_file_size}B (~{int(destination_pool_file_sparse_copy_file_size/1024/1024)}MB)"
    )
    if ipython:
        import IPython
        IPython.embed()
def __exit__(self, type, value, traceback):
    """Flush buffers, repack the mount point into a CPIO archive, and clean
    up the mount directory on context exit."""
    import shlex
    sh.sync()
    # BUGFIX: quote both paths so whitespace or shell metacharacters cannot
    # break (or inject into) the shell pipeline; the original interpolated
    # mount_point completely unquoted after `cd`.
    os.system('cd {} && sudo find . | sudo cpio -H newc --quiet -o > {}'.format(
        shlex.quote(str(self.mount_point)), shlex.quote(str(self.image_file))))
    safely_clean_dir(self.mount_point)
    logger.debug('Clean {}'.format(self.mount_point))