def step_impl(context):
    sh.mkdir("tmp").wait()
    for row in context.table:
        sh.dd(
            "if=/dev/zero",
            "of=tmp/" + row["name"],
            "bs=1048576",
            "count=" + row["count"]
        )
def disk_wipe():
    """
    Wipe all filesystem and partition data from the disk.
    """
    print('Destroying filesystem and partition data')
    sh.wipefs('-a', config.disk_dev)
    sh.sgdisk('--zap-all', config.disk_dev)
    sh.sgdisk('-og', config.disk_dev)
    print('Writing 10GB of zeros to the drive, this may take seconds or minutes depending on disk speed')
    sh.dd('bs=10M', 'count=1024', 'if=/dev/zero', f'of={config.disk_dev}',
          'conv=fdatasync', _out=sys.stdout)
def step_impl(context):
    sh.mkdir("tmp").wait()
    for row in context.table:
        dirs = row["name"].split("/")
        if len(dirs) > 1:
            sh.mkdir("tmp/" + dirs[0]).wait()
        sh.dd("if=/dev/zero", "of=tmp/{filename}".format(filename=row["name"]),
              "bs=1048576", "count=1").wait()
def step_impl(context):
    sh.mkdir("tmp").wait()
    for row in context.table:
        dirs = row["name"].split("/")
        if len(dirs) > 1:
            sh.mkdir("tmp/" + dirs[0]).wait()
        sh.dd(
            "if=/dev/zero",
            "of=tmp/{filename}".format(filename=row["name"]),
            "bs=1048576",
            "count=1"
        ).wait()
def log_copy_operation(copy_source, copy_destination, file_size, nr_of_files,
                       log_dir, sample_files_dir, unit, mountpoint,
                       reading=False, timelimit_in_min=None):
    ''':returns: amount of files successfully written to the file system'''
    #check="$6" #check copy destination with the file of the name "file_sizeMB" in $SAMPLE_FILES_DIR
    success = 0
    errors = 0
    corruption = 0
    if timelimit_in_min is None:
        timelimit_in_min = float("inf")
    timelimit_in_s = timelimit_in_min * 60
    timeout = False
    if reading:
        LOG_DIR = log_dir + "/read"
    else:
        LOG_DIR = log_dir + "/write"
    os.makedirs(LOG_DIR)
    copy_stats_process = Process(target=periodic_copy_stats, args=(LOG_DIR, mountpoint))
    copy_stats_process.start()
    ifstats = Ifstats()
    time_before_operation = datetime.datetime.now() - datetime.timedelta(0)
    subdir = 0
    for nr in range(1, nr_of_files+1):  # from 1 to file quantity
        filename = file_size+unit+'_'+str(nr)
        if nr % 500 == 0:
            subdir = subdir + 1
        if reading:
            src = copy_source+'/'+str(subdir)+'/'+filename
            dst = copy_destination+'/'+filename
        else:
            src = copy_source+'/'+filename
            dst = copy_destination+'/'+str(subdir)+'/'+filename
        tries = 2
        while True:
            try:
                dd('if='+src, 'of='+dst, 'bs=131072')
                success += 1
                animate_line("copy "+src+" -> "+dst)
            except ErrorReturnCode, e:
                tries -= 1
                errors += 1
                if tries == 0:
                    break
                print "\nError occurred during copying - retrying:%s\n" % repr(e)
                #traceback.print_exc()
                continue
            break  # stop loop if command succeeded
        timeout = timelimit_in_s < (datetime.datetime.now() - time_before_operation).seconds
        if timeout:
            break
def trial(num_bins=1, size_bin=500, after_rm=None, max_delta=0.05):
    from sh import imgbase, rm, ls

    def img_free():
        return float(imgbase("layout", "--free-space"))

    imgbase = imgbase.bake("--debug")

    a = img_free()
    [dd(B, size_bin) for B in iter(range(0, num_bins))]
    print("Files which were created")
    print(ls("-shal", *glob.glob("/var/tmp/*.bin")))

    b = img_free()
    print("Files are getting removed")
    rm("-f", *glob.glob("/var/tmp/*.bin"))
    after_rm()

    c = img_free()
    ratio = a / c
    print(a, b, c, ratio)

    delta = 1 - ratio
    assert delta < max_delta, \
        "Delta %s is larger than %s" % (delta, max_delta)
def main():
    parser = MyParser()
    parser.add_argument('directory')
    args = parser.parse_args()
    directory = args.directory
    if not os.path.exists(directory):
        os.makedirs(directory)
    filesize_arr = [1, 2, 3, 4, 5, 6]
    filequantity_arr = [10000, 5000, 3333, 2500, 2000, 1667]
    idx = 0
    for size in filesize_arr:
        for nr in range(1, filequantity_arr[idx]+1):  # from 1 to file quantity
            filename = directory+'/'+str(size)+'MB_'+str(nr)
            print "writing "+directory+'/'+str(size)+'MB_'+str(nr)
            dd('if=/dev/zero', 'of='+filename, 'bs=1MB', 'count='+str(size))
        idx += 1
def _create_bootstrap_img(self):
    self._bootstrap_dir = tempfile.mkdtemp()
    sh.chmod("o+rwx", self._bootstrap_dir)

    self._bootstrap_files = os.path.join(self._bootstrap_dir, "files")
    os.makedirs(self._bootstrap_files)

    with open(os.path.join(os.path.dirname(__file__), "bootstrap.py"), "r") as f:
        bootstrap_contents = f.read()

    with open(os.path.join(self._bootstrap_files, "bootstrap.py"), "wb") as f:
        f.write(bootstrap_contents)

    with open(os.path.join(self._bootstrap_files, "config.json"), "wb") as f:
        f.write(self._make_config())

    self._bootstrap_img = os.path.join(self._bootstrap_dir, "bootstrap.img")
    sh.dd("if=/dev/null", "bs=1K", "of={}".format(self._bootstrap_img), "seek=2040")
    sh.Command("mkfs.ntfs")("-F", self._bootstrap_img)
    #sh.Command("mkfs.vfat")(self._bootstrap_img)

    mounted_dir = os.path.join(self._bootstrap_dir, "mounted")
    os.makedirs(mounted_dir)
    output = sh.mount("-t", "ntfs", "-o", "loop", self._bootstrap_img, mounted_dir)
    #output = sh.mount("-t", "vfat", "-o", "loop", self._bootstrap_img, mounted_dir)
    #self._log.debug("mount output: " + str(output))

    shutil.copy(os.path.join(self._bootstrap_files, "bootstrap.py"), mounted_dir)
    shutil.copy(os.path.join(self._bootstrap_files, "config.json"), mounted_dir)

    try:
        sh.umount(mounted_dir)
    except:
        pass

    return self._bootstrap_img
def _hotplug_empty_disk(self):
    self._tmpdir = tempfile.mkdtemp()
    sh.chmod("o+rwx", self._tmpdir)

    self._tmpdisk = os.path.join(self._tmpdir, "tmpdisk.img")
    sh.dd("if=/dev/null", "bs=1K", "of={}".format(self._tmpdisk), "seek=1030")
    sh.Command("mkfs.ntfs")("-F", self._tmpdisk)

    disk_file = os.path.join(self._tmpdir, "disk.xml")
    with open(disk_file, "wb") as f:
        f.write("""
            <disk type="file" device="disk">
                <driver name="qemu" type="raw" cache="none" io="native"/>
                <source file="{}"/>
                <target dev="sda" bus="usb"/>
            </disk>
        """.format(self._tmpdisk))

    sh.virsh("attach-device", self._domain, disk_file)
def get_disk_throughput(device):
    """Tests a disk for read throughput. Returns formatted result.

    :param device: The device to test (Expects full path)
    """
    throughput = 0
    unit = ""
    for line in sh.dd("if={}".format(device), "of=/dev/zero", "bs=1M",
                      "count=1000", _err_to_out=True):
        s = re.search(r' copied,.*, (\S+) (\S+)$', line)
        if s:
            throughput = s.group(1)
            unit = s.group(2)
            break
    return "{} {}".format(throughput, unit)
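# A minimal, self-contained sketch of the parsing step used above, run against
# a sample dd(1) transfer summary. The sample line is an assumption for
# illustration only; the exact wording of dd's summary varies between
# coreutils versions, which is why the function above scans every line.
import re

sample = "1048576000 bytes (1.0 GB, 1000 MiB) copied, 2.70418 s, 388 MB/s"
match = re.search(r' copied,.*, (\S+) (\S+)$', sample)
if match:
    # Prints "388 MB/s": group(1) is the throughput value, group(2) its unit.
    print("{} {}".format(match.group(1), match.group(2)))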
def log_copy_operation(copy_source, copy_destination, file_size, nr_of_files,
                       log_file, sample_files_dir, unit, check=False):
    #check="$6" #check copy destination with the file of the name "file_sizeMB" in $SAMPLE_FILES_DIR
    now = datetime.datetime.now()
    time_of_multiple_operations = now - now
    success = 0
    time_before_operation = datetime.datetime.now() - datetime.timedelta(0)
    for nr in range(1, nr_of_files+1):  # from 1 to file quantity
        operation_succeeded = 0
        while True:
            try:
                start = datetime.datetime.now()
                dd('if='+copy_source+str(nr), 'of='+copy_destination+str(nr), 'bs=131072')
                end = datetime.datetime.now()
            except ErrorReturnCode:
                import traceback
                sys.stderr.write("Error occurred during copying - retrying:")
                traceback.print_exc()
                continue
            break  # stop loop if command succeeded
        time_of_operation = end - start
        time_of_multiple_operations += time_of_operation
        print "time gone until now: "+str(time_of_multiple_operations)
        success += 1
        if check:
            if is_equal(copy_destination+str(nr), sample_files_dir+'/'+str(file_size)+unit+'_'+str(nr)):
                success -= 1
    time_after_operations = time_before_operation + time_of_multiple_operations
    print "time_before_operation %s - time_after_operation %s" % (time_before_operation, time_after_operations)
    average_transfer_rate = (1.0*int(file_size) * nr_of_files)*(1/time_of_multiple_operations.total_seconds())
    #"single_file_size total_size time_for_operation_in_seconds average_transfer_rate nr_of_files success"
    if not os.path.exists(log_file):
        with open(log_file, 'w') as f:
            f.write("single file size\ttotal size\ttime for operation [s]\taverage transfer rate [%s/s]\tnumber of files\tsuccess\n" % unit)
    with open(log_file, 'a') as f:
        total_size = int(file_size)*nr_of_files
        f.write("%s\t%s\t%s\t%s\t%s\t%s\n" % (file_size, total_size, time_of_multiple_operations.total_seconds(), average_transfer_rate, nr_of_files, success))
def trial(num_bins=1, size_bin=500, after_rm=None, max_delta=0.05):
    from sh import imgbase, rm, ls
    imgbase = imgbase.bake("--debug")
    img_free = lambda: float(imgbase("layout", "--free-space"))

    a = img_free()
    [dd(B, size_bin) for B in iter(range(0, num_bins))]
    print("Files which were created")
    print(ls("-shal", *glob.glob("/var/tmp/*.bin")))

    b = img_free()
    print("Files are getting removed")
    rm("-f", *glob.glob("/var/tmp/*.bin"))
    after_rm()

    c = img_free()
    ratio = a / c
    print(a, b, c, ratio)

    delta = 1 - ratio
    assert delta < max_delta, \
        "Delta %s is larger than %s" % (delta, max_delta)
def LicheePiImage(workdir, boot_files, kernel_files, rootfs_files):
    mkdir('-p', workdir)
    IMAGE_NAME = 'sdcard.img'
    IMAGE_PATH = str(Path(workdir).joinpath(IMAGE_NAME))

    dd('if=/dev/zero', 'of={}'.format(IMAGE_PATH), 'bs=1M', 'count=300')

    loop_dev = str(losetup('-f')).split()[0]
    losetup(loop_dev, IMAGE_PATH)
    sfdisk(cat(_in='1M,16M,c\n,,L'), loop_dev)
    partx('-u', loop_dev)
    mkfsvfat('{}p1'.format(loop_dev))
    mkfsext4('{}p2'.format(loop_dev))
    dd('if=/dev/zero', 'of={}'.format(loop_dev), 'bs=1K', 'seek=1', 'count=1023')
    dd('if={}'.format(boot_files['bootstrap']), 'of={}'.format(loop_dev), 'bs=1K', 'seek=8')
    sync()

    mkdir('-p', '/tmp/p1')
    mkdir('-p', '/tmp/p2')
    mount('{}p1'.format(loop_dev), '/tmp/p1')
    mount('{}p2'.format(loop_dev), '/tmp/p2')

    cp(boot_files['bin'], '/tmp/p1/')
    cp(kernel_files['bin'], '/tmp/p1/')
    cp(kernel_files['dtb'], '/tmp/p1/')
    mkimage('-C', 'none', '-A', 'arm', '-T', 'script', '-d', './resources/boot.cmd', '/tmp/p1/boot.scr')

    rsync('-r', '--links', rootfs_files['rootdir'] + '/', '/tmp/p2/')
    mkdir('-p', '/tmp/p2/etc/init.d')
    mkdir('-p', '/tmp/p2/proc')
    mkdir('-p', '/tmp/p2/dev')
    mkdir('-p', '/tmp/p2/sys')
    mkdir('-p', '/tmp/p2/var')
    touch('/tmp/p2/etc/init.d/rcS')
    chown('-R', 'root:root', '/tmp/p2/')
def dd(N, count=100):
    from sh import dd
    dd("if=/dev/zero", "of=/var/tmp/%s.bin" % N, "bs=1M", "count=%d" % count)
def reserve_space(folder, reserve_size):
    path = dummy_file(folder)
    sh.dd('if=/dev/zero', 'of=%s' % path,
          'bs=%lu' % (reserve_size / 1024), 'count=1024')
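# Worked example of the size arithmetic in reserve_space() above, using
# illustrative numbers that are not from the original source: dd writes 1024
# blocks of reserve_size/1024 bytes each, so the reserved file comes out at
# roughly reserve_size bytes (exactly, when reserve_size is a multiple of 1024).
reserve_size = 10 * 1024 * 1024      # request 10 MiB
block_size = reserve_size // 1024    # 10240 bytes per block
print(block_size * 1024)             # 10485760 bytes, i.e. the full 10 MiB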
def generate(filename, bs, count=1):
    return sh.dd('if=/dev/urandom', 'of={}'.format(filename),
                 'bs={}'.format(bs), 'count={}'.format(count))
def ddd(**kwargs):
    # dd helper
    return dd(
        *("{}={}".format(k.lstrip("_"), v) for k, v in kwargs.items()),
        **SH_OPTS,
    )
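# A minimal, self-contained sketch of how a helper like ddd() above can be
# called. SH_OPTS is defined elsewhere in the original module; the value used
# here, and the output path and sizes, are assumptions for illustration only.
import sh

SH_OPTS = {"_err_to_out": True}  # hypothetical stand-in for the real options

def ddd(**kwargs):
    # Keys with a leading underscore (e.g. _if, since "if" is a Python
    # keyword) are stripped back to the plain dd option name, then each
    # pair is turned into a "key=value" positional argument for dd.
    return sh.dd(
        *("{}={}".format(k.lstrip("_"), v) for k, v in kwargs.items()),
        **SH_OPTS,
    )

# Expands to: dd if=/dev/zero of=/var/tmp/example.bin bs=1M count=4
ddd(_if="/dev/zero", of="/var/tmp/example.bin", bs="1M", count=4)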
def cli(
    ctx,
    destination_folder: Path,
    zpool_size_mb: int,
    recordsize: str,
    large_dnode: bool,
    no_acl: bool,
    verbose: Union[bool, int, float],
    verbose_inf: bool,
    loopback: bool,
    record_count: int,
    ipython: bool,
):
    tty, verbose = tv(
        ctx=ctx,
        verbose=verbose,
        verbose_inf=verbose_inf,
    )
    if os.getuid() != 0:
        ic("must be root")
        sys.exit(1)

    if zpool_size_mb < 64:
        raise ValueError("minimum zpool size is 64MB")

    timestamp = str(time.time())
    if verbose:
        ic(timestamp)

    if loopback:
        free_loop = sh.losetup("--find").splitlines()
        loop = Path(free_loop[0])
        if not path_is_block_special(loop):
            raise ValueError(f"loop device path {loop} is not block special")
        loops_in_use = sh.losetup("-l").splitlines()
        # ic(loops_in_use)
        for line in loops_in_use:
            if loop in loops_in_use:
                raise ValueError(f"loop device {loop} already in use")

    destination = Path(destination_folder) / Path(f"zfstester_{timestamp}")
    os.makedirs(destination)
    destination_pool_file = destination / Path(f"test_pool_{timestamp}")
    destination_pool_file = destination_pool_file.resolve()
    if verbose:
        ic(destination_pool_file)

    sh.dd(
        "if=/dev/zero",
        f"of={destination_pool_file.as_posix()}",
        f"bs={zpool_size_mb}M",
        "count=1",
    )
    # dd if=/dev/urandom of=temp_zfs_key bs=32 count=1 || exit 1
    # key_path=`readlink -f temp_zfs_key`

    if loopback:
        sh.losetup(loop, destination_pool_file, loop)
        atexit.register(cleanup_loop_device, loop)
        if verbose:
            ic(sh.losetup("-l"))

    zpool_name = destination_pool_file.name
    if verbose:
        ic(zpool_name)

    zpool_create_command = sh.Command("zpool")
    zpool_create_command = zpool_create_command.bake(
        "create",
        "-O", "atime=off",
        "-O", "compression=lz4",
        "-O", "mountpoint=none",
        "-O", f"recordsize={recordsize}",
        zpool_name,
    )
    if loopback:
        zpool_create_command = zpool_create_command.bake(loop)
    else:
        zpool_create_command = zpool_create_command.bake(destination_pool_file)

    zpool_create_command_result = zpool_create_command().splitlines()
    ic(zpool_create_command_result)
    # run_command(zpool_create_command, verbose=True)
    # atexit.register(destroy_zfs_pool, zpool_name)

    zfs_mountpoint = Path(f"{destination_pool_file.as_posix()}_mountpoint")
    zfs_filesystem = f"{zpool_name}/spacetest"
    zfs_create_command = sh.Command("zfs")
    zfs_create_command = zfs_create_command.bake(
        "create",
        "-o", f"mountpoint={zfs_mountpoint.as_posix()}",
        "-o", f"recordsize={recordsize}",
    )
    if large_dnode:
        zfs_create_command = zfs_create_command.bake("-o", "dnodesize=auto")
    if no_acl:
        zfs_create_command = zfs_create_command.bake("-o", "acl=off")
    zfs_create_command = zfs_create_command.bake(zfs_filesystem)

    zfs_create_command_result = zfs_create_command().splitlines()
    ic(zfs_create_command_result)
    atexit.register(umount_zfs_filesystem, zfs_mountpoint)
    # atexit.register(destroy_zfs_filesystem, zfs_filesystem)  # disabled just for pure space tests
    # zfs create -o encryption=on -o keyformat=raw -o keylocation=file://"${key_path}" -o mountpoint=/"${destination_pool_file}"/spacetest_enc "${destination_pool_file}"/spacetest_enc || exit 1

    check_df(destination_pool_file)
    try:
        make_things(root=zfs_mountpoint, count=None, thing_function=os.makedirs)
    except Exception as e:
        ic(e)

    ic(sh.ls("-alh", zfs_mountpoint))
    check_df(destination_pool_file)
    sh.sync()

    pathstat_results = pathstat(path=zfs_mountpoint, verbose=verbose)
    display_results(pathstat_results, verbose=verbose)
    # 128K recordsize: 81266
    # 512 recordsize: 80894

    zfs_get_all_command_results_interesting_lines = []
    zfs_get_all_command = sh.Command("zfs")
    zfs_get_all_command = zfs_get_all_command.bake("get", "all")
    zfs_get_all_command_results = zfs_get_all_command().splitlines()
    interesting_fields = [
        "used",
        "available",
        "referenced",
        "compressratio",
        "recordsize",
        "checksum",
        "compression",
        "xattr",
        "copies",
        "version",
        "usedbysnapshots",
        "usedbydataset",
        "usedbychildren",
        "usedbyrefreservation",
        "dedup",
        "dnodesize",
        "refcompressratio",
        "written",
        "logicalused",
        "logicalreferenced",
        "acltype",
        "redundant_metadata",
        "encryption",
        "snapshot_count",
        "special_small_blocks",
    ]
    for line in zfs_get_all_command_results:
        if destination_pool_file.name in line:
            if line.split()[1] in interesting_fields:
                zfs_get_all_command_results_interesting_lines.append(line)
            print(line)

    print("\nInteresting lines from above:")
    for line in zfs_get_all_command_results_interesting_lines:
        print(line)

    df_inodes = str(sh.df("-i"))
    # ic(df_inodes)
    print()
    for index, line in enumerate(df_inodes.splitlines()):
        if index == 0:
            print(line)  # df -i header
        if destination_pool_file.name in line:
            df_line = line
            print(df_line)

    Inodes, IUsed, IFree, IUse = df_line.split()[1:5]

    destination_pool_file_rzip = destination_pool_file.as_posix() + ".rz"
    sh.rzip("-k", "-9", "-o", destination_pool_file_rzip, destination_pool_file.as_posix())
    compressed_file_size = os.stat(destination_pool_file_rzip).st_size

    destination_pool_file_sparse_copy = Path(destination_pool_file.as_posix() + ".sparse")
    sh.cp(
        "-v",
        "-i",
        "--sparse=always",
        destination_pool_file,
        destination_pool_file_sparse_copy,
    )
    destination_pool_file_sparse_copy_file_size = (
        os.stat(destination_pool_file_sparse_copy).st_blocks * 512)
    # ic(compressed_file_size)

    print("\nSummary:")
    # ic(pathstat_results)
    print("pool file:")
    os.system(" ".join(["/bin/ls", "-al", destination_pool_file.as_posix()]))
    bytes_in_names = pathstat_results["bytes_in_names"]
    objects_created = pathstat_results[4]
    print()
    print(
        f"The {zpool_size_mb}MB pool ran out of free inodes (there are {IFree} out of {Inodes} left) after {bytes_in_names} bytes were written by creating {objects_created} empty directories (with random uncompressible names, under the root).\nCompressed, the pool file takes {compressed_file_size} bytes."
    )
    compression_ratio = (compressed_file_size / (zpool_size_mb * 1024 * 1024)) * 100
    print("compression ratio:", str(round(compression_ratio, 2)) + "x")
    print(
        f"A sparse copy of the pool file is {destination_pool_file_sparse_copy_file_size}B (~{int(destination_pool_file_sparse_copy_file_size/1024/1024)}MB)"
    )

    if ipython:
        import IPython
        IPython.embed()