def test_get_file_system_type_no_fstype_found(self):
    """Verify that a nonexistent path makes the fstype lookup raise an error."""
    missing_path = os.path.join(
        os.path.dirname(__file__), "BmapHelpers/file/does/not/exist"
    )
    with self.assertRaises(BmapHelpers.Error):
        BmapHelpers.get_file_system_type(missing_path)
def _generate_compressed_files(file_path, delete=True):
    """
    This is a generator which yields compressed versions of a file
    'file_path'. The 'delete' argument specifies whether the compressed files
    that this generator yields have to be automatically deleted.

    Yields the path of each temporary compressed file, one for every
    compression tool available on the host.
    """

    # Make sure the temporary files start with the same name as 'file_obj' in
    # order to simplify debugging.
    prefix = os.path.splitext(os.path.basename(file_path))[0] + '.'
    # Put the temporary files in the directory with 'file_obj'
    directory = os.path.dirname(file_path)

    # (decompressor, archiver, file suffix, compression command options)
    compressors = [
        ("bzip2", None, ".bz2", "-c -k"),
        ("pbzip2", None, ".p.bz2", "-c -k"),
        ("gzip", None, ".gz", "-c"),
        ("pigz", None, ".p.gz", "-c -k"),
        ("xz", None, ".xz", "-c -k"),
        ("lzop", None, ".lzo", "-c -k"),
        ("lz4", None, ".lz4", "-c -k"),
        # Bug fix: the options were "" here, but 'zstd FILE' without
        # '-c/--stdout' writes FILE.zst next to the source instead of
        # writing to stdout, so the redirected temporary file stayed empty.
        # Use '-c -k' like the other single-file compressors.
        ("zstd", None, ".zst", "-c -k"),
        # The "-P -C /" trick is used to avoid silly warnings:
        # "tar: Removing leading `/' from member names"
        ("bzip2", "tar", ".tar.bz2", "-c -j -O -P -C /"),
        ("gzip", "tar", ".tar.gz", "-c -z -O -P -C /"),
        ("xz", "tar", ".tar.xz", "-c -J -O -P -C /"),
        ("lzop", "tar", ".tar.lzo", "-c --lzo -O -P -C /"),
        ("lz4", "tar", ".tar.lz4", "-c -Ilz4 -O -P -C /"),
        ("zstd", "tar", ".tar.zst", "-c -Izstd -O -P -C /"),
        ("zip", None, ".zip", "-q -j -")
    ]

    for decompressor, archiver, suffix, options in compressors:
        # Skip formats whose tools are not installed on this host.
        if not BmapHelpers.program_is_available(decompressor):
            continue
        if archiver and not BmapHelpers.program_is_available(archiver):
            continue

        tmp_file_obj = tempfile.NamedTemporaryFile('wb+', prefix=prefix,
                                                   delete=delete,
                                                   dir=directory,
                                                   suffix=suffix)

        # When an archiver (tar) is involved, it drives the compression
        # itself via the '-I…'/'-j'/'-z' options.
        if archiver:
            args = archiver + " " + options + " " + file_path
        else:
            args = decompressor + " " + options + " " + file_path

        # Redirect the compressed stream straight into the temporary file.
        child_process = subprocess.Popen(args, shell=True,
                                         stderr=subprocess.PIPE,
                                         stdout=tmp_file_obj)
        child_process.wait()
        tmp_file_obj.flush()

        yield tmp_file_obj.name
        tmp_file_obj.close()
def test_is_zfs_configuration_compatible_invalid_read_value(self):
    """An unparsable (empty) ZFS parameter file must raise BmapHelpers.Error."""
    with tempfile.NamedTemporaryFile(
        "a", prefix="testfile_", delete=True, dir=".", suffix=".txt"
    ) as param_file:
        patched = mock.patch.object(
            BmapHelpers, "ZFS_COMPAT_PARAM_PATH", param_file.name
        )
        with self.assertRaises(BmapHelpers.Error):
            with patched:
                BmapHelpers.is_zfs_configuration_compatible()
def _generate_compressed_files(file_path, delete=True): """ This is a generator which yields compressed versions of a file 'file_path'. The 'delete' argument specifies whether the compressed files that this generator yields have to be automatically deleted. """ # Make sure the temporary files start with the same name as 'file_obj' in # order to simplify debugging. prefix = os.path.splitext(os.path.basename(file_path))[0] + '.' # Put the temporary files in the directory with 'file_obj' directory = os.path.dirname(file_path) compressors = [("bzip2", None, ".bz2", "-c -k"), ("pbzip2", None, ".p.bz2", "-c -k"), ("gzip", None, ".gz", "-c"), ("pigz", None, ".p.gz", "-c -k"), ("xz", None, ".xz", "-c -k"), ("lzop", None, ".lzo", "-c -k"), ("lz4", None, ".lz4", "-c -k"), # The "-P -C /" trick is used to avoid silly warnings: # "tar: Removing leading `/' from member names" ("bzip2", "tar", ".tar.bz2", "-c -j -O -P -C /"), ("gzip", "tar", ".tar.gz", "-c -z -O -P -C /"), ("xz", "tar", ".tar.xz", "-c -J -O -P -C /"), ("lzop", "tar", ".tar.lzo", "-c --lzo -O -P -C /"), ("lz4", "tar", ".tar.lz4", "-c -Ilz4 -O -P -C /"), ("zip", None, ".zip", "-q -j -")] for decompressor, archiver, suffix, options in compressors: if not BmapHelpers.program_is_available(decompressor): continue if archiver and not BmapHelpers.program_is_available(archiver): continue tmp_file_obj = tempfile.NamedTemporaryFile('wb+', prefix=prefix, delete=delete, dir=directory, suffix=suffix) if archiver: args = archiver + " " + options + " " + file_path else: args = decompressor + " " + options + " " + file_path child_process = subprocess.Popen(args, shell=True, stderr=subprocess.PIPE, stdout=tmp_file_obj) child_process.wait() tmp_file_obj.flush() yield tmp_file_obj.name tmp_file_obj.close()
def __init__(self, image):
    """
    Initialize a class instance. The 'image' argument is full path to the
    file or file object to operate on.

    Raises 'Error' if the image cannot be stat'ed, flushed, synchronized,
    or lives on a file system that fails the compatibility check.
    """

    self._f_image_needs_close = False

    if hasattr(image, "fileno"):
        # 'image' is an already-open file object.
        self._f_image = image
        self._image_path = image.name
    else:
        self._image_path = image
        self._open_image_file()

    try:
        self.image_size = os.fstat(self._f_image.fileno()).st_size
    except IOError as err:
        raise Error("cannot get information about file '%s': %s"
                    % (self._f_image.name, err))

    try:
        self.block_size = BmapHelpers.get_block_size(self._f_image)
    except IOError as err:
        raise Error("cannot get block size for '%s': %s"
                    % (self._image_path, err))

    # Number of blocks, rounded up to cover a partial trailing block.
    self.blocks_cnt = \
        (self.image_size + self.block_size - 1) // self.block_size

    try:
        self._f_image.flush()
    except IOError as err:
        raise Error("cannot flush image file '%s': %s"
                    % (self._image_path, err))

    try:
        # Bug fix: the original statement had a stray trailing comma which
        # turned it into a 1-tuple expression (harmless, but a typo).
        os.fsync(self._f_image.fileno())
    except OSError as err:
        raise Error("cannot synchronize image file '%s': %s "
                    % (self._image_path, err.strerror))

    if not BmapHelpers.is_compatible_file_system(self._image_path):
        fstype = BmapHelpers.get_file_system_type(self._image_path)
        raise Error("image file on incompatible file system '%s': '%s': see docs for fix"
                    % (self._image_path, fstype))

    _log.debug("opened image \"%s\"" % self._image_path)
    _log.debug("block size %d, blocks count %d, image size %d"
               % (self.block_size, self.blocks_cnt, self.image_size))
def test_get_file_system_type(self):
    """A regular temporary file must report a non-empty file system type."""
    with tempfile.NamedTemporaryFile(
        "r", prefix="testfile_", delete=True, dir=".", suffix=".img"
    ) as tmp:
        self.assertTrue(BmapHelpers.get_file_system_type(tmp.name))
def test_is_zfs_configuration_compatible_unreadable_file(self, mock_open):
    """An IOError while reading the ZFS parameter file must surface as Error."""
    mock_open.side_effect = IOError
    with self.assertRaises(BmapHelpers.Error):
        compatible = BmapHelpers.is_zfs_configuration_compatible()
        # Defensive: if the helper swallowed the IOError and returned a
        # falsy value, still fail the assertRaises check explicitly.
        if not compatible:
            raise BmapHelpers.Error
def test_is_zfs_configuration_compatible_notinstalled(self):
    """When the ZFS parameter file is absent, the check reports False."""
    missing = os.path.join(
        os.path.dirname(__file__), "BmapHelpers/file/does/not/exist"
    )
    with mock.patch.object(BmapHelpers, "ZFS_COMPAT_PARAM_PATH", missing):
        self.assertFalse(BmapHelpers.is_zfs_configuration_compatible())
def test_is_compatible_file_system_ext4(self, mock_get_fs_type):  # pylint: disable=unused-argument
    """Non-zfs file systems must pass the compatibility check."""
    with tempfile.NamedTemporaryFile(
        "w+", prefix="testfile_", delete=True, dir=".", suffix=".img"
    ) as tmp:
        self.assertTrue(BmapHelpers.is_compatible_file_system(tmp.name))
def test_is_compatible_file_system_zfs_invalid(self, mock_get_fs_type):  # pylint: disable=unused-argument
    """A bad ZFS parameter value must make the compatibility check fail."""
    with tempfile.NamedTemporaryFile(
        "w+", prefix="testfile_", delete=True, dir=".", suffix=".img"
    ) as param_file:
        # "0" is an invalid setting for the mocked ZFS parameter file.
        param_file.write("0")
        param_file.flush()
        with mock.patch.object(
            BmapHelpers, "ZFS_COMPAT_PARAM_PATH", param_file.name
        ):
            self.assertFalse(
                BmapHelpers.is_compatible_file_system(param_file.name)
            )
def test_get_file_system_type_symlink(self):
    """A symlink must resolve to its target's file system type."""
    # NOTE(review): 'btempfile' is presumably a Python-2 backport alias for
    # 'tempfile' defined in this module's imports — confirm.
    with btempfile.TemporaryDirectory(prefix="testdir_", dir=".") as tmpdir:
        target = tempfile.NamedTemporaryFile(
            "r", prefix="testfile_", delete=False, dir=tmpdir, suffix=".img"
        )
        link_path = os.path.join(tmpdir, "test_symlink")
        os.symlink(target.name, link_path)
        self.assertTrue(BmapHelpers.get_file_system_type(link_path))
def test_is_zfs_configuration_compatible_disabled(self):
    """A '0' in the ZFS parameter file means the configuration is incompatible."""
    with tempfile.NamedTemporaryFile(
        "w+", prefix="testfile_", delete=True, dir=".", suffix=".txt"
    ) as param_file:
        param_file.write("0")
        param_file.flush()
        with mock.patch.object(
            BmapHelpers, "ZFS_COMPAT_PARAM_PATH", param_file.name
        ):
            self.assertFalse(BmapHelpers.is_zfs_configuration_compatible())
def _create_random_sparse_file(file_obj, size):
    """
    Create a sparse file with randomly distributed holes. The mapped areas
    are filled with semi-random data. Returns a tuple containing 2 lists:
      1. a list of mapped block ranges, same as
         'Filemap.get_mapped_ranges()'
      2. a list of unmapped block ranges (holes), same as
         'Filemap.get_unmapped_ranges()'
    """

    # Start from an empty file.
    file_obj.truncate(0)
    block_size = BmapHelpers.get_block_size(file_obj)
    # Block count rounded up; NOTE: Python 2 code ('xrange'/'.next()' below),
    # so '/' here is integer floor division.
    blocks_cnt = (size + block_size - 1) / block_size

    def process_block(block):
        """
        This is a helper function which processes a block. It randomly
        decides whether the block should be filled with random data or
        should become a hole. Returns 'True' if the block was mapped and
        'False' otherwise.
        """

        map_the_block = random.getrandbits(1)

        if map_the_block:
            # Randomly select how much we are going to write
            seek = random.randint(0, block_size - 1)
            write = random.randint(1, block_size - seek)
            assert seek + write <= block_size
            file_obj.seek(block * block_size + seek)
            file_obj.write(chr(random.getrandbits(8)) * write)
        else:
            # Extend the file without writing data — this leaves a hole.
            file_obj.truncate(block * block_size)

        return map_the_block

    mapped = []
    unmapped = []
    # 'groupby' clusters consecutive blocks with the same mapped/hole
    # verdict into one range.
    iterator = xrange(0, blocks_cnt)
    for was_mapped, group in itertools.groupby(iterator, process_block):
        # Start of a mapped region or a hole. Find the last element in the
        # group.
        first = group.next()
        last = first
        for last in group:
            pass

        if was_mapped:
            mapped.append((first, last))
        else:
            unmapped.append((first, last))

    # Fix the final file length (the last block may be partial).
    file_obj.truncate(size)
    file_obj.flush()

    return (mapped, unmapped)
def _create_random_sparse_file(file_obj, size):
    """
    Create a sparse file with randomly distributed holes. The mapped areas
    are filled with semi-random data. Returns a tuple containing 2 lists:
      1. a list of mapped block ranges, same as
         'Filemap.get_mapped_ranges()'
      2. a list of unmapped block ranges (holes), same as
         'Filemap.get_unmapped_ranges()'
    """

    # Pre-size the file; blocks that are never written stay holes.
    file_obj.truncate(size)
    block_size = BmapHelpers.get_block_size(file_obj)
    # Block count rounded up; NOTE: Python 2 code ('xrange'/'.next()' below),
    # so '/' here is integer floor division.
    blocks_cnt = (size + block_size - 1) / block_size

    def process_block(block):
        """
        This is a helper function which processes a block. It randomly
        decides whether the block should be filled with random data or
        should become a hole. Returns 'True' if the block was mapped and
        'False' otherwise.
        """

        map_the_block = random.getrandbits(1)

        if map_the_block:
            # Randomly select how much we are going to write
            seek = random.randint(0, block_size - 1)
            write = random.randint(1, block_size - seek)
            assert seek + write <= block_size
            file_obj.seek(block * block_size + seek)
            file_obj.write(chr(random.getrandbits(8)) * write)

        return map_the_block

    mapped = []
    unmapped = []
    # 'groupby' clusters consecutive blocks with the same mapped/hole
    # verdict into one range.
    iterator = xrange(0, blocks_cnt)
    for was_mapped, group in itertools.groupby(iterator, process_block):
        # Start of a mapped region or a hole. Find the last element in the
        # group.
        first = group.next()
        last = first
        for last in group:
            pass

        if was_mapped:
            mapped.append((first, last))
        else:
            unmapped.append((first, last))

    # Fix the final file length (the last block may be partial).
    file_obj.truncate(size)
    file_obj.flush()

    return (mapped, unmapped)
def __init__(self, image, log=None):
    """
    Initialize a class instance. The 'image' argument is full path to the
    file or file object to operate on. The optional 'log' argument is a
    'logging.Logger' to use; a module logger is created when omitted.

    Raises 'Error' if the image cannot be stat'ed, flushed, or
    synchronized.
    """

    self._log = log
    if self._log is None:
        self._log = logging.getLogger(__name__)

    self._f_image_needs_close = False

    if hasattr(image, "fileno"):
        # 'image' is an already-open file object.
        self._f_image = image
        self._image_path = image.name
    else:
        self._image_path = image
        self._open_image_file()

    try:
        self.image_size = os.fstat(self._f_image.fileno()).st_size
    except IOError as err:
        raise Error("cannot get information about file '%s': %s"
                    % (self._f_image.name, err))

    try:
        self.block_size = BmapHelpers.get_block_size(self._f_image)
    except IOError as err:
        raise Error("cannot get block size for '%s': %s"
                    % (self._image_path, err))

    # Number of blocks, rounded up to cover a partial trailing block.
    # Bug fix: the original used '/=' which is true division under
    # Python 3 and produced a float block count; '//' floors in both
    # Python 2 and 3.
    self.blocks_cnt = \
        (self.image_size + self.block_size - 1) // self.block_size

    try:
        self._f_image.flush()
    except IOError as err:
        raise Error("cannot flush image file '%s': %s"
                    % (self._image_path, err))

    try:
        # Bug fix: the original statement had a stray trailing comma which
        # turned it into a 1-tuple expression (harmless, but a typo).
        os.fsync(self._f_image.fileno())
    except OSError as err:
        raise Error("cannot synchronize image file '%s': %s "
                    % (self._image_path, err.strerror))

    self._log.debug("opened image \"%s\"" % self._image_path)
    self._log.debug("block size %d, blocks count %d, image size %d"
                    % (self.block_size, self.blocks_cnt, self.image_size))
def __init__(self, image):
    """
    Initialize a class instance. The 'image' argument is full path to the
    file or file object to operate on.

    Raises 'Error' if the image cannot be stat'ed, flushed, or
    synchronized.
    """

    self._f_image_needs_close = False

    if hasattr(image, "fileno"):
        # 'image' is an already-open file object.
        self._f_image = image
        self._image_path = image.name
    else:
        self._image_path = image
        self._open_image_file()

    try:
        self.image_size = os.fstat(self._f_image.fileno()).st_size
    except IOError as err:
        raise Error("cannot get information about file '%s': %s"
                    % (self._f_image.name, err))

    try:
        self.block_size = BmapHelpers.get_block_size(self._f_image)
    except IOError as err:
        raise Error("cannot get block size for '%s': %s"
                    % (self._image_path, err))

    # Number of blocks, rounded up to cover a partial trailing block.
    # Bug fix: the original used '/=' which is true division under
    # Python 3 and produced a float block count; '//' floors in both
    # Python 2 and 3.
    self.blocks_cnt = \
        (self.image_size + self.block_size - 1) // self.block_size

    try:
        self._f_image.flush()
    except IOError as err:
        raise Error("cannot flush image file '%s': %s"
                    % (self._image_path, err))

    try:
        # Bug fix: the original statement had a stray trailing comma which
        # turned it into a 1-tuple expression (harmless, but a typo).
        os.fsync(self._f_image.fileno())
    except OSError as err:
        raise Error("cannot synchronize image file '%s': %s "
                    % (self._image_path, err.strerror))

    _log.debug("opened image \"%s\"" % self._image_path)
    _log.debug("block size %d, blocks count %d, image size %d"
               % (self.block_size, self.blocks_cnt, self.image_size))
def generate_test_files(max_size=4*1024*1024, directory=None, delete=True):
    """
    This is a generator which yields files which other tests use as the
    input for the testing. The generator tries to yield "interesting" files
    which cover various corner-cases. For example, a large hole file, a file
    with no holes, files of unaligned length, etc.

    The 'directory' argument specifies the directory path where the yielded
    test files should be created. The 'delete' argument specifies whether the
    yielded test files have to be automatically deleted.

    The generator yields tuples consisting of the following elements:
      1. the test file object
      2. file size in bytes
      3. a list of mapped block ranges, same as
         'Filemap.get_mapped_ranges()'
      4. a list of unmapped block ranges (holes), same as
         'Filemap.get_unmapped_ranges()'
    """

    #
    # Generate sparse files with one single hole spanning the entire file
    #

    # A block-sized hole
    file_obj = tempfile.NamedTemporaryFile("wb+", prefix="4Khole_",
                                           delete=delete, dir=directory,
                                           suffix=".img")
    block_size = BmapHelpers.get_block_size(file_obj)
    file_obj.truncate(block_size)
    yield (file_obj, block_size, [], [(0, 0)])
    file_obj.close()

    # A block size + 1 byte hole
    file_obj = tempfile.NamedTemporaryFile("wb+", prefix="4Khole_plus_1_",
                                           delete=delete, dir=directory,
                                           suffix=".img")
    file_obj.truncate(block_size + 1)
    yield (file_obj, block_size + 1, [], [(0, 1)])
    file_obj.close()

    # A block size - 1 byte hole
    file_obj = tempfile.NamedTemporaryFile("wb+", prefix="4Khole_minus_1_",
                                           delete=delete, dir=directory,
                                           suffix=".img")
    file_obj.truncate(block_size - 1)
    yield (file_obj, block_size - 1, [], [(0, 0)])
    file_obj.close()

    # A 1-byte hole
    file_obj = tempfile.NamedTemporaryFile("wb+", prefix="1byte_hole_",
                                           delete=delete, dir=directory,
                                           suffix=".img")
    file_obj.truncate(1)
    yield (file_obj, 1, [], [(0, 0)])
    file_obj.close()

    # And 10 holes of random size
    for i in xrange(10):
        size = random.randint(1, max_size)
        file_obj = tempfile.NamedTemporaryFile("wb+", suffix=".img",
                                               delete=delete, dir=directory,
                                               prefix="rand_hole_%d_" % i)
        file_obj.truncate(size)
        # NOTE: Python 2 integer division ('xrange' above confirms py2).
        blocks_cnt = (size + block_size - 1) / block_size
        yield (file_obj, size, [], [(0, blocks_cnt - 1)])
        file_obj.close()

    #
    # Generate a random sparse files
    #

    # The maximum size
    file_obj = tempfile.NamedTemporaryFile("wb+", prefix="sparse_",
                                           delete=delete, dir=directory,
                                           suffix=".img")
    mapped, unmapped = _create_random_sparse_file(file_obj, max_size)
    yield (file_obj, max_size, mapped, unmapped)
    file_obj.close()

    # The maximum size + 1 byte
    file_obj = tempfile.NamedTemporaryFile("wb+", prefix="sparse_plus_1_",
                                           delete=delete, dir=directory,
                                           suffix=".img")
    mapped, unmapped = _create_random_sparse_file(file_obj, max_size + 1)
    yield (file_obj, max_size + 1, mapped, unmapped)
    file_obj.close()

    # The maximum size - 1 byte
    file_obj = tempfile.NamedTemporaryFile("wb+", prefix="sparse_minus_1_",
                                           delete=delete, dir=directory,
                                           suffix=".img")
    mapped, unmapped = _create_random_sparse_file(file_obj, max_size - 1)
    yield (file_obj, max_size - 1, mapped, unmapped)
    file_obj.close()

    # And 10 files of random size
    for i in xrange(10):
        size = random.randint(1, max_size)
        file_obj = tempfile.NamedTemporaryFile("wb+", suffix=".img",
                                               delete=delete, dir=directory,
                                               prefix="sparse_%d_" % i)
        mapped, unmapped = _create_random_sparse_file(file_obj, size)
        yield (file_obj, size, mapped, unmapped)
        file_obj.close()

    #
    # Generate random fully-mapped files
    #

    # A block-sized file
    file_obj = tempfile.NamedTemporaryFile("wb+", prefix="4Kmapped_",
                                           delete=delete, dir=directory,
                                           suffix=".img")
    _create_random_file(file_obj, block_size)
    yield (file_obj, block_size, [(0, 0)], [])
    file_obj.close()

    # A block size + 1 byte file
    file_obj = tempfile.NamedTemporaryFile("wb+", prefix="4Kmapped_plus_1_",
                                           delete=delete, dir=directory,
                                           suffix=".img")
    _create_random_file(file_obj, block_size + 1)
    yield (file_obj, block_size + 1, [(0, 1)], [])
    file_obj.close()

    # A block size - 1 byte file
    file_obj = tempfile.NamedTemporaryFile("wb+",
                                           prefix="4Kmapped_minus_1_",
                                           delete=delete, dir=directory,
                                           suffix=".img")
    _create_random_file(file_obj, block_size - 1)
    yield (file_obj, block_size - 1, [(0, 0)], [])
    file_obj.close()

    # A 1-byte file
    file_obj = tempfile.NamedTemporaryFile("wb+", prefix="1byte_mapped_",
                                           delete=delete, dir=directory,
                                           suffix=".img")
    _create_random_file(file_obj, 1)
    yield (file_obj, 1, [(0, 0)], [])
    file_obj.close()

    # And 10 mapped files of random size
    for i in xrange(10):
        size = random.randint(1, max_size)
        file_obj = tempfile.NamedTemporaryFile("wb+", suffix=".img",
                                               delete=delete, dir=directory,
                                               prefix="rand_mapped_%d_" % i)
        _create_random_file(file_obj, size)
        blocks_cnt = (size + block_size - 1) / block_size
        yield (file_obj, size, [(0, blocks_cnt - 1)], [])
        file_obj.close()
def _open_url_ssh(self, parsed_url):
    """
    This function opens a file on a remote host using SSH. The URL has to
    have this format: "ssh://username@hostname:path". Currently we only
    support password-based authentication.

    Raises 'Error' when the required client programs are missing, the
    connection fails, or the remote file is not a readable regular file.
    """

    username = parsed_url.username
    password = parsed_url.password
    path = parsed_url.path
    hostname = parsed_url.hostname
    if username:
        hostname = username + "@" + hostname

    # Make sure the ssh client program is installed
    if not BmapHelpers.program_is_available("ssh"):
        raise Error("the \"ssh\" program is not available but it is "
                    "required for downloading over the ssh protocol")

    # Prepare the commands that we are going to run
    if password:
        # In case of password we have to use the sshpass tool to pass the
        # password to the ssh client utility
        popen_args = ["sshpass",
                      "-p" + password,
                      "ssh",
                      "-o StrictHostKeyChecking=no",
                      "-o PubkeyAuthentication=no",
                      "-o PasswordAuthentication=yes",
                      hostname]

        # Bug fix: the original checked for "ssh" here (copy-paste error),
        # so a missing 'sshpass' was never detected even though the error
        # message below talks about it.
        if not BmapHelpers.program_is_available("sshpass"):
            raise Error("the \"sshpass\" program is not available but it "
                        "is required for password-based SSH authentication")
    else:
        popen_args = ["ssh",
                      "-o StrictHostKeyChecking=no",
                      "-o PubkeyAuthentication=yes",
                      "-o PasswordAuthentication=no",
                      "-o BatchMode=yes",
                      hostname]

    # Test if we can successfully connect
    child_process = subprocess.Popen(popen_args + ["true"])
    child_process.wait()
    retcode = child_process.returncode
    if retcode != 0:
        decoded = _decode_sshpass_exit_code(retcode)
        raise Error("cannot connect to \"%s\": %s (error code %d)"
                    % (hostname, decoded, retcode))

    # Test if file exists by running "test -f path && test -r path" on the
    # host.
    # NOTE(review): 'path' is interpolated into a remote shell command; a
    # hostile URL could inject remote commands — consider shell-quoting it.
    command = "test -f " + path + " && test -r " + path
    child_process = subprocess.Popen(popen_args + [command],
                                     bufsize=1024*1024,
                                     stdout=subprocess.PIPE)
    child_process.wait()
    if child_process.returncode != 0:
        raise Error("\"%s\" on \"%s\" cannot be read: make sure it "
                    "exists, is a regular file, and you have read "
                    "permissions" % (path, hostname))

    # Read the entire file using 'cat'
    child_process = subprocess.Popen(popen_args + ["cat " + path],
                                     stdout=subprocess.PIPE)

    # Now the contents of the file should be available from sub-processes
    # stdout
    self._f_objs.append(child_process.stdout)
    self._child_processes.append(child_process)

    self.is_url = True
    self._force_fake_seek = True
def _open_compressed_file(self):
    """
    Detect file compression type and open it with the corresponding
    compression module, or just plain 'open() if the file is not compressed.
    """

    def is_gzip(name):
        """Returns 'True' if file 'name' is compressed with 'gzip'."""
        if name.endswith('.gzip') or \
           (name.endswith('.gz') and not name.endswith('.tar.gz')):
            return True
        return False

    def is_bzip2(name):
        """Returns 'True' if file 'name' is compressed with 'bzip2'."""
        if name.endswith('.bz2') and not name.endswith('.tar.bz2'):
            return True
        return False

    def is_xz(name):
        """Returns 'True' if file 'name' is compressed with 'xz'."""
        if name.endswith('.xz') and not name.endswith('.tar.xz'):
            return True
        return False

    def is_lzop(name):
        """Returns 'True' if file 'name' is compressed with 'lzop'."""
        if name.endswith('.lzo') and not name.endswith('.tar.lzo'):
            return True
        return False

    def is_tar_gz(name):
        """
        Returns 'True' if file 'name' is a tar archive compressed with
        'gzip'.
        """
        if name.endswith('.tar.gz') or name.endswith('.tgz'):
            return True
        return False

    def is_tar_bz2(name):
        """
        Returns 'True' if file 'name' is a tar archive compressed with
        'bzip2'.
        """
        if name.endswith('.tar.bz2') or name.endswith('.tbz') or \
           name.endswith('.tbz2') or name.endswith('.tb2'):
            return True
        return False

    def is_tar_xz(name):
        """
        Returns 'True' if file 'name' is a tar archive compressed with
        'xz'.
        """
        if name.endswith('.tar.xz') or name.endswith('.txz'):
            return True
        return False

    def is_tar_lzo(name):
        """
        Returns 'True' if file 'name' is a tar archive compressed with
        'lzop'.
        """
        if name.endswith('.tar.lzo') or name.endswith('.tzo'):
            return True
        return False

    # Pick the decompressor program (and the archiver, for tarballs) from
    # the file name suffix.
    archiver = None
    if is_tar_gz(self.name) or is_gzip(self.name):
        self.compression_type = 'gzip'
        # Prefer the parallel implementation when it is installed.
        if BmapHelpers.program_is_available("pigz"):
            decompressor = "pigz"
        else:
            decompressor = "gzip"

        if is_gzip(self.name):
            args = "-d -c"
        else:
            archiver = "tar"
            args = "-x -z -O"
    elif is_tar_bz2(self.name) or is_bzip2(self.name):
        self.compression_type = 'bzip2'
        if BmapHelpers.program_is_available("pbzip2"):
            decompressor = "pbzip2"
        else:
            decompressor = "bzip2"

        if is_bzip2(self.name):
            args = "-d -c"
        else:
            archiver = "tar"
            args = "-x -j -O"
    elif is_tar_xz(self.name) or is_xz(self.name):
        self.compression_type = 'xz'
        decompressor = "xz"
        if is_xz(self.name):
            args = "-d -c"
        else:
            archiver = "tar"
            args = "-x -J -O"
    elif is_tar_lzo(self.name) or is_lzop(self.name):
        self.compression_type = 'lzo'
        decompressor = "lzop"
        if is_lzop(self.name):
            args = "-d -c"
        else:
            archiver = "tar"
            args = "-x --lzo -O"
    else:
        # Not a recognized compressed format; for local files the size is
        # known directly from the file descriptor.
        if not self.is_url:
            self.size = os.fstat(self._f_objs[-1].fileno()).st_size
        return

    # Make sure decompressor and the archiver programs are available
    if not BmapHelpers.program_is_available(decompressor):
        raise Error("the \"%s\" program is not available but it is "
                    "required decompressing \"%s\""
                    % (decompressor, self.name))
    if archiver and not BmapHelpers.program_is_available(archiver):
        raise Error("the \"%s\" program is not available but it is "
                    "required reading \"%s\"" % (archiver, self.name))

    # Start the decompressor process. We'll send the data to its stdin and
    # read the decompressed data from its stdout.
    if archiver:
        args = archiver + " " + args
    else:
        args = decompressor + " " + args

    child_process = subprocess.Popen(args, shell=True, bufsize=1024*1024,
                                     stdin=subprocess.PIPE,
                                     stdout=subprocess.PIPE,
                                     stderr=subprocess.PIPE)

    # A daemon reader thread pumps the raw stream into the decompressor's
    # stdin.
    args = (self._f_objs[-1], child_process.stdin, )
    self._rthread = threading.Thread(target=self._read_thread, args=args)
    self._rthread.daemon = True
    self._rthread.start()

    # Reads now come from the decompressor's stdout (a pipe).
    self._force_fake_seek = True
    self._f_objs.append(child_process.stdout)
    self._child_processes.append(child_process)
def copy_command(args):
    """
    Copy an image to a block device or a regular file using bmap.

    'args' are the parsed command-line arguments. Exits via 'error_out()'
    on conflicting options, missing bmap data, or copy/sync failures.
    """

    if args.nobmap and args.bmap:
        error_out("--nobmap and --bmap cannot be used together")

    if args.bmap_sig and args.no_sig_verify:
        error_out("--bmap-sig and --no-sig-verify cannot be used together")

    image_obj, dest_obj, bmap_obj, bmap_path, image_size, dest_is_blkdev = \
        open_files(args)

    if args.bmap_sig and not bmap_obj:
        error_out("the bmap signature file was specified, but bmap file was "
                  "not found")

    # Signature verification may hand back a replacement bmap file object.
    f_obj = verify_bmap_signature(args, bmap_obj, bmap_path)
    if f_obj:
        bmap_obj.close()
        bmap_obj = f_obj

    if bmap_obj:
        bmap_obj = NamedFile(bmap_obj, bmap_path)

    try:
        if dest_is_blkdev:
            dest_str = "block device '%s'" % args.dest
            # For block devices, use the specialized class
            writer = BmapCopy.BmapBdevCopy(image_obj, dest_obj, bmap_obj,
                                           image_size)
        else:
            dest_str = "file '%s'" % os.path.basename(args.dest)
            writer = BmapCopy.BmapCopy(image_obj, dest_obj, bmap_obj,
                                       image_size)
    except BmapCopy.Error as err:
        error_out(err)

    # Print the progress indicator while copying
    if not args.quiet and not args.debug and \
       sys.stderr.isatty() and sys.stdout.isatty():
        writer.set_progress_indicator(sys.stderr,
                                      "bmaptool: info: %d%% copied")

    start_time = time.time()
    if not bmap_obj:
        if args.nobmap:
            log.info("no bmap given, copy entire image to '%s'" % args.dest)
        else:
            error_out("bmap file not found, please, use --nobmap option to "
                      "flash without bmap")
    else:
        log.info("block map format version %s" % writer.bmap_version)
        log.info("%d blocks of size %d (%s), mapped %d blocks (%s or %.1f%%)"
                 % (writer.blocks_cnt, writer.block_size,
                    writer.image_size_human, writer.mapped_cnt,
                    writer.mapped_size_human, writer.mapped_percent))
        log.info("copying image '%s' to %s using bmap file '%s'"
                 % (os.path.basename(args.image), dest_str,
                    os.path.basename(bmap_path)))

    try:
        try:
            writer.copy(False, not args.no_verify)
        except (BmapCopy.Error, TransRead.Error) as err:
            error_out(err)

        # Synchronize the block device
        log.info("synchronizing '%s'" % args.dest)
        try:
            writer.sync()
        except BmapCopy.Error as err:
            error_out(err)
    except KeyboardInterrupt:
        error_out("interrupted, exiting")

    copying_time = time.time() - start_time
    # Floor division keeps the reported bytes/sec an integer.
    copying_speed = writer.mapped_size // copying_time
    log.info("copying time: %s, copying speed %s/sec"
             % (BmapHelpers.human_time(copying_time),
                BmapHelpers.human_size(copying_speed)))

    dest_obj.close()
    if bmap_obj:
        bmap_obj.close()
    image_obj.close()
def copy_command(args):
    """
    Copy an image to a block device or a regular file using bmap.

    'args' are the parsed command-line arguments. Logs an error and raises
    'SystemExit(1)' on conflicting options, missing bmap data, or copy/sync
    failures.
    """

    if args.nobmap and args.bmap:
        log.error("--nobmap and --bmap cannot be used together")
        raise SystemExit(1)

    if args.bmap_sig and args.no_sig_verify:
        log.error("--bmap-sig and --no-sig-verify cannot be used together")
        raise SystemExit(1)

    image_obj, dest_obj, bmap_obj, bmap_path, image_size, dest_is_blkdev = \
        open_files(args)

    if args.bmap_sig and not bmap_obj:
        log.error("the bmap signature file was specified, but bmap file "
                  "was not found")
        raise SystemExit(1)

    # Signature verification may hand back a replacement bmap file object.
    f_obj = verify_bmap_signature(args, bmap_obj, bmap_path)
    if f_obj:
        bmap_obj.close()
        bmap_obj = f_obj

    if bmap_obj:
        bmap_obj = NamedFile(bmap_obj, bmap_path)

    try:
        if dest_is_blkdev:
            dest_str = "block device '%s'" % args.dest
            # For block devices, use the specialized class
            writer = BmapCopy.BmapBdevCopy(image_obj, dest_obj, bmap_obj,
                                           image_size)
        else:
            dest_str = "file '%s'" % os.path.basename(args.dest)
            writer = BmapCopy.BmapCopy(image_obj, dest_obj, bmap_obj,
                                       image_size)
    except BmapCopy.Error as err:
        log.error(str(err))
        raise SystemExit(1)

    # Print the progress indicator while copying
    if not args.quiet and not args.debug and \
       os.isatty(sys.stderr.fileno()) and os.isatty(sys.stdout.fileno()):
        writer.set_progress_indicator(sys.stderr,
                                      "bmaptool: info: %d%% copied")

    start_time = time.time()
    if not bmap_obj:
        if args.nobmap:
            log.info("no bmap given, copy entire image to '%s'" % args.dest)
        else:
            log.error("bmap file not found, please, use --nobmap option to "
                      "flash without bmap")
            raise SystemExit(1)
    else:
        log.info("block map format version %s" % writer.bmap_version)
        log.info("%d blocks of size %d (%s), mapped %d blocks (%s or %.1f%%)"
                 % (writer.blocks_cnt, writer.block_size,
                    writer.image_size_human, writer.mapped_cnt,
                    writer.mapped_size_human, writer.mapped_percent))
        log.info("copying image '%s' to %s using bmap file '%s'"
                 % (os.path.basename(args.image), dest_str,
                    os.path.basename(bmap_path)))

    try:
        try:
            writer.copy(False, not args.no_verify)
        except (BmapCopy.Error, TransRead.Error) as err:
            log.error(str(err))
            raise SystemExit(1)

        # Synchronize the block device
        log.info("synchronizing '%s'" % args.dest)
        try:
            writer.sync()
        except BmapCopy.Error as err:
            log.error(str(err))
            raise SystemExit(1)
    except KeyboardInterrupt:
        log.error("the program is interrupted, exiting")
        raise SystemExit(1)

    copying_time = time.time() - start_time
    copying_speed = writer.mapped_size / copying_time
    log.info("copying time: %s, copying speed %s/sec"
             % (BmapHelpers.human_time(copying_time),
                BmapHelpers.human_size(copying_speed)))

    dest_obj.close()
    if bmap_obj:
        bmap_obj.close()
    image_obj.close()
def generate_test_files(max_size=4 * 1024 * 1024, directory=None, delete=True):
    """
    This is a generator which yields files which other tests use as the
    input for the testing. The generator tries to yield "interesting" files
    which cover various corner-cases. For example, a large hole file, a file
    with no holes, files of unaligned length, etc.

    The 'directory' argument specifies the directory path where the yielded
    test files should be created. The 'delete' argument specifies whether the
    yielded test files have to be automatically deleted.

    The generator yields tuples consisting of the following elements:
      1. the test file object
      2. file size in bytes
      3. a list of mapped block ranges, same as
         'Filemap.get_mapped_ranges()'
      4. a list of unmapped block ranges (holes), same as
         'Filemap.get_unmapped_ranges()'
    """

    #
    # Generate sparse files with one single hole spanning the entire file
    #

    # A block-sized hole
    file_obj = tempfile.NamedTemporaryFile("wb+", prefix="4Khole_",
                                           delete=delete, dir=directory,
                                           suffix=".img")
    block_size = BmapHelpers.get_block_size(file_obj)
    file_obj.truncate(block_size)
    yield (file_obj, block_size, [], [(0, 0)])
    file_obj.close()

    # A block size + 1 byte hole
    file_obj = tempfile.NamedTemporaryFile("wb+", prefix="4Khole_plus_1_",
                                           delete=delete, dir=directory,
                                           suffix=".img")
    file_obj.truncate(block_size + 1)
    yield (file_obj, block_size + 1, [], [(0, 1)])
    file_obj.close()

    # A block size - 1 byte hole
    file_obj = tempfile.NamedTemporaryFile("wb+", prefix="4Khole_minus_1_",
                                           delete=delete, dir=directory,
                                           suffix=".img")
    file_obj.truncate(block_size - 1)
    yield (file_obj, block_size - 1, [], [(0, 0)])
    file_obj.close()

    # A 1-byte hole
    file_obj = tempfile.NamedTemporaryFile("wb+", prefix="1byte_hole_",
                                           delete=delete, dir=directory,
                                           suffix=".img")
    file_obj.truncate(1)
    yield (file_obj, 1, [], [(0, 0)])
    file_obj.close()

    # And 10 holes of random size
    for i in xrange(10):
        size = random.randint(1, max_size)
        file_obj = tempfile.NamedTemporaryFile("wb+", suffix=".img",
                                               delete=delete, dir=directory,
                                               prefix="rand_hole_%d_" % i)
        file_obj.truncate(size)
        # NOTE: Python 2 integer division ('xrange' above confirms py2).
        blocks_cnt = (size + block_size - 1) / block_size
        yield (file_obj, size, [], [(0, blocks_cnt - 1)])
        file_obj.close()

    #
    # Generate a random sparse files
    #

    # The maximum size
    file_obj = tempfile.NamedTemporaryFile("wb+", prefix="sparse_",
                                           delete=delete, dir=directory,
                                           suffix=".img")
    mapped, unmapped = _create_random_sparse_file(file_obj, max_size)
    yield (file_obj, max_size, mapped, unmapped)
    file_obj.close()

    # The maximum size + 1 byte
    file_obj = tempfile.NamedTemporaryFile("wb+", prefix="sparse_plus_1_",
                                           delete=delete, dir=directory,
                                           suffix=".img")
    mapped, unmapped = _create_random_sparse_file(file_obj, max_size + 1)
    yield (file_obj, max_size + 1, mapped, unmapped)
    file_obj.close()

    # The maximum size - 1 byte
    file_obj = tempfile.NamedTemporaryFile("wb+", prefix="sparse_minus_1_",
                                           delete=delete, dir=directory,
                                           suffix=".img")
    mapped, unmapped = _create_random_sparse_file(file_obj, max_size - 1)
    yield (file_obj, max_size - 1, mapped, unmapped)
    file_obj.close()

    # And 10 files of random size
    for i in xrange(10):
        size = random.randint(1, max_size)
        file_obj = tempfile.NamedTemporaryFile("wb+", suffix=".img",
                                               delete=delete, dir=directory,
                                               prefix="sparse_%d_" % i)
        mapped, unmapped = _create_random_sparse_file(file_obj, size)
        yield (file_obj, size, mapped, unmapped)
        file_obj.close()

    #
    # Generate random fully-mapped files
    #

    # A block-sized file
    file_obj = tempfile.NamedTemporaryFile("wb+", prefix="4Kmapped_",
                                           delete=delete, dir=directory,
                                           suffix=".img")
    _create_random_file(file_obj, block_size)
    yield (file_obj, block_size, [(0, 0)], [])
    file_obj.close()

    # A block size + 1 byte file
    file_obj = tempfile.NamedTemporaryFile("wb+", prefix="4Kmapped_plus_1_",
                                           delete=delete, dir=directory,
                                           suffix=".img")
    _create_random_file(file_obj, block_size + 1)
    yield (file_obj, block_size + 1, [(0, 1)], [])
    file_obj.close()

    # A block size - 1 byte file
    file_obj = tempfile.NamedTemporaryFile("wb+",
                                           prefix="4Kmapped_minus_1_",
                                           delete=delete, dir=directory,
                                           suffix=".img")
    _create_random_file(file_obj, block_size - 1)
    yield (file_obj, block_size - 1, [(0, 0)], [])
    file_obj.close()

    # A 1-byte file
    file_obj = tempfile.NamedTemporaryFile("wb+", prefix="1byte_mapped_",
                                           delete=delete, dir=directory,
                                           suffix=".img")
    _create_random_file(file_obj, 1)
    yield (file_obj, 1, [(0, 0)], [])
    file_obj.close()

    # And 10 mapped files of random size
    for i in xrange(10):
        size = random.randint(1, max_size)
        file_obj = tempfile.NamedTemporaryFile("wb+", suffix=".img",
                                               delete=delete, dir=directory,
                                               prefix="rand_mapped_%d_" % i)
        _create_random_file(file_obj, size)
        blocks_cnt = (size + block_size - 1) / block_size
        yield (file_obj, size, [(0, blocks_cnt - 1)], [])
        file_obj.close()
def _open_compressed_file(self):
    """
    Detect the compression type of the file from its name suffix and, if it
    is compressed, push a decompressor child process onto the I/O stack.

    The method inspects 'self.name' with a set of suffix predicates. For a
    compressed file/archive it selects an external decompressor (and,
    for tarballs, the 'tar' archiver), verifies the program is available,
    spawns it with its stdin connected to the current top of 'self._f_objs'
    (or a pipe when reading from a URL), and appends the child's stdout to
    'self._f_objs' so subsequent reads see decompressed data. For a plain
    uncompressed local file it only records the file size and returns.

    Raises:
        Error: if the required decompressor or archiver program is not
               installed on the system.
    """

    def is_gzip(name):
        """Returns 'True' if file 'name' is compressed with 'gzip'."""
        if name.endswith('.gzip') or \
           (name.endswith('.gz') and not name.endswith('.tar.gz')):
            return True
        return False

    def is_bzip2(name):
        """Returns 'True' if file 'name' is compressed with 'bzip2'."""
        if name.endswith('.bz2') and not name.endswith('.tar.bz2'):
            return True
        return False

    def is_xz(name):
        """Returns 'True' if file 'name' is compressed with 'xz'."""
        if name.endswith('.xz') and not name.endswith('.tar.xz'):
            return True
        return False

    def is_lzop(name):
        """Returns 'True' if file 'name' is compressed with 'lzop'."""
        if name.endswith('.lzo') and not name.endswith('.tar.lzo'):
            return True
        return False

    def is_lz4(name):
        """Returns 'True' if file 'name' is compressed with 'lz4'."""
        if name.endswith('.lz4') and not name.endswith('.tar.lz4'):
            return True
        return False

    def is_zst(name):
        """Returns 'True' if file 'name' is compressed with 'zstd'."""
        if name.endswith('.zst') and not name.endswith('.tar.zst'):
            return True
        return False

    def is_tar_gz(name):
        """
        Returns 'True' if file 'name' is a tar archive compressed with
        'gzip'.
        """
        if name.endswith('.tar.gz') or name.endswith('.tgz'):
            return True
        return False

    def is_tar_bz2(name):
        """
        Returns 'True' if file 'name' is a tar archive compressed with
        'bzip2'.
        """
        if name.endswith('.tar.bz2') or name.endswith('.tbz') or \
           name.endswith('.tbz2') or name.endswith('.tb2'):
            return True
        return False

    def is_tar_xz(name):
        """
        Returns 'True' if file 'name' is a tar archive compressed with
        'xz'.
        """
        if name.endswith('.tar.xz') or name.endswith('.txz'):
            return True
        return False

    def is_tar_lzo(name):
        """
        Returns 'True' if file 'name' is a tar archive compressed with
        'lzop'.
        """
        if name.endswith('.tar.lzo') or name.endswith('.tzo'):
            return True
        return False

    def is_tar_lz4(name):
        """
        Returns 'True' if file 'name' is a tar archive compressed with
        'lz4'.
        """
        if name.endswith('.tar.lz4') or name.endswith('.tlz4'):
            return True
        return False

    def is_tar_zst(name):
        """
        Returns 'True' if file 'name' is a tar archive compressed with
        'zstd'.
        """
        if name.endswith('.tar.zst') or name.endswith('.tzst'):
            return True
        return False

    # Pick the decompressor program and its command-line arguments based on
    # the file name suffix. 'archiver' stays None for plain compressed
    # files; it is set to "tar" for compressed tar archives, in which case
    # tar itself performs the decompression (via -z/-j/-J/--lzo/-I...) and
    # streams the member contents to stdout (-O).
    archiver = None
    if is_tar_gz(self.name) or is_gzip(self.name):
        self.compression_type = 'gzip'
        # Prefer 'pigz' over 'gzip' when it is installed.
        if BmapHelpers.program_is_available("pigz"):
            decompressor = "pigz"
        else:
            decompressor = "gzip"

        if is_gzip(self.name):
            args = "-d -c"
        else:
            archiver = "tar"
            args = "-x -z -O"
    elif is_tar_bz2(self.name) or is_bzip2(self.name):
        self.compression_type = 'bzip2'
        # Prefer 'pbzip2' over 'bzip2' when it is installed.
        if BmapHelpers.program_is_available("pbzip2"):
            decompressor = "pbzip2"
        else:
            decompressor = "bzip2"

        if is_bzip2(self.name):
            args = "-d -c"
        else:
            archiver = "tar"
            args = "-x -j -O"
    elif is_tar_xz(self.name) or is_xz(self.name):
        self.compression_type = 'xz'
        decompressor = "xz"
        if is_xz(self.name):
            args = "-d -c"
        else:
            archiver = "tar"
            args = "-x -J -O"
    elif is_tar_lzo(self.name) or is_lzop(self.name):
        self.compression_type = 'lzo'
        decompressor = "lzop"
        if is_lzop(self.name):
            args = "-d -c"
        else:
            archiver = "tar"
            args = "-x --lzo -O"
    elif self.name.endswith(".zip"):
        self.compression_type = 'zip'
        # 'funzip' extracts the first member of a zip archive to stdout,
        # reading the archive from stdin, so no extra arguments are needed.
        decompressor = "funzip"
        args = ""
    elif is_tar_lz4(self.name) or is_lz4(self.name):
        self.compression_type = 'lz4'
        decompressor = "lz4"
        if is_lz4(self.name):
            args = "-d -c"
        else:
            archiver = "tar"
            args = "-x -Ilz4 -O"
    elif is_tar_zst(self.name) or is_zst(self.name):
        self.compression_type = 'zst'
        decompressor = "zstd"
        if is_zst(self.name):
            # NOTE(review): no "-c" here, unlike the other decompressors —
            # presumably zstd already writes to stdout when its input is
            # stdin; confirm against the zstd CLI documentation.
            args = "-d"
        else:
            archiver = "tar"
            args = "-x -Izstd -O"
    else:
        # Not a recognized compressed format: for a local file just record
        # its size and read it directly, without any child process.
        if not self.is_url:
            self.size = os.fstat(self._f_objs[-1].fileno()).st_size
        return

    if archiver == "tar":
        # This will get rid of messages like:
        # tar: Removing leading `/' from member names'.
        args += " -P -C /"

    # Make sure decompressor and the archiver programs are available
    if not BmapHelpers.program_is_available(decompressor):
        raise Error("the \"%s\" program is not available but it is "
                    "required decompressing \"%s\""
                    % (decompressor, self.name))

    if archiver and not BmapHelpers.program_is_available(archiver):
        raise Error("the \"%s\" program is not available but it is "
                    "required reading \"%s\"" % (archiver, self.name))

    # Start the decompressor process. We'll send the data to its stdin and
    # read the decompressed data from its stdout.
    if archiver:
        args = archiver + " " + args
    else:
        args = decompressor + " " + args

    # For URLs the data is fed to the child through a pipe by a dedicated
    # reader thread (below); for local files the child reads straight from
    # the file descriptor of the current top-of-stack file object.
    if self.is_url:
        child_stdin = subprocess.PIPE
    else:
        child_stdin = self._f_objs[-1].fileno()

    child_process = subprocess.Popen(args, shell=True,
                                     bufsize=1024 * 1024,
                                     stdin=child_stdin,
                                     stdout=subprocess.PIPE)

    if child_stdin == subprocess.PIPE:
        # A separate reader thread is created only when we are reading via
        # urllib2.
        args = (self._f_objs[-1], child_process.stdin, )
        self._rthread = threading.Thread(target=self._read_thread,
                                         args=args)
        # Daemonize the thread so it does not block interpreter exit.
        self._rthread.daemon = True
        self._rthread.start()
        # NOTE(review): presumably this flag makes seek() be emulated for
        # the non-seekable piped stream — confirm against the seek logic.
        self._fake_seek = True

    # The decompressor's stdout becomes the new top of the read stack, and
    # the child is remembered so it can be cleaned up later.
    self._f_objs.append(child_process.stdout)
    self._child_processes.append(child_process)