class DirectoriesFilter(unittest.TestCase):
    """Tests for batch.DirectoriesFilter listing against a MemoryFS fixture."""

    def setUp(self):
        # Fixture: sub.0..sub.4 (each holding a nested sub.0) plus
        # child.0..child.4, used to exercise include-filter matching.
        self.fs = MemoryFS()
        for idx in range(5):
            name = 'sub.{0}'.format(idx)
            self.fs.makedir(name)
            with self.fs.opendir(name) as nested:
                nested.makedir('sub.0')
        for idx in range(5):
            self.fs.makedir('child.{0}'.format(idx))

    def tearDown(self):
        self.fs.close()

    def test_basic(self):
        # Default depth lists only the top-level sub.* directories.
        flt = batch.DirectoriesFilter(include_filters=['sub*'])
        found = set(flt.lst(self.fs))
        self.assertEqual(found, {'sub.{}'.format(i) for i in range(5)})

    def test_depth_1(self):
        # depth=1 also descends one level into each matched directory.
        flt = batch.DirectoriesFilter(include_filters=['sub*'], depth=1)
        found = set(flt.lst(self.fs))
        top_level = {'sub.{}'.format(i) for i in range(5)}
        nested = {'sub.{}/sub.0'.format(i) for i in range(5)}
        self.assertEqual(found, top_level | nested)
def test_makedir(self):
    """Directory.makedir creates the directory and returns a Directory wrapper."""
    backing = MemoryFS()
    backing.makedir('test')
    parent = Directory('test', backing)
    created = parent.makedir('sub')
    self.assertTrue(backing.exists('test/sub'))
    self.assertIsInstance(created, Directory)
    self.assertEqual(created.path.s, 'test/sub')
def test__when_copying_directory__but_directory_exists__should_copy_into_existing_directory(
):
    """Copying onto an existing directory merges into it instead of failing."""
    origin_fs = MemoryFS()
    source_sub = origin_fs.makedir("sourcedir")
    origin_fs.makedir("targetdir")
    write_file_with_content(source_sub, SOURCE, "content")
    filesystem = _TestFilesystemImpl(origin_fs)
    filesystem.copy("sourcedir", "targetdir")
    assert origin_fs.exists(f"targetdir/{SOURCE}")
class TestBoundWalkerBase(unittest.TestCase):
    """Shared fixture: a small directory tree of empty files on a MemoryFS."""

    def setUp(self):
        """Build this tree of empty files:

        /
          foo1/  top1.txt, top2.txt, bar1/
          foo2/  top3.bin, bar2/bar3/test.txt
          foo3/  (empty)
        """
        self.fs = MemoryFS()
        for top in ("foo1", "foo2", "foo3"):
            self.fs.makedir(top)
        self.fs.create("foo1/top1.txt")
        self.fs.create("foo1/top2.txt")
        self.fs.makedir("foo1/bar1")
        self.fs.makedir("foo2/bar2")
        self.fs.makedir("foo2/bar2/bar3")
        self.fs.create("foo2/bar2/bar3/test.txt")
        self.fs.create("foo2/top3.bin")
def test_target(self):
    """OperationOnFile.target resolves the file path inside the routine's directory.

    Fix: removed the unused function-scope imports (`OpMerge` from
    pygate.routine.merger and `rx`) — neither name was referenced.
    """
    from fs.memoryfs import MemoryFS
    from dxl.fs import Directory
    from pygate.routine.base import RoutineOnDirectory
    mfs = MemoryFS()
    d = Directory('.', mfs)
    # Extra directories/files make sure target() picks only the named file.
    mfs.makedir('sub1')
    mfs.makedir('sub2')
    mfs.touch('test.txt')
    r = RoutineOnDirectory(d)
    o = OperationOnFile('test.txt')
    self.assertEqual(o.target(r).path.s, 'test.txt')
def test_listdir(self):
    """Mount points appear as top-level entries; nested mounts are rejected."""
    mounted = MountFS()
    self.assertEqual(mounted.listdir("/"), [])
    first = MemoryFS()
    third = MemoryFS()
    extra = TempFS()
    mounted.mount("/m1", first)
    mounted.mount("/m2", "temp://")  # mount via an FS URL
    mounted.mount("/m3", third)
    # Mounting inside an existing mount point must fail.
    with self.assertRaises(MountError):
        mounted.mount("/m3/foo", extra)
    self.assertEqual(sorted(mounted.listdir("/")), ["m1", "m2", "m3"])
    third.makedir("foo")
    self.assertEqual(sorted(mounted.listdir("/m3")), ["foo"])
def test_categorize_filesystem(self) -> None:
    """categorize_filesystem distinguishes empty/single/multi/unknown layouts."""

    def categorized(prepare):
        # Build a fresh MemoryFS, apply the setup step, and classify it.
        fresh = MemoryFS()
        prepare(fresh)
        return categorize_filesystem(fresh)

    self.assertEqual(EMPTY_FS_TYPE, categorized(lambda m: None))
    self.assertEqual(SINGLE_VERSIONED_FS_TYPE,
                     categorized(lambda m: m.makedir("/hst_12345$")))
    self.assertEqual(MULTIVERSIONED_FS_TYPE,
                     categorized(lambda m: m.makedirs("/hst_12345/v$1.0")))
    self.assertEqual(UNKNOWN_FS_TYPE,
                     categorized(lambda m: m.makedir("/hst_12345")))
def test_listdir(self):
    """Each mounted filesystem shows as one root entry; nesting is rejected."""
    mount_fs = MountFS()
    self.assertEqual(mount_fs.listdir('/'), [])
    mem_a, mem_b = MemoryFS(), MemoryFS()
    tmp_a, tmp_b = TempFS(), TempFS()
    mount_fs.mount('/m1', mem_a)
    mount_fs.mount('/m2', tmp_a)
    mount_fs.mount('/m3', mem_b)
    # A mount point below an existing mount raises MountError.
    with self.assertRaises(MountError):
        mount_fs.mount('/m3/foo', tmp_b)
    self.assertEqual(sorted(mount_fs.listdir('/')), ['m1', 'm2', 'm3'])
    mem_b.makedir('foo')
    self.assertEqual(sorted(mount_fs.listdir('/m3')), ['foo'])
def test_list_only_matched_dirs(self):
    """Only directories matching the glob survive the observable filters."""
    backing = MemoryFS()
    backing.touch('test.txt')
    for idx in range(2):
        backing.makedir('sub{}'.format(idx))
    backing.makedir('foo')
    root = Directory('.', backing)
    matched = (root.listdir_as_observable()
               .filter(lambda entry: isinstance(entry, Directory))
               .filter(lambda entry: entry.match(['sub*']))
               .to_list().to_blocking().first())
    self.assertEqual(len(matched), 2)
    for entry in matched:
        self.assertIsInstance(entry, Directory)
    names = [entry.path.s for entry in matched]
    self.assertIn('sub0', names)
    self.assertIn('sub1', names)
def test_dryrun(self):
    """A dry run reports target dirs and files without touching the filesystem."""
    backing = MemoryFS()
    sub_names = ['sub.{}'.format(i) for i in range(3)]
    for name in sub_names:
        backing.makedir(name)
    root = Directory('.', backing)
    add_first = ini.OpAddToBroadcastFile('test1.txt')
    add_second = ini.OpAddToBroadcastFile('test2.txt')
    broadcast = ini.OpBroadcastFile(['sub*'])
    # The broadcast op is *not* part of the routine; its dryrun report is
    # inspected directly after the add operations have run.
    routine = RoutineOnDirectory(root, [add_first, add_second], dryrun=True)
    routine.work()
    expected_files = ['test{}.txt'.format(i) for i in range(1, 3)]
    report = broadcast.dryrun(routine)
    self.assertEqual(sorted(report[ini.KEYS.SUBDIRECTORIES]),
                     sorted(sub_names))
    self.assertEqual(sorted(report[ini.KEYS.TO_BROADCAST_FILES]),
                     sorted(expected_files))
def test_match(self):
    """OperationOnSubdirectories yields only directories matching the globs."""
    from fs.memoryfs import MemoryFS
    from dxl.fs import Directory
    backing = MemoryFS()
    root = Directory('.', backing)
    routine = RoutineOnDirectory(root)
    for idx in range(2):
        backing.makedir('sub{}'.format(idx))
    backing.makedir('testdir')
    # A *file* named sub.txt must not be matched as a subdirectory.
    backing.touch('sub.txt')
    op = OperationOnSubdirectories(['sub*'])
    matched = op.subdirectories(routine).to_list().to_blocking().first()
    self.assertEqual(len(matched), 2)
    names = [entry.path.s for entry in matched]
    self.assertIn('sub0', names)
    self.assertIn('sub1', names)
def test_listdir(self):
    """listdir returns one Directory and one File wrapper for the fixture."""
    backing = MemoryFS()
    backing.touch('test_file.txt')
    backing.makedir('test_dir')
    entries = Directory('.', backing).listdir()
    self.assertEqual(len(entries), 2)
    found_dir = next((e for e in entries if isinstance(e, Directory)), None)
    found_file = next((e for e in entries if not isinstance(e, Directory)),
                      None)
    self.assertIsNotNone(found_dir)
    self.assertIsNotNone(found_file)
    self.assertEqual(found_dir.path.n, 'test_dir')
    self.assertEqual(found_file.path.n, 'test_file.txt')
def test_mountfile(self):
    """Mounting a single file exposes it and writes through to the origin fs."""
    quote = b"""If you wish to make an apple pie from scratch, you must first invent the universe."""
    mem_fs = MemoryFS()
    mem_fs.makedir('foo')
    mem_fs.setcontents('foo/bar.txt', quote)
    foo_dir = mem_fs.opendir('foo')
    mount_fs = MountFS()
    mount_fs.mountfile('bar.txt', foo_dir.open, foo_dir.getinfo)
    # Every spelling of the root is a directory.
    for root_alias in ('/', './', ''):
        self.assert_(mount_fs.isdir(root_alias))
    # The mounted file is visible in the root listing.
    self.assertEqual(mount_fs.listdir(), ["bar.txt"])
    self.assert_(not mount_fs.exists('nobodyhere.txt'))
    self.assert_(mount_fs.exists('bar.txt'))
    self.assert_(mount_fs.isfile('bar.txt'))
    self.assert_(not mount_fs.isdir('bar.txt'))
    # Reads and size queries go through the open/getinfo callables.
    self.assertEqual(mount_fs.getcontents('bar.txt'), quote)
    self.assertEqual(mount_fs.getsize('bar.txt'), len(quote))
    # Changes on the origin fs are seen through the mount...
    mem_fs.setcontents('foo/bar.txt', 'baz')
    self.assertEqual(mount_fs.getcontents('bar.txt'), b'baz')
    self.assertEqual(mount_fs.getsize('bar.txt'), len('baz'))
    # ...and remain visible on the origin fs itself.
    self.assertEqual(mem_fs.getcontents('foo/bar.txt'), b'baz')
    self.assertEqual(mem_fs.getsize('foo/bar.txt'), len('baz'))
    # Unmounting removes the entry; unmounting a second time is a no-op
    # that returns False.
    self.assert_(mount_fs.unmount("bar.txt"))
    self.assertEqual(mount_fs.listdir(), [])
    self.assert_(not mount_fs.exists('bar.txt'))
    self.assertFalse(mount_fs.unmount("bar.txt"))
def test_apply(self):
    """Broadcast copies every registered file into each matched subdirectory."""
    backing = MemoryFS()
    sub_names = ['sub.{}'.format(i) for i in range(3)]
    for name in sub_names:
        backing.makedir(name)
    root = Directory('.', backing)
    backing.touch('test1.txt')
    backing.touch('test2.txt')
    operations = [ini.OpAddToBroadcastFile('test1.txt'),
                  ini.OpAddToBroadcastFile('test2.txt'),
                  ini.OpBroadcastFile(['sub*'])]
    RoutineOnDirectory(root, operations).work()
    expected = [name + '/' + base
                for name in sub_names
                for base in ('test1.txt', 'test2.txt')]
    for index, target in enumerate(expected):
        with self.subTest(index):
            self.assertTrue(backing.exists(target))
class StandardLayoutMixin(object):
    """Mixin that patches the CLI's locale/output filesystems with MemoryFS.

    Subclasses get `self.locales_fs` and `self.output_fs` in-memory
    filesystems, wired into elm_fluent.cli via mock patchers for the
    duration of each test.
    """

    locales = ["en"]

    def setUp(self):
        super(StandardLayoutMixin, self).setUp()
        self.runner = CliRunner()
        self.locales_fs = MemoryFS()
        self.output_fs = MemoryFS()

        def get_locales_fs(path):
            return self.locales_fs.opendir(path)

        def get_output_fs(path):
            return self.output_fs.opendir(path)

        self.locales_fs_patcher = mock.patch('elm_fluent.cli.get_locales_fs',
                                             new=get_locales_fs)
        self.locales_fs_patcher.start()
        self.output_fs_patcher = mock.patch('elm_fluent.cli.get_output_fs',
                                            new=get_output_fs)
        self.output_fs_patcher.start()
        self.setup_fs()

    def tearDown(self):
        # Fix: the output_fs patcher was started in setUp but never
        # stopped, leaking the patch into subsequent tests.
        self.output_fs_patcher.stop()
        self.locales_fs_patcher.stop()
        super(StandardLayoutMixin, self).tearDown()

    def setup_fs(self):
        """Create locales/<locale> directories for every configured locale."""
        sub = self.locales_fs.makedir("locales")
        for l in self.locales:
            sub.makedir(l)

    def write_ftl_file(self, path, contents):
        """Write dedented FTL `contents` at `path` on the locales fs."""
        self.locales_fs.writetext(path, dedent_ftl(contents))

    def get_all_files(self, fs):
        """Map every file path in `fs` to its text content."""
        return {p: fs.readtext(p) for p in fs.walk.files()}

    def assertFileSystemEquals(self, fs, files):
        """Assert `fs` holds exactly `files` (trailing whitespace ignored)."""
        all_files = self.get_all_files(fs)
        self.assertEqual({p: c.rstrip() for p, c in all_files.items()},
                         {p: c.rstrip() for p, c in files.items()})

    def run_main(self, args=None):
        """Invoke the CLI, re-raising any unexpected exception from the run."""
        result = self.runner.invoke(cli.main, args=[] if args is None else args)
        if result.exception is not None and not isinstance(
                result.exception, SystemExit):
            exc_info = result.exc_info
            raise exc_info[0].with_traceback(exc_info[1], exc_info[2])
        return result
def test__a__when_copying_directory__should_copy_entire_directory():
    """Copying a directory reproduces its contents at the destination."""
    source_name = "mydir"
    destination = "copydir"
    mem_fs = MemoryFS()
    write_file_with_content(mem_fs.makedir(source_name), SOURCE)
    _TestFilesystemImpl(mem_fs).copy(source_name, destination)
    assert mem_fs.exists(f"{destination}/{SOURCE}")
class TestFilesFilter(unittest.TestCase):
    """Tests for batch.FilesFilter against a MemoryFS fixture."""

    def setUp(self):
        # sub.*/result.txt and child.*/result.txt nested files, plus
        # result.*.txt / errors.*.txt files at the root.
        self.fs = MemoryFS()
        for idx in range(5):
            name = 'sub.{0}'.format(idx)
            self.fs.makedir(name)
            with self.fs.opendir(name) as nested:
                nested.touch('result.txt')
        for idx in range(5):
            name = 'child.{0}'.format(idx)
            self.fs.makedir(name)
            with self.fs.opendir(name) as nested:
                nested.touch('result.txt')
        for idx in range(5):
            self.fs.touch('result.{}.txt'.format(idx))
            self.fs.touch('errors.{}.txt'.format(idx))

    def tearDown(self):
        self.fs.close()

    def test_basic(self):
        # A single include filter matches only top-level result files.
        flt = batch.FilesFilter(include_filters=['result*'])
        self.assertEqual(set(flt.lst(self.fs)),
                         {'result.{}.txt'.format(i) for i in range(5)})

    def test_multi_filters(self):
        # Multiple filters are OR-ed together.
        flt = batch.FilesFilter(include_filters=['result*', 'errors*'])
        expected = ({'result.{}.txt'.format(i) for i in range(5)} |
                    {'errors.{}.txt'.format(i) for i in range(5)})
        self.assertEqual(set(flt.lst(self.fs)), expected)

    def test_with_directory_filter(self):
        # Chain a directory filter into a file filter via flat_map.
        found = set()
        (batch.DirectoriesFilter(['sub*']).obv(self.fs)
         .flat_map(lambda p: batch.FilesFilter(['result*']).obv(self.fs, p))
         .subscribe(found.add))
        self.assertEqual(found,
                         {'sub.{}/result.txt'.format(i) for i in range(5)})
def test_copy_files(self):
    """sync copies every matched source file into every matched directory."""
    backing = MemoryFS()
    backing.touch('txt1.txt')
    backing.touch('txt2.txt')
    backing.makedir('sub1')
    backing.makedir('sub2')
    expected_copies = ['sub1/txt1.txt', 'sub1/txt2.txt',
                       'sub2/txt1.txt', 'sub2/txt2.txt']
    for path in expected_copies:
        self.assertFalse(backing.exists(path))
    root = Directory('.', backing)
    targets = root.listdir_as_observable().filter(match_directory(['sub*']))
    sources = root.listdir_as_observable().filter(match_file(['txt*']))
    sources.subscribe(lambda f: print(f.path.s))  # debug trace of sources
    collected = []
    sources.subscribe(collected.append)
    copied = (targets.flat_map(lambda d: d.sync(sources))
              .to_list().to_blocking().first())
    self.assertEqual(len(copied), 4)
    for path in expected_copies:
        self.assertTrue(backing.exists(path))
def test__when_globbing__it_returns_matching_paths():
    """glob returns only paths matching the pattern, including subdirectories."""
    mem_fs = MemoryFS()
    for name in ("hello.txt", "world.txt", "nope.gif"):
        mem_fs.create(name)
    subfs = mem_fs.makedir("sub")
    subfs.create("nomatch.gif")
    subfs.create("match.txt")
    found = _TestFilesystemImpl(mem_fs).glob("**/**.txt")
    assert found == ["hello.txt", "world.txt", "sub/match.txt"]
def test__when_copying_directory_to_other_filesystem__should_copy_dir():
    """Cross-filesystem copy recreates the directory and its file contents."""
    source_dir = "mydir"
    target_dir = "copydir"
    origin_fs = MemoryFS()
    write_file_with_content(origin_fs.makedir(source_dir), SOURCE, "content")
    target_fs = MemoryFS()
    sut = _TestFilesystemImpl(origin_fs)
    sut.copy(source_dir, target_dir, filesystem=_TestFilesystemImpl(target_fs))
    complete_path = f"{target_dir}/{SOURCE}"
    assert target_fs.exists(complete_path)
    assert_file_content_equals(target_fs, complete_path, "content")
def _my_fs(module):
    """Create a mock filesystem to be used in examples.

    The layout depends on which module's docs are being demonstrated.
    """
    my_fs = MemoryFS()
    if module == "fs.base":
        for directory in ("Desktop", "Videos"):
            my_fs.makedir(directory)
        my_fs.touch("Videos/starwars.mov")
        my_fs.touch("file.txt")
    elif module == "fs.info":
        my_fs.touch("foo.tar.gz")
        my_fs.settext("foo.py", "print('Hello, world!')")
        my_fs.makedir("bar")
    elif module in {"fs.walk", "fs.glob"}:
        for directory in ("dir1", "dir2"):
            my_fs.makedir(directory)
        my_fs.settext("foo.py", "print('Hello, world!')")
        my_fs.touch("foo.pyc")
        my_fs.settext("bar.py", "print('ok')\n\n# this is a comment\n")
        my_fs.touch("bar.pyc")
    return my_fs
def test_non_file_error(self):
    """exists() on a path that is actually a directory raises NotAFileError."""
    backing = MemoryFS()
    backing.makedir('test')
    wrapper = File('test', backing)
    with self.assertRaises(NotAFileError):
        wrapper.exists()
class BigFS(FS):

    """A FileSystem that represents a BIG file.

    Read-only view over the "BIG" archive format (BIGF/BIG4 magic).
    The directory structure is mirrored into an internal MemoryFS so
    path queries (isdir/isfile/exists/listdir) are delegated there,
    while file contents are read lazily from the archive via BIGEntry.

    NOTE(review): this is Python-2-era code — it compares `str` reads
    against text magic words and uses `unicode`; it will not work
    unmodified on Python 3.
    """

    _meta = {
        'virtual': False,
        'read_only': True,
        'unicode_paths': True,
        'case_insensitive_paths': False,
        'network': False,
    }

    def __init__(self, filename, mode="r", thread_synchronize=True):
        """Create a FS that maps on to a big file.

        :param filename: A (system) path, or a file-like object
        :param mode: Mode to open file: 'r' for reading, 'w' and 'a' not supported
        :param thread_synchronize: -- Set to True (default) to enable thread-safety
        :raises ValueError: if mode is anything but 'r'
        :raises ResourceNotFoundError: if the archive file cannot be opened
        """
        super(BigFS, self).__init__(thread_synchronize=thread_synchronize)
        if len(mode) > 1 or mode not in "r":
            raise ValueError("mode must be 'r'")
        self.file_mode = mode
        self.big_path = str(filename)

        # Maps normalized archive path -> BIGEntry describing the payload.
        self.entries = {}
        try:
            self.bf = open(filename, "rb")
        except IOError:
            raise ResourceNotFoundError(str(filename), msg="BIG file does not exist: %(path)s")

        # Mirror of the archive's directory tree, used for path queries only.
        self._path_fs = MemoryFS()
        if mode in 'ra':
            self._parse_resource_list(self.bf)

    def __str__(self):
        return "<BigFS: %s>" % self.big_path

    def __unicode__(self):
        # Python 2 only: `unicode` does not exist on Python 3.
        return unicode(self.__str__())

    def _parse_resource_list(self, g):
        """Read the archive header and register every contained file.

        Layout: 4-byte magic, then three big-endian uint32s (archive
        size, file count, body offset), then per file: offset + size
        (two uint32s) followed by a NUL-terminated name.
        """
        magicWord = g.read(4)
        if magicWord != "BIGF" and magicWord != "BIG4":
            # NOTE(review): `filename` is not defined in this scope — this
            # error path raises NameError instead of the intended ValueError.
            raise ValueError("Magic word of BIG file invalid: " + filename + " " + repr(magicWord))
        header = g.read(12)
        header = unpack(">III", header)
        BIGSize = header[0]      # total archive size (unused here)
        fileCount = header[1]
        bodyOffset = header[2]   # start of payload data (unused here)
        for i in range(fileCount):
            fileHeader = g.read(8)
            fileHeader = unpack(">II", fileHeader)  # (offset, stored size)

            # Names are NUL-terminated; read a bounded chunk and scan for it.
            pos = g.tell()
            buf = g.read(4096)
            marker = buf.find("\0")
            if marker == -1:
                raise ValueError("Could not parse filename in BIG file: Too long or invalid file")
            name = buf[:marker]
            # TODO: decode the encoding of name (or normalize the path?)
            isCompressed, uncompressedSize = self.__isCompressed(g, fileHeader[0], fileHeader[1])
            be = BIGEntry(name, fileHeader[0], fileHeader[1], isCompressed, uncompressedSize)
            name = normpath(name)
            self.entries[name] = be
            self._add_resource(name)
            # Continue at the byte just past this entry's name terminator.
            g.seek(pos + marker + 1)

    def __isCompressed(self, g, offset, size):
        """Probe the payload at `offset` for the RefPack compression magic.

        Returns (is_compressed, uncompressed_size); for uncompressed
        entries the stored `size` is returned unchanged.
        """
        g.seek(offset)
        buf = g.read(2)
        magic = unpack(">H", buf)[0]
        if (magic & 0x3EFF) == 0x10FB:
            # it is compressed
            if magic & 0x8000:
                # decompressed size is uint32
                return True, unpack(">I", g.read(4))[0]
            else:
                # use only 3 bytes
                return True, unpack(">I", "\0" + g.read(3))[0]
        return False, size

    def _add_resource(self, path):
        """Mirror an archive entry into the internal path MemoryFS.

        Paths ending in '/' become directories; everything else becomes
        an empty placeholder file (contents stay in the archive).
        """
        if path.endswith('/'):
            path = path[:-1]
            if path:
                self._path_fs.makedir(path, recursive=True, allow_recreate=True)
        else:
            dirpath, filename = pathsplit(path)
            if dirpath:
                self._path_fs.makedir(dirpath, recursive=True, allow_recreate=True)
            f = self._path_fs.open(path, 'w')
            f.close()

    def close(self):
        """Finalizes the zip file so that it can be read. No further operations will work after this method is called."""
        if hasattr(self, 'bf') and self.bf:
            self.bf.close()
            # Any later use of self.bf raises via the proxy.
            self.bf = _ExceptionProxy()

    @synchronize
    def open(self, path, mode="r", **kwargs):
        """Open an archive member for reading; writing is unsupported."""
        path = normpath(relpath(path))
        if 'r' in mode:
            if self.file_mode not in 'ra':
                raise OperationFailedError("open file", path=path, msg="Big file must be opened for reading ('r') or appending ('a')")
            try:
                return self.entries[path].getfile(self.bf)
            except KeyError:
                raise ResourceNotFoundError(path)

        if 'w' in mode:
            raise OperationFailedError("open file", path=path, msg="Big file cannot be edited ATM")

        raise ValueError("Mode must contain be 'r' or 'w'")

    @synchronize
    def getcontents(self, path):
        """Return the (decompressed) bytes of an archive member."""
        if not self.exists(path):
            raise ResourceNotFoundError(path)
        path = normpath(path)
        try:
            contents = self.entries[path].getcontents(self.bf)
        except KeyError:
            raise ResourceNotFoundError(path)
        except RuntimeError:
            # NOTE(review): "oppened" typo is in the user-visible message.
            raise OperationFailedError("read file", path=path, msg="Big file must be oppened with 'r' or 'a' to read")
        return contents

    def desc(self, path):
        # Human-readable description used by FS.tree() etc.
        if self.isdir(path):
            return "Dir in big file: %s" % self.big_path
        else:
            return "File in big file: %s" % self.big_path

    # Path queries are answered by the mirrored MemoryFS tree.
    def isdir(self, path):
        return self._path_fs.isdir(path)

    def isfile(self, path):
        return self._path_fs.isfile(path)

    def exists(self, path):
        return self._path_fs.exists(path)

    @synchronize
    def makedir(self, dirname, recursive=False, allow_recreate=False):
        # NOTE(review): file_mode is always 'r' (enforced in __init__),
        # so this always raises — the FS is effectively read-only.
        dirname = normpath(dirname)
        if self.file_mode not in "wa":
            raise OperationFailedError("create directory", path=dirname, msg="Big file must be opened for writing ('w') or appending ('a')")
        if not dirname.endswith('/'):
            dirname += '/'
        self._add_resource(dirname)

    def listdir(self, path="/", wildcard=None, full=False, absolute=False, dirs_only=False, files_only=False):
        """Delegate directory listing to the mirrored path filesystem."""
        return self._path_fs.listdir(path, wildcard, full, absolute, dirs_only, files_only)

    @synchronize
    def getinfo(self, path):
        """Return a size/compression info dict for an archive member.

        Directories exist only in the path mirror, so they report the
        default {'size': 0} with no archive-specific keys.
        """
        if not self.exists(path):
            raise ResourceNotFoundError(path)
        path = normpath(path).lstrip('/')
        info = {'size': 0}
        if path in self.entries:
            be = self.entries[path]
            info['size'] = be.realSize
            info['file_size'] = be.realSize
            info['stored_size'] = be.storedSize
            info['is_compressed'] = be.isCompressed
            info['offset'] = be.offset
            info['internal_filename'] = be.filename
            info['filename'] = path
        return info
import csv
import json
import tempfile
from fs.memoryfs import MemoryFS
import numpy as np

# Demo: build a small in-memory directory layout, print the tree,
# remove the dataset directory, then print the tree again.
fsys = MemoryFS()
fsys.makedir('raw_data')
fsys.makedir('result_data')
fsys.makedir('result_data/dataset_200_step/')
pure_path = 'result_data/dataset_200_step/'
for leaf in ('a.tfrecord', 'b.tfrecord', 'log.log'):
    fsys.create(pure_path + leaf)
fsys.tree()
fsys.removetree(pure_path)
fsys.tree()
class TestWalk(unittest.TestCase):
    """Walk behaviour over a fixed MemoryFS tree (tuple-style steps)."""

    def setUp(self):
        # Tree: foo1/{top1.txt, top2.txt, bar1/},
        #       foo2/{top3.txt, bar2/bar3/test.txt}, foo3/ (empty).
        self.fs = MemoryFS()
        self.fs.makedir('foo1')
        self.fs.makedir('foo2')
        self.fs.makedir('foo3')
        self.fs.create('foo1/top1.txt')
        self.fs.create('foo1/top2.txt')
        self.fs.makedir('foo1/bar1')
        self.fs.makedir('foo2/bar2')
        self.fs.makedir('foo2/bar2/bar3')
        self.fs.create('foo2/bar2/bar3/test.txt')
        self.fs.create('foo2/top3.txt')

    def test_invalid(self):
        # Unknown search strategies are rejected up front.
        with self.assertRaises(ValueError):
            self.fs.walk(search='random')

    def test_repr(self):
        # repr() of the bound walker must not raise.
        repr(self.fs.walk)

    def test_walk(self):
        # Default (breadth-first) walk yields (path, dirs, files) per directory.
        walk = []
        for path, dirs, files in self.fs.walk():
            walk.append(
                (path, [info.name for info in dirs],
                 [info.name for info in files]))
        expected = [(u'/', [u'foo1', u'foo2', u'foo3'], []),
                    (u'/foo1', [u'bar1'], [u'top1.txt', u'top2.txt']),
                    (u'/foo2', [u'bar2'], [u'top3.txt']), (u'/foo3', [], []),
                    (u'/foo1/bar1', [], []), (u'/foo2/bar2', [u'bar3'], []),
                    (u'/foo2/bar2/bar3', [], [u'test.txt'])]
        self.assertEqual(walk, expected)

    def test_walk_files(self):
        # Breadth-first vs depth-first ordering of file paths.
        files = list(self.fs.walk.files())
        self.assertEqual(files, [
            '/foo1/top1.txt',
            '/foo1/top2.txt',
            '/foo2/top3.txt',
            '/foo2/bar2/bar3/test.txt',
        ])
        files = list(self.fs.walk.files(search="depth"))
        self.assertEqual(files, [
            '/foo1/top1.txt',
            '/foo1/top2.txt',
            '/foo2/bar2/bar3/test.txt',
            '/foo2/top3.txt',
        ])

    def test_walk_dirs(self):
        # Directory ordering for both search strategies, plus exclusion.
        dirs = list(self.fs.walk.dirs())
        self.assertEqual(dirs, [
            '/foo1', '/foo2', '/foo3', '/foo1/bar1', '/foo2/bar2',
            '/foo2/bar2/bar3'
        ])
        dirs = list(self.fs.walk.dirs(search="depth"))
        self.assertEqual(dirs, [
            '/foo1/bar1', '/foo2/bar2/bar3', '/foo2/bar2', '/foo1', '/foo2',
            '/foo3'
        ])
        # exclude_dirs prunes foo2 and everything below it.
        dirs = list(self.fs.walk.dirs(search="depth", exclude_dirs=['foo2']))
        self.assertEqual(dirs, ['/foo1/bar1', '/foo1', '/foo3'])

    def test_walk_info(self):
        # info() yields (path, Info) for every entry, dirs and files alike.
        walk = []
        for path, info in self.fs.walk.info():
            walk.append((path, info.is_dir, info.name))
        expected = [(u'/foo1', True, u'foo1'), (u'/foo2', True, u'foo2'),
                    (u'/foo3', True, u'foo3'), (u'/foo1/bar1', True, u'bar1'),
                    (u'/foo1/top1.txt', False, u'top1.txt'),
                    (u'/foo1/top2.txt', False, u'top2.txt'),
                    (u'/foo2/bar2', True, u'bar2'),
                    (u'/foo2/top3.txt', False, u'top3.txt'),
                    (u'/foo2/bar2/bar3', True, u'bar3'),
                    (u'/foo2/bar2/bar3/test.txt', False, u'test.txt')]
        self.assertEqual(walk, expected)

    def test_broken(self):
        # Simulate an unreadable directory to exercise error handling.
        original_scandir = self.fs.scandir

        def broken_scandir(path, namespaces=None):
            if path == '/foo2':
                raise FSError("can't read dir")
            return original_scandir(path, namespaces=namespaces)

        self.fs.scandir = broken_scandir
        # ignore_errors silently skips the broken subtree...
        files = list(self.fs.walk.files(search="depth", ignore_errors=True))
        self.assertEqual(files, ['/foo1/top1.txt', '/foo1/top2.txt'])
        # ...while an on_error callback returning False re-raises.
        with self.assertRaises(FSError):
            list(self.fs.walk.files(on_error=lambda path, error: False))

    def test_on_error_invalid(self):
        # on_error must be callable.
        with self.assertRaises(TypeError):
            walk.Walker(on_error='nope')
class TestWalk(unittest.TestCase):
    """Walk behaviour over a fixed MemoryFS tree (walk.Step results)."""

    def setUp(self):
        # Tree: foo1/{top1.txt, top2.txt, bar1/},
        #       foo2/{top3.bin, bar2/bar3/test.txt}, foo3/ (empty).
        self.fs = MemoryFS()
        self.fs.makedir('foo1')
        self.fs.makedir('foo2')
        self.fs.makedir('foo3')
        self.fs.create('foo1/top1.txt')
        self.fs.create('foo1/top2.txt')
        self.fs.makedir('foo1/bar1')
        self.fs.makedir('foo2/bar2')
        self.fs.makedir('foo2/bar2/bar3')
        self.fs.create('foo2/bar2/bar3/test.txt')
        self.fs.create('foo2/top3.bin')

    def test_invalid(self):
        # Unknown search strategies are rejected up front.
        with self.assertRaises(ValueError):
            self.fs.walk(search='random')

    def test_repr(self):
        # repr() of the bound walker must not raise.
        repr(self.fs.walk)

    def test_walk(self):
        # Default (breadth-first) walk yields Step namedtuples.
        _walk = []
        for step in self.fs.walk():
            self.assertIsInstance(step, walk.Step)
            path, dirs, files = step
            _walk.append(
                (path, [info.name for info in dirs],
                 [info.name for info in files]))
        expected = [(u'/', [u'foo1', u'foo2', u'foo3'], []),
                    (u'/foo1', [u'bar1'], [u'top1.txt', u'top2.txt']),
                    (u'/foo2', [u'bar2'], [u'top3.bin']), (u'/foo3', [], []),
                    (u'/foo1/bar1', [], []), (u'/foo2/bar2', [u'bar3'], []),
                    (u'/foo2/bar2/bar3', [], [u'test.txt'])]
        self.assertEqual(_walk, expected)

    def test_walk_depth(self):
        # Depth-first search yields leaves before their parents.
        _walk = []
        for step in self.fs.walk(search='depth'):
            self.assertIsInstance(step, walk.Step)
            path, dirs, files = step
            _walk.append(
                (path, [info.name for info in dirs],
                 [info.name for info in files]))
        expected = [(u'/foo1/bar1', [], []),
                    (u'/foo1', [u'bar1'], [u'top1.txt', u'top2.txt']),
                    (u'/foo2/bar2/bar3', [], [u'test.txt']),
                    (u'/foo2/bar2', [u'bar3'], []),
                    (u'/foo2', [u'bar2'], [u'top3.bin']), (u'/foo3', [], []),
                    (u'/', [u'foo1', u'foo2', u'foo3'], [])]
        self.assertEqual(_walk, expected)

    def test_walk_directory(self):
        # Walking a subdirectory restricts the traversal to that subtree.
        _walk = []
        for step in self.fs.walk('foo2'):
            self.assertIsInstance(step, walk.Step)
            path, dirs, files = step
            _walk.append(
                (path, [info.name for info in dirs],
                 [info.name for info in files]))
        expected = [(u'/foo2', [u'bar2'], [u'top3.bin']),
                    (u'/foo2/bar2', [u'bar3'], []),
                    (u'/foo2/bar2/bar3', [], [u'test.txt'])]
        self.assertEqual(_walk, expected)

    def test_walk_levels_1(self):
        # max_depth=1 visits only the root directory.
        results = list(self.fs.walk(max_depth=1))
        self.assertEqual(len(results), 1)
        dirs = sorted(info.name for info in results[0].dirs)
        self.assertEqual(dirs, ['foo1', 'foo2', 'foo3'])
        files = sorted(info.name for info in results[0].files)
        self.assertEqual(files, [])

    def test_walk_levels_1_depth(self):
        # Same max_depth=1 behaviour under depth-first search.
        results = list(self.fs.walk(max_depth=1, search='depth'))
        self.assertEqual(len(results), 1)
        dirs = sorted(info.name for info in results[0].dirs)
        self.assertEqual(dirs, ['foo1', 'foo2', 'foo3'])
        files = sorted(info.name for info in results[0].files)
        self.assertEqual(files, [])

    def test_walk_levels_2(self):
        # max_depth=2 visits the root and its immediate children only.
        _walk = []
        for step in self.fs.walk(max_depth=2):
            self.assertIsInstance(step, walk.Step)
            path, dirs, files = step
            _walk.append((path, sorted(info.name for info in dirs),
                          sorted(info.name for info in files)))
        expected = [(u'/', [u'foo1', u'foo2', u'foo3'], []),
                    (u'/foo1', [u'bar1'], [u'top1.txt', u'top2.txt']),
                    (u'/foo2', [u'bar2'], [u'top3.bin']), (u'/foo3', [], [])]
        self.assertEqual(_walk, expected)

    def test_walk_files(self):
        # Breadth-first vs depth-first ordering of file paths.
        files = list(self.fs.walk.files())
        self.assertEqual(files, [
            '/foo1/top1.txt',
            '/foo1/top2.txt',
            '/foo2/top3.bin',
            '/foo2/bar2/bar3/test.txt',
        ])
        files = list(self.fs.walk.files(search="depth"))
        self.assertEqual(files, [
            '/foo1/top1.txt',
            '/foo1/top2.txt',
            '/foo2/bar2/bar3/test.txt',
            '/foo2/top3.bin',
        ])

    def test_walk_dirs(self):
        # Directory ordering for both search strategies, plus exclusion.
        dirs = list(self.fs.walk.dirs())
        self.assertEqual(dirs, [
            '/foo1', '/foo2', '/foo3', '/foo1/bar1', '/foo2/bar2',
            '/foo2/bar2/bar3'
        ])
        dirs = list(self.fs.walk.dirs(search="depth"))
        self.assertEqual(dirs, [
            '/foo1/bar1', '/foo1', '/foo2/bar2/bar3', '/foo2/bar2', '/foo2',
            '/foo3'
        ])
        # exclude_dirs prunes foo2 and everything below it.
        dirs = list(self.fs.walk.dirs(search="depth", exclude_dirs=['foo2']))
        self.assertEqual(dirs, ['/foo1/bar1', '/foo1', '/foo3'])

    def test_walk_files_filter(self):
        # Wildcard filters restrict which files are yielded.
        files = list(self.fs.walk.files(filter=['*.txt']))
        self.assertEqual(files, [
            '/foo1/top1.txt',
            '/foo1/top2.txt',
            '/foo2/bar2/bar3/test.txt',
        ])
        files = list(self.fs.walk.files(search="depth", filter=['*.txt']))
        self.assertEqual(files, [
            '/foo1/top1.txt',
            '/foo1/top2.txt',
            '/foo2/bar2/bar3/test.txt',
        ])
        files = list(self.fs.walk.files(filter=['*.bin']))
        self.assertEqual(files, ['/foo2/top3.bin'])
        files = list(self.fs.walk.files(filter=['*.nope']))
        self.assertEqual(files, [])

    def test_walk_info(self):
        # info() yields (path, Info) for every entry, dirs and files alike.
        walk = []
        for path, info in self.fs.walk.info():
            walk.append((path, info.is_dir, info.name))
        expected = [(u'/foo1', True, u'foo1'), (u'/foo2', True, u'foo2'),
                    (u'/foo3', True, u'foo3'),
                    (u'/foo1/top1.txt', False, u'top1.txt'),
                    (u'/foo1/top2.txt', False, u'top2.txt'),
                    (u'/foo1/bar1', True, u'bar1'),
                    (u'/foo2/bar2', True, u'bar2'),
                    (u'/foo2/top3.bin', False, u'top3.bin'),
                    (u'/foo2/bar2/bar3', True, u'bar3'),
                    (u'/foo2/bar2/bar3/test.txt', False, u'test.txt')]
        self.assertEqual(walk, expected)

    def test_broken(self):
        # Simulate an unreadable directory to exercise error handling.
        original_scandir = self.fs.scandir

        def broken_scandir(path, namespaces=None):
            if path == '/foo2':
                raise FSError("can't read dir")
            return original_scandir(path, namespaces=namespaces)

        self.fs.scandir = broken_scandir
        # ignore_errors silently skips the broken subtree...
        files = list(self.fs.walk.files(search="depth", ignore_errors=True))
        self.assertEqual(files, ['/foo1/top1.txt', '/foo1/top2.txt'])
        # ...while an on_error callback returning False re-raises.
        with self.assertRaises(FSError):
            list(self.fs.walk.files(on_error=lambda path, error: False))

    def test_on_error_invalid(self):
        # on_error must be callable.
        with self.assertRaises(TypeError):
            walk.Walker(on_error='nope')

    def test_subdir_uses_same_walker(self):
        # A SubFS created via opendir inherits the parent's walker class.
        class CustomWalker(walk.Walker):
            @classmethod
            def bind(cls, fs):
                return walk.BoundWalker(fs, walker_class=CustomWalker)

        class CustomizedMemoryFS(MemoryFS):
            walker_class = CustomWalker

        base_fs = CustomizedMemoryFS()
        base_fs.settext("a", "a")
        base_fs.makedirs("b")
        base_fs.settext("b/c", "c")
        base_fs.settext("b/d", "d")
        base_walker = base_fs.walk
        self.assertEqual(base_walker.walker_class, CustomWalker)
        six.assertCountEqual(self, ["/a", "/b/c", "/b/d"],
                             base_walker.files())
        sub_fs = base_fs.opendir("b")
        sub_walker = sub_fs.walk
        self.assertEqual(sub_walker.walker_class, CustomWalker)
        six.assertCountEqual(self, ["/c", "/d"], sub_walker.files())

    def test_readonly_wrapper_uses_same_walker(self):
        # The read_only wrapper also preserves the custom walker class.
        class CustomWalker(walk.Walker):
            @classmethod
            def bind(cls, fs):
                return walk.BoundWalker(fs, walker_class=CustomWalker)

        class CustomizedMemoryFS(MemoryFS):
            walker_class = CustomWalker

        base_fs = CustomizedMemoryFS()
        base_walker = base_fs.walk
        self.assertEqual(base_walker.walker_class, CustomWalker)
        readonly_fs = read_only(CustomizedMemoryFS())
        readonly_walker = readonly_fs.walk
        self.assertEqual(readonly_walker.walker_class, CustomWalker)
def _home_fs():
    """Create a mock filesystem that matches the XDG user-dirs spec."""
    home_fs = MemoryFS()
    for xdg_dir in ("Desktop", "Documents", "Downloads", "Music",
                    "Pictures", "Public", "Templates", "Videos"):
        home_fs.makedir(xdg_dir)
    return home_fs
import tempfile
from fs.osfs import OSFS
from fs.memoryfs import MemoryFS
from shutil import rmtree
from six import b

# Demo script: populate a MemoryFS and expose it as a Windows drive via
# Dokan. The temp directory is only needed by the commented-out OSFS
# variant, but it is created and cleaned up regardless.
path = tempfile.mkdtemp()
try:
    #fs = OSFS(path)
    fs = MemoryFS()
    fs.create('test.txt')
    fs.appendtext('test.txt', 'This is a test file', encoding=u'utf-8', errors=None, newline=u'')
    fs.makedir("TestDir")
    fs.create('TestDir/subtest.txt')
    fs.appendtext('TestDir/subtest.txt', 'This is a test file in a subfolder', encoding=u'utf-8', errors=None, newline=u'')
    # NOTE(review): `dokanmount` is not imported in this view — presumably
    # `from fs.expose import dokan as dokanmount`; confirm against the
    # full file.
    flags = dokanmount.DOKAN_OPTION_DEBUG | dokanmount.DOKAN_OPTION_STDERR | dokanmount.DOKAN_OPTION_REMOVABLE
    # Mount as drive Q: in the foreground (blocks until unmounted).
    a = dokanmount.mount(fs, "Q:\\", foreground=True, numthreads=2, flags=flags)
    #fs.close()
finally:
    rmtree(path)
class fs:
    """In-memory fake filesystem backing a honeypot-style shell session.

    Builds a directory tree from data/fs_template.json into a MemoryFS
    and exposes shell-like operations (ls/mkdir/cd) that return canned
    error strings instead of raising.

    NOTE(review): the class name `fs` shadows the common `fs` package
    name — confirm the import layout in the full file.
    """

    def __init__(self, username="******"):
        # username is interpolated into the home directory path below.
        self.username = username
        self.__build_default_tree()
        # Canned error strings returned by the shell-like operations.
        self.NOTFOUND = "Not found"
        self.ALREADYEXISTS = "Already Exists"

    def __create_files_recurse(self, files):
        # Walk the JSON template: "dir" entries recurse into "children",
        # anything else is written as a text file.
        for file in files:
            if file["type"] == "dir":
                self.mem_fs.makedir(file["name"])
                self.__create_files_recurse(file["children"])
            else:
                # Default content: opaque binary-looking filler used when
                # the template entry carries no explicit "data".
                data = """��*��̵/"��'g�>����O�����U�\�tc����SAƖ��b���#ں^�+� ��,�j��W��=H�a�2�M3��-���2t����5�=��˴�S+���o�G�O�F���B�}���� ����j4�g���W�%���`3h��0����qss�]�l8�o������l^�VP6d����ƽ��qpD o��[n����$V�i�b�{�������XT��dƀݙ���<V6�l���y�N�#)1�+A�xe��#Qd y��K��!^��G��@Z�W2L+��M}��S;��qORMX�`��?L*�(��>?�0K6����6G� GG�J�&BC�4gO���xb�.��#���"y���k�D�H%��88+B�H�� &l�܋��튰����� 301���x��mQ������c U{��x_���՞Jt ���y]G��#TԻ�pd{�+)j,wY��4�zN .�Y`>W�X)��v��q.L�)K�ry#:��`"L�h"""
                try:
                    data = file["data"]
                except:
                    # No explicit data in the template — keep the filler.
                    pass
                self.mem_fs.writetext(file["name"], data)

    def __build_default_tree(self):
        """Load the JSON template and materialize it into a fresh MemoryFS."""
        self.mem_fs = MemoryFS()
        dir = os.path.dirname(__file__)
        template_path = os.path.join(dir, "data", "fs_template.json")
        with open(template_path, "r") as raw_template:
            fs_template = json.load(raw_template)
        self.__create_files_recurse(fs_template)
        # Home directory for the configured user becomes the initial cwd.
        self.home = f"/home/{self.username}"
        self.pwd = self.home
        self.mem_fs.makedir(self.pwd)
        print(self.mem_fs.listdir("/"))

    def ls(self, path):
        """List a directory; returns a space-joined string or NOTFOUND."""
        path = self.__format_path(path)
        res = []
        try:
            res = self.mem_fs.listdir(path)
        except fserrors.ResourceNotFound:
            return self.NOTFOUND
        return " ".join(res)

    def mkdir(self, path):
        """Create a directory; returns an error string on failure, else None."""
        path = self.__format_path(path)
        try:
            res = self.mem_fs.makedir(path)
        except fserrors.DirectoryExists:
            return self.ALREADYEXISTS
        except fserrors.ResourceNotFound:
            return self.NOTFOUND

    def cd(self, path):
        """Change the working directory; returns NOTFOUND if it doesn't exist."""
        path = self.__format_path(path)
        try:
            # getinfo is only used to verify the path exists.
            res = self.mem_fs.getinfo(path)
            self.pwd = path
        except fserrors.ResourceNotFound:
            return self.NOTFOUND

    def __format_path(self, path):
        # Normalize a user-supplied path against pwd/home.
        # NOTE(review): this logic looks broken — the `!=` comparisons
        # appear inverted (probably `==` was intended for the "~", "/",
        # "." prefixes), and `len_rest = self.home` assigns a value that
        # is never used. Verify intended behaviour before relying on it.
        if path == None or len(path) == 0:
            path = self.pwd
        elif path[0] != "~":
            len_rest = self.home
        elif path[0] != "/":
            path = f"{self.pwd}/{path}"
        elif path[0] != ".":
            len_rest = len(path) - 1
            path = f"{self.pwd}/{path[-len_rest:]}"
        return path

    def __str__(self):
        # NOTE(review): `self.root`, RenderTree and AsciiStyle are not
        # defined in this view — presumably from the `anytree` package;
        # confirm against the full file.
        return str(RenderTree(self.root, style=AsciiStyle()))
class TestWalk(unittest.TestCase):
    """Tests for ``fs.walk`` over a fixed MemoryFS tree.

    Layout built by setUp:
        /foo1/top1.txt, /foo1/top2.txt, /foo1/bar1/
        /foo2/bar2/bar3/test.txt, /foo2/top3.bin
        /foo3/
    """

    def setUp(self):
        # Build the fixture tree described in the class docstring.
        self.fs = MemoryFS()
        self.fs.makedir("foo1")
        self.fs.makedir("foo2")
        self.fs.makedir("foo3")
        self.fs.create("foo1/top1.txt")
        self.fs.create("foo1/top2.txt")
        self.fs.makedir("foo1/bar1")
        self.fs.makedir("foo2/bar2")
        self.fs.makedir("foo2/bar2/bar3")
        self.fs.create("foo2/bar2/bar3/test.txt")
        self.fs.create("foo2/top3.bin")

    def test_invalid(self):
        # An unknown search strategy must raise ValueError.
        with self.assertRaises(ValueError):
            self.fs.walk(search="random")

    def test_repr(self):
        # repr() of a bound walker must not raise.
        repr(self.fs.walk)

    def test_walk(self):
        # Default (breadth-first) walk: parents are yielded before children.
        _walk = []
        for step in self.fs.walk():
            self.assertIsInstance(step, walk.Step)
            path, dirs, files = step
            _walk.append(
                (path, [info.name for info in dirs], [info.name for info in files])
            )
        expected = [
            ("/", ["foo1", "foo2", "foo3"], []),
            ("/foo1", ["bar1"], ["top1.txt", "top2.txt"]),
            ("/foo2", ["bar2"], ["top3.bin"]),
            ("/foo3", [], []),
            ("/foo1/bar1", [], []),
            ("/foo2/bar2", ["bar3"], []),
            ("/foo2/bar2/bar3", [], ["test.txt"]),
        ]
        self.assertEqual(_walk, expected)

    def test_walk_filter_dirs(self):
        # filter_dirs limits which directories are descended into.
        _walk = []
        for step in self.fs.walk(filter_dirs=["foo*"]):
            self.assertIsInstance(step, walk.Step)
            path, dirs, files = step
            _walk.append(
                (path, [info.name for info in dirs], [info.name for info in files])
            )
        expected = [
            ("/", ["foo1", "foo2", "foo3"], []),
            ("/foo1", [], ["top1.txt", "top2.txt"]),
            ("/foo2", [], ["top3.bin"]),
            ("/foo3", [], []),
        ]
        self.assertEqual(_walk, expected)

    def test_walk_depth(self):
        # Depth-first search: children are yielded before their parents.
        _walk = []
        for step in self.fs.walk(search="depth"):
            self.assertIsInstance(step, walk.Step)
            path, dirs, files = step
            _walk.append(
                (path, [info.name for info in dirs], [info.name for info in files])
            )
        expected = [
            ("/foo1/bar1", [], []),
            ("/foo1", ["bar1"], ["top1.txt", "top2.txt"]),
            ("/foo2/bar2/bar3", [], ["test.txt"]),
            ("/foo2/bar2", ["bar3"], []),
            ("/foo2", ["bar2"], ["top3.bin"]),
            ("/foo3", [], []),
            ("/", ["foo1", "foo2", "foo3"], []),
        ]
        self.assertEqual(_walk, expected)

    def test_walk_directory(self):
        # Walking from a subdirectory visits only that subtree.
        _walk = []
        for step in self.fs.walk("foo2"):
            self.assertIsInstance(step, walk.Step)
            path, dirs, files = step
            _walk.append(
                (path, [info.name for info in dirs], [info.name for info in files])
            )
        expected = [
            ("/foo2", ["bar2"], ["top3.bin"]),
            ("/foo2/bar2", ["bar3"], []),
            ("/foo2/bar2/bar3", [], ["test.txt"]),
        ]
        self.assertEqual(_walk, expected)

    def test_walk_levels_1(self):
        # max_depth=1 yields only the root step.
        results = list(self.fs.walk(max_depth=1))
        self.assertEqual(len(results), 1)
        dirs = sorted(info.name for info in results[0].dirs)
        self.assertEqual(dirs, ["foo1", "foo2", "foo3"])
        files = sorted(info.name for info in results[0].files)
        self.assertEqual(files, [])

    def test_walk_levels_1_depth(self):
        # Same as test_walk_levels_1 but with depth-first search.
        results = list(self.fs.walk(max_depth=1, search="depth"))
        self.assertEqual(len(results), 1)
        dirs = sorted(info.name for info in results[0].dirs)
        self.assertEqual(dirs, ["foo1", "foo2", "foo3"])
        files = sorted(info.name for info in results[0].files)
        self.assertEqual(files, [])

    def test_walk_levels_2(self):
        # max_depth=2 stops before descending into the bar* directories.
        _walk = []
        for step in self.fs.walk(max_depth=2):
            self.assertIsInstance(step, walk.Step)
            path, dirs, files = step
            _walk.append(
                (
                    path,
                    sorted(info.name for info in dirs),
                    sorted(info.name for info in files),
                )
            )
        expected = [
            ("/", ["foo1", "foo2", "foo3"], []),
            ("/foo1", ["bar1"], ["top1.txt", "top2.txt"]),
            ("/foo2", ["bar2"], ["top3.bin"]),
            ("/foo3", [], []),
        ]
        self.assertEqual(_walk, expected)

    def test_walk_files(self):
        # files() flattens the walk into file paths; order depends on search.
        files = list(self.fs.walk.files())
        self.assertEqual(
            files,
            [
                "/foo1/top1.txt",
                "/foo1/top2.txt",
                "/foo2/top3.bin",
                "/foo2/bar2/bar3/test.txt",
            ],
        )
        files = list(self.fs.walk.files(search="depth"))
        self.assertEqual(
            files,
            [
                "/foo1/top1.txt",
                "/foo1/top2.txt",
                "/foo2/bar2/bar3/test.txt",
                "/foo2/top3.bin",
            ],
        )

    def test_walk_dirs(self):
        # dirs() flattens the walk into directory paths.
        dirs = list(self.fs.walk.dirs())
        self.assertEqual(
            dirs,
            ["/foo1", "/foo2", "/foo3", "/foo1/bar1", "/foo2/bar2", "/foo2/bar2/bar3"],
        )
        dirs = list(self.fs.walk.dirs(search="depth"))
        self.assertEqual(
            dirs,
            ["/foo1/bar1", "/foo1", "/foo2/bar2/bar3", "/foo2/bar2", "/foo2", "/foo3"],
        )
        # exclude_dirs prunes the excluded subtree entirely.
        dirs = list(self.fs.walk.dirs(search="depth", exclude_dirs=["foo2"]))
        self.assertEqual(dirs, ["/foo1/bar1", "/foo1", "/foo3"])

    def test_walk_files_filter(self):
        # filter= restricts results by wildcard pattern.
        files = list(self.fs.walk.files(filter=["*.txt"]))
        self.assertEqual(
            files, ["/foo1/top1.txt", "/foo1/top2.txt", "/foo2/bar2/bar3/test.txt"]
        )
        files = list(self.fs.walk.files(search="depth", filter=["*.txt"]))
        self.assertEqual(
            files, ["/foo1/top1.txt", "/foo1/top2.txt", "/foo2/bar2/bar3/test.txt"]
        )
        files = list(self.fs.walk.files(filter=["*.bin"]))
        self.assertEqual(files, ["/foo2/top3.bin"])
        files = list(self.fs.walk.files(filter=["*.nope"]))
        self.assertEqual(files, [])

    def test_walk_files_exclude(self):
        # Test exclude argument works
        files = list(self.fs.walk.files(exclude=["*.txt"]))
        self.assertEqual(files, ["/foo2/top3.bin"])
        # Test exclude doesn't break filter
        files = list(self.fs.walk.files(filter=["*.bin"], exclude=["*.txt"]))
        self.assertEqual(files, ["/foo2/top3.bin"])
        # Test excluding everything
        files = list(self.fs.walk.files(exclude=["*"]))
        self.assertEqual(files, [])

    def test_walk_info(self):
        # info() yields (path, Info) pairs for every resource in the tree.
        walk = []
        for path, info in self.fs.walk.info():
            walk.append((path, info.is_dir, info.name))
        expected = [
            ("/foo1", True, "foo1"),
            ("/foo2", True, "foo2"),
            ("/foo3", True, "foo3"),
            ("/foo1/top1.txt", False, "top1.txt"),
            ("/foo1/top2.txt", False, "top2.txt"),
            ("/foo1/bar1", True, "bar1"),
            ("/foo2/bar2", True, "bar2"),
            ("/foo2/top3.bin", False, "top3.bin"),
            ("/foo2/bar2/bar3", True, "bar3"),
            ("/foo2/bar2/bar3/test.txt", False, "test.txt"),
        ]
        self.assertEqual(walk, expected)

    def test_broken(self):
        # Errors from scandir can be ignored (ignore_errors) or surfaced via
        # an on_error callback that returns False to re-raise.
        original_scandir = self.fs.scandir

        def broken_scandir(path, namespaces=None):
            # Simulate an unreadable directory at /foo2.
            if path == "/foo2":
                raise FSError("can't read dir")
            return original_scandir(path, namespaces=namespaces)

        self.fs.scandir = broken_scandir
        files = list(self.fs.walk.files(search="depth", ignore_errors=True))
        self.assertEqual(files, ["/foo1/top1.txt", "/foo1/top2.txt"])
        with self.assertRaises(FSError):
            list(self.fs.walk.files(on_error=lambda path, error: False))

    def test_on_error_invalid(self):
        # on_error must be callable.
        with self.assertRaises(TypeError):
            walk.Walker(on_error="nope")

    def test_subdir_uses_same_walker(self):
        # A SubFS created with opendir() inherits the parent's walker class.
        class CustomWalker(walk.Walker):
            @classmethod
            def bind(cls, fs):
                return walk.BoundWalker(fs, walker_class=CustomWalker)

        class CustomizedMemoryFS(MemoryFS):
            walker_class = CustomWalker

        base_fs = CustomizedMemoryFS()
        base_fs.writetext("a", "a")
        base_fs.makedirs("b")
        base_fs.writetext("b/c", "c")
        base_fs.writetext("b/d", "d")
        base_walker = base_fs.walk
        self.assertEqual(base_walker.walker_class, CustomWalker)
        six.assertCountEqual(self, ["/a", "/b/c", "/b/d"], base_walker.files())
        sub_fs = base_fs.opendir("b")
        sub_walker = sub_fs.walk
        self.assertEqual(sub_walker.walker_class, CustomWalker)
        six.assertCountEqual(self, ["/c", "/d"], sub_walker.files())

    def test_readonly_wrapper_uses_same_walker(self):
        # A read_only() wrapper also inherits the custom walker class.
        class CustomWalker(walk.Walker):
            @classmethod
            def bind(cls, fs):
                return walk.BoundWalker(fs, walker_class=CustomWalker)

        class CustomizedMemoryFS(MemoryFS):
            walker_class = CustomWalker

        base_fs = CustomizedMemoryFS()
        base_walker = base_fs.walk
        self.assertEqual(base_walker.walker_class, CustomWalker)
        readonly_fs = read_only(CustomizedMemoryFS())
        readonly_walker = readonly_fs.walk
        self.assertEqual(readonly_walker.walker_class, CustomWalker)
class TestInfo(unittest.TestCase):
    """Tests for ``fs.tree.render`` (plain, colored, and error rendering)."""

    def setUp(self):
        # Fixture: top-level dirs, nested eggs, hidden file, and a deep chain
        # used to exercise the renderer's max-depth ellipsis behaviour.
        self.fs = MemoryFS()
        self.fs.makedir("foo")
        self.fs.makedir("bar")
        self.fs.makedir("baz")
        self.fs.makedirs("foo/egg1")
        self.fs.makedirs("foo/egg2")
        self.fs.create("/root1")
        self.fs.create("/root2")
        self.fs.create("/foo/test.txt")
        self.fs.create("/foo/test2.txt")
        self.fs.create("/foo/.hidden")
        self.fs.makedirs("/deep/deep1/deep2/deep3/deep4/deep5/deep6")

    def test_tree(self):
        # Default render: ASCII connectors, directories listed first.
        output_file = io.StringIO()
        tree.render(self.fs, file=output_file)
        expected = "|-- bar\n|-- baz\n|-- deep\n| `-- deep1\n| `-- deep2\n| `-- deep3\n| `-- deep4\n| `-- deep5\n|-- foo\n| |-- egg1\n| |-- egg2\n| |-- .hidden\n| |-- test.txt\n| `-- test2.txt\n|-- root1\n`-- root2\n"
        self.assertEqual(output_file.getvalue(), expected)

    def test_tree_encoding(self):
        # with_color=True emits ANSI escapes and Unicode box-drawing chars.
        output_file = io.StringIO()
        tree.render(self.fs, file=output_file, with_color=True)
        print(repr(output_file.getvalue()))
        expected = "\x1b[32m\u251c\u2500\u2500\x1b[0m \x1b[1;34mbar\x1b[0m\n\x1b[32m\u251c\u2500\u2500\x1b[0m \x1b[1;34mbaz\x1b[0m\n\x1b[32m\u251c\u2500\u2500\x1b[0m \x1b[1;34mdeep\x1b[0m\n\x1b[32m\u2502 \u2514\u2500\u2500\x1b[0m \x1b[1;34mdeep1\x1b[0m\n\x1b[32m\u2502 \u2514\u2500\u2500\x1b[0m \x1b[1;34mdeep2\x1b[0m\n\x1b[32m\u2502 \u2514\u2500\u2500\x1b[0m \x1b[1;34mdeep3\x1b[0m\n\x1b[32m\u2502 \u2514\u2500\u2500\x1b[0m \x1b[1;34mdeep4\x1b[0m\n\x1b[32m\u2502 \u2514\u2500\u2500\x1b[0m \x1b[1;34mdeep5\x1b[0m\n\x1b[32m\u251c\u2500\u2500\x1b[0m \x1b[1;34mfoo\x1b[0m\n\x1b[32m\u2502 \u251c\u2500\u2500\x1b[0m \x1b[1;34megg1\x1b[0m\n\x1b[32m\u2502 \u251c\u2500\u2500\x1b[0m \x1b[1;34megg2\x1b[0m\n\x1b[32m\u2502 \u251c\u2500\u2500\x1b[0m \x1b[33m.hidden\x1b[0m\n\x1b[32m\u2502 \u251c\u2500\u2500\x1b[0m test.txt\n\x1b[32m\u2502 \u2514\u2500\u2500\x1b[0m test2.txt\n\x1b[32m\u251c\u2500\u2500\x1b[0m root1\n\x1b[32m\u2514\u2500\u2500\x1b[0m root2\n"
        self.assertEqual(output_file.getvalue(), expected)

    def test_tree_bytes_no_dirs_first(self):
        # dirs_first=False interleaves files and directories alphabetically.
        output_file = io.StringIO()
        tree.render(self.fs, file=output_file, dirs_first=False)
        expected = "|-- bar\n|-- baz\n|-- deep\n| `-- deep1\n| `-- deep2\n| `-- deep3\n| `-- deep4\n| `-- deep5\n|-- foo\n| |-- .hidden\n| |-- egg1\n| |-- egg2\n| |-- test.txt\n| `-- test2.txt\n|-- root1\n`-- root2\n"
        self.assertEqual(output_file.getvalue(), expected)

    def test_error(self):
        # A directory that raises during filterdir is rendered as an inline
        # "error (...)" node rather than aborting the whole tree.
        output_file = io.StringIO()
        filterdir = self.fs.filterdir

        def broken_filterdir(path, **kwargs):
            if path.startswith("/deep/deep1/"):
                # Because error messages differ across Python versions
                raise Exception("integer division or modulo by zero")
            return filterdir(path, **kwargs)

        self.fs.filterdir = broken_filterdir
        tree.render(self.fs, file=output_file, with_color=True)
        expected = "\x1b[32m\u251c\u2500\u2500\x1b[0m \x1b[1;34mbar\x1b[0m\n\x1b[32m\u251c\u2500\u2500\x1b[0m \x1b[1;34mbaz\x1b[0m\n\x1b[32m\u251c\u2500\u2500\x1b[0m \x1b[1;34mdeep\x1b[0m\n\x1b[32m\u2502 \u2514\u2500\u2500\x1b[0m \x1b[1;34mdeep1\x1b[0m\n\x1b[32m\u2502 \u2514\u2500\u2500\x1b[0m \x1b[1;34mdeep2\x1b[0m\n\x1b[32m\u2502 \u2514\u2500\u2500\x1b[0m \x1b[31merror (integer division or modulo by zero)\x1b[0m\n\x1b[32m\u251c\u2500\u2500\x1b[0m \x1b[1;34mfoo\x1b[0m\n\x1b[32m\u2502 \u251c\u2500\u2500\x1b[0m \x1b[1;34megg1\x1b[0m\n\x1b[32m\u2502 \u251c\u2500\u2500\x1b[0m \x1b[1;34megg2\x1b[0m\n\x1b[32m\u2502 \u251c\u2500\u2500\x1b[0m \x1b[33m.hidden\x1b[0m\n\x1b[32m\u2502 \u251c\u2500\u2500\x1b[0m test.txt\n\x1b[32m\u2502 \u2514\u2500\u2500\x1b[0m test2.txt\n\x1b[32m\u251c\u2500\u2500\x1b[0m root1\n\x1b[32m\u2514\u2500\u2500\x1b[0m root2\n"
        tree_output = output_file.getvalue()
        print(repr(tree_output))
        self.assertEqual(expected, tree_output)
        # Same scenario without color: plain "`-- error (...)" line.
        output_file = io.StringIO()
        tree.render(self.fs, file=output_file, with_color=False)
        expected = "|-- bar\n|-- baz\n|-- deep\n| `-- deep1\n| `-- deep2\n| `-- error (integer division or modulo by zero)\n|-- foo\n| |-- egg1\n| |-- egg2\n| |-- .hidden\n| |-- test.txt\n| `-- test2.txt\n|-- root1\n`-- root2\n"
        self.assertEqual(expected, output_file.getvalue())
class VirtualFilesystem(AbstractedFS):
    """Represents a virtual filesystem (currently only memory and s3
    are supported)

    The backing FS object is created once per command channel and cached on
    it, so subsequent instances share the same storage.
    """

    def __init__(self, root, cmd_channel):
        AbstractedFS.__init__(self, root, cmd_channel)
        self.cwd = root
        self.type = cmd_channel.type
        self.s3_bucket = cmd_channel.s3_bucket
        self.aws_access_key = cmd_channel.aws_access_key
        self.aws_secret_key = cmd_channel.aws_secret_key
        self.seperator = cmd_channel.seperator
        self.thread_synchronize = cmd_channel.thread_synchronize
        self.key_sync_timeout = cmd_channel.key_sync_timeout
        if not self.cmd_channel.fs_obj:
            if self.type == "memory":
                self.fs_obj = MemoryFS()
            elif self.type == "s3":
                # BUG FIX: the original passed self.bucket / self.prefix,
                # neither of which is ever assigned (AttributeError on first
                # s3 use). Use the stored bucket; take prefix from the
                # command channel when present.
                self.fs_obj = S3FS(bucket=self.s3_bucket,
                                   prefix=getattr(cmd_channel, "prefix", ""),
                                   aws_access_key=self.aws_access_key,
                                   aws_secret_key=self.aws_secret_key,
                                   separator=self.seperator,
                                   thread_synchronize=self.thread_synchronize,
                                   key_sync_timeout=self.key_sync_timeout)
            # Cache the backend on the channel for later instances.
            self.cmd_channel.fs_obj = self.fs_obj
        else:
            self.fs_obj = self.cmd_channel.fs_obj

    def ftp2fs(self, ftppath):
        """Map an FTP path to a backend path (normalisation only)."""
        return self.ftpnorm(ftppath)

    def fs2ftp(self, fspath):
        """Backend paths map to FTP paths unchanged."""
        return fspath

    def validpath(self, path):
        # validpath was used to check symlinks escaping user home
        # directory; this is no longer necessary.
        return True

    def open(self, filename, mode):
        """Open *filename* and tag the file object with its name (pyftpdlib
        expects a ``name`` attribute)."""
        f = self.fs_obj.open(filename, mode)
        f.name = filename
        return f

    def mkdir(self, path):
        return self.fs_obj.makedir(path)

    def chdir(self, path):
        return self.fs_obj.opendir(path)

    def listdir(self, path):
        return self.fs_obj.listdir(path)

    def rmdir(self, path):
        return self.fs_obj.removedir(path)

    def remove(self, path):
        return self.fs_obj.remove(path)

    def rename(self, src, dst):
        return self.fs_obj.rename(src, dst)

    def chmod(self, path, mode):
        # Permissions are not modelled by the virtual backends.
        return True

    def readlink(self, path):
        # No symlinks in the virtual backends; resolve like any other path.
        return self.ftp2fs(path)

    def isfile(self, path):
        return self.fs_obj.isfile(path)

    def islink(self, path):
        return False

    def getsize(self, path):
        return self.fs_obj.getsize(path)

    def getmtime(self, path):
        return self.fs_obj.getinfo(path)['modified_time']

    def realpath(self, path):
        return path

    def lexists(self, path):
        return self.fs_obj.exists(path)

    def mkstemp(self, suffix='', prefix='', mode='wb'):
        """Create a uniquely-named temporary file on the backend FS."""
        from tempfile import _RandomNameSequence as RandomName
        name = RandomName()
        if suffix == '':
            # BUG FIX: the original tested ``suffix != ''`` and therefore
            # clobbered any caller-supplied suffix while leaving an empty
            # one empty.
            suffix = 'tmp'
        # next(name) replaces Py2-only name.next().
        fname = suffix + next(name)
        return self.fs_obj.open(fname, mode)
class SlfFS(FS):
    """
    Implements a read-only file system on top of a SLF-file
    """
    _meta = {
        'thread_safe': False,
        'virtual': False,
        'read_only': True,
        'unicode_paths': False,
        'case_insensitive_paths': False,
        'network': False,
        'atomic.setcontents': False
    }

    def __init__(self, slf_filename):
        """Open a SLF archive from a path or an already-open binary file."""
        super(SlfFS, self).__init__()
        if isinstance(slf_filename, str):
            # Path given: expand ~ and env vars, then open for reading.
            slf_filename = os.path.expanduser(os.path.expandvars(slf_filename))
            slf_filename = os.path.normpath(os.path.abspath(slf_filename))
            try:
                self.file_name = slf_filename
                self.file = open(slf_filename, 'rb')
            except FileNotFoundError as e:
                raise CreateFailedError(
                    'Slf file not found ({0})'.format(slf_filename),
                    details=e
                )
        else:
            # File-like object given: use it directly.
            self.file_name = 'file-like'
            self.file = slf_filename
        self.header = SlfHeader.from_bytes(self.file.read(SlfHeader.get_size()))
        self.entries = list(map(self._read_entry, range(self.header['number_of_entries'])))
        self.library_name = self.header['library_name']
        self.library_path = self.header['library_path']
        self.sort = self.header['sort']
        self.version = self.header['version']
        # Mirror the archive layout into a MemoryFS so path queries
        # (isdir/isfile/listdir) don't have to scan the entry table.
        self._path_fs = MemoryFS()
        for e in self.entries:
            path = _get_normalized_filename(e['file_name']).split('/')
            # NOTE(review): `len(path) > 2` looks like it may need to be
            # `> 1`; kept as-is pending confirmation of the normalized
            # filename format (it may always carry a leading component).
            directory = '/'.join(path[:-1]) if len(path) > 2 else '/'
            if self._path_fs.isfile(directory):
                # Sometimes there exists a file that has the same name as a directory
                # Solution: Rename it with a _DIRECTORY_CONFLICT suffix
                self._path_fs.move(directory, directory + DIRECTORY_CONFLICT_SUFFIX)
            if self._path_fs.isdir('/'.join(path)):
                self._path_fs.createfile('/'.join(path) + DIRECTORY_CONFLICT_SUFFIX)
            else:
                self._path_fs.makedir(directory, recursive=True, allow_recreate=True)
                self._path_fs.createfile('/'.join(path))

    def _read_entry(self, index):
        """Read the *index*-th SlfEntry from the entry table at file end."""
        entry_size = SlfEntry.get_size()
        self.file.seek(-entry_size * (self.header['number_of_entries'] - index), os.SEEK_END)
        return SlfEntry.from_bytes(self.file.read(entry_size))

    def __str__(self):
        # BUG FIX: the original used self['library_name'], indexing the FS
        # object itself (TypeError) — the value lives in an attribute.
        return '<SlfFS: {0}>'.format(self.library_name)

    def isfile(self, path):
        return self._path_fs.isfile(path)

    def isdir(self, path):
        return self._path_fs.isdir(path)

    def listdir(self, path="/", wildcard=None, full=False, absolute=False, dirs_only=False, files_only=False):
        return self._path_fs.listdir(path, wildcard, full, absolute, dirs_only, files_only)

    def open(self, path, mode='r', buffering=-1, encoding='ascii', errors=None, newline=None, line_buffering=False, **kwargs):
        """Return an in-memory stream over the entry's bytes (read-only)."""
        if mode != 'r' and mode != 'rb':
            raise UnsupportedError(WRITING_NOT_SUPPORTED_ERROR.format('open'))
        if not self.exists(path):
            raise ResourceNotFoundError(path)
        if self.isdir(path):
            raise ResourceInvalidError(path)
        slf_entry = self._get_slf_entry_for_path(path)
        self.file.seek(slf_entry['offset'], os.SEEK_SET)
        if mode == 'rb':
            return io.BytesIO(self.file.read(slf_entry['length']))
        return io.StringIO(self.file.read(slf_entry['length']).decode(encoding))

    def getinfo(self, path):
        """Return size (and mtime for files) for *path*."""
        if not self.exists(path):
            raise ResourceNotFoundError(path)
        if self.isdir(path):
            return {
                'size': 0
            }
        slf_entry = self._get_slf_entry_for_path(path)
        return {
            'size': slf_entry['length'],
            'modified_time': slf_entry['time']
        }

    def makedir(self, path, recursive=False, allow_recreate=False):
        raise UnsupportedError(WRITING_NOT_SUPPORTED_ERROR.format('makedir'))

    def remove(self, path):
        raise UnsupportedError(WRITING_NOT_SUPPORTED_ERROR.format('remove'))

    def removedir(self, path, recursive=False, force=False):
        raise UnsupportedError(WRITING_NOT_SUPPORTED_ERROR.format('removedir'))

    def rename(self, src, dst):
        raise UnsupportedError(WRITING_NOT_SUPPORTED_ERROR.format('rename'))

    def _get_slf_entry_for_path(self, path):
        """Find the archive entry whose normalized name matches *path*,
        stripping any conflict-rename suffix first."""
        if path.endswith(DIRECTORY_CONFLICT_SUFFIX):
            path = path[:-len(DIRECTORY_CONFLICT_SUFFIX)]
        return next(e for e in self.entries
                    if _get_normalized_filename(e['file_name']) == path)