Example #1
    def test_mapping_open_complex(self):
        demo3 = path('demo3.zip')
        demo4 = path('demo4.zip')
        m = DefaultMapper()
        m.load_archive(demo3)
        m.load_archive(demo4)
        self.assertEqual(m.open('demo/dir1/file1')[1].read(1), b'b')
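From the assertion above, DefaultMapper.open() returns a pair: an entry identifier and a file-like object. A minimal usage sketch under that reading; the explosive.fuse.mapper import path and the demo fixtures are assumptions carried over from these tests:

    from explosive.fuse.mapper import DefaultMapper  # import path assumed

    m = DefaultMapper()
    m.load_archive('demo3.zip')  # fixture archive assumed from the tests
    idfe, fp = m.open('demo/dir1/file1')  # (entry id, file-like object)
    assert fp.read(1) == b'b'
    fp.close()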
Example #2
    def test_mapping_simple_nested_blocked(self):
        target = path('demo2.zip')
        m = DefaultMapper()
        # create a file entry named 'demo' to block creation of the dir
        m.mapping['demo'] = ('somezip.zip', 'notadir', 0)
        m.load_archive(target)
        self.assertEqual(m.mapping, {'demo': ('somezip.zip', 'notadir', 0)})
Example #3
    def test_mapping_complex_multiple(self):
        demo3 = path('demo3.zip')
        demo4 = path('demo4.zip')
        m = DefaultMapper()
        # load order matters; new entries will not overwrite old ones.
        m.load_archive(demo3)
        m.load_archive(demo4)

        self.assertDemo3ThenDemo4(m, demo3, demo4)

        self.assertEqual(
            m.traverse('demo/dir1/file1'), (demo3, 'demo/dir1/file1', 33))
        self.assertEqual(
            m.readfile('demo/dir1/file1'),
            b'b026324c6904b2a9cb4b88d6d61c81d1\n')

        self.assertEqual(
            m.traverse('demo/dir1/file5'), (demo4, 'demo/dir1/file5', 26))
        self.assertEqual(
            m.traverse('demo/dir4/dir5/dir6/file6'),
            (demo3, 'demo/dir4/dir5/dir6/file6', 33))

        self.assertEqual(
            m.readfile('demo/dir1/file5'),
            b'demo4.zip demo/dir1/file5\n')

        self.assertEqual(sorted(m.readdir('')), ['demo', 'hello'])
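For reference, the shapes exercised above: traverse() returns an (archive_path, entry_name, size) tuple for a file entry (and, per getattr in the FUSE class below, a dict for a directory), readfile() returns the entry's full contents as bytes, and readdir() lists child names. A small sketch under the same fixture assumptions:

    m = DefaultMapper()
    m.load_archive('demo3.zip')
    m.load_archive('demo4.zip')

    archive, name, size = m.traverse('demo/dir1/file1')  # file entry tuple
    children = sorted(m.readdir(''))  # ['demo', 'hello'] per the test above
    payload = m.readfile('demo/dir1/file1')  # complete file contents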
Example #4
    def test_mapping_load_dupe(self):
        demo3 = path('demo3.zip')
        m = DefaultMapper()
        m.load_archive(demo3)
        self.assertEqual(list(m.archives.keys()), [path('demo3.zip')])
        m.load_archive(demo3)
        # reloading does not add a duplicate key, even though the
        # recorded timestamp would have changed.
        self.assertEqual(list(m.archives.keys()), [path('demo3.zip')])
Example #5
    def test_mapping_overwrite_true(self):
        target = path('demo1.zip')
        m = DefaultMapper(overwrite=True)
        m.mapping = {
            'file5': ('dummy.zip', 'file5', 1),
            'file6': ('dummy.zip', 'file6', 1),
            'file7': ('dummy.zip', 'file7', 1),
        }

        m.load_archive(target)
        self.assertEqual(m.mapping, {
            'file1': (target, 'file1', 33),
            'file2': (target, 'file2', 33),
            'file3': (target, 'file3', 33),
            'file4': (target, 'file4', 33),
            'file5': (target, 'file5', 33),
            'file6': (target, 'file6', 33),
            # file7 is absent from demo1.zip, so the pre-existing dummy
            # entry survives even with overwrite=True.
            'file7': ('dummy.zip', 'file7', 1),
        })
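Note that overwrite=True only affects keys the newly loaded archive actually provides: file1 through file6 are replaced, while the dummy file7 entry survives. A sketch contrasting the two modes, assuming a path present in both fixture archives:

    m_first_wins = DefaultMapper()               # default: existing entries kept
    m_last_wins = DefaultMapper(overwrite=True)  # reloaded paths are replaced
    for m in (m_first_wins, m_last_wins):
        m.load_archive('demo3.zip')
        m.load_archive('demo4.zip')
    # For a path present in both archives, m_first_wins still resolves it
    # to demo3.zip, while m_last_wins now resolves it to demo4.zip.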
Example #6
    def test_mapping_bad(self):
        bad_target = path('bad.zip')
        missing_target = path('nosuchzip.zip')
        m = DefaultMapper()
        # a corrupt zip, a nonexistent path and a non-path object should
        # all be rejected without contributing any entries.
        m.load_archive(bad_target)
        m.load_archive(missing_target)
        m.load_archive(object())
        self.assertEqual(m.mapping, {})
Example #7
from errno import EIO, ENOENT
from logging import getLogger
from os.path import abspath

from fuse import FuseOSError, LoggingMixIn, Operations

# DefaultMapper, dir_record and file_record are provided elsewhere in the
# package this excerpt is taken from; they are not reproduced here.

logger = getLogger(__name__)


class ExplosiveFUSE(LoggingMixIn, Operations):
    """
    The interface between the mapping and the FUSE bindings (provided
    by the Operations class).
    """
    def __init__(self,
                 archive_paths,
                 pathmaker_name='default',
                 _pathmaker=None,
                 overwrite=False,
                 include_arcname=False):
        # if include_arcname is not defined, define it based on whether
        # a single archive or multiple archives are present.
        self.mapping = DefaultMapper(
            pathmaker_name=pathmaker_name,
            _pathmaker=_pathmaker,
            overwrite=overwrite,
            include_arcname=include_arcname,
        )
        loaded = sum(
            self.mapping.load_archive(abspath(p)) for p in archive_paths)
        logger.info('loaded %d archive(s).', loaded)

        self.open_entries = {}

    def getattr(self, path, fh=None):
        key = path[1:]

        info = self.mapping.traverse(key)
        if info is None:
            raise FuseOSError(ENOENT)

        if isinstance(info, dict):
            return dir_record

        result = {'st_size': info[2]}
        result.update(file_record)
        return result

    def _mapping_open(self, key):
        idfe_fp = self.mapping.open(key)
        if not idfe_fp:
            # should this errno instead be transport error? io error?
            raise FuseOSError(ENOENT)
        return idfe_fp

    def open(self, path, flags):
        # TODO implement memory usage tracking by reusing cache.
        key = path[1:]
        logger.info('opening for %s', key)

        # the idfe is the stable identifier for this "version" of the
        # given path (id of fileentry), fp is the file pointer.
        idfe, fp = self._mapping_open(key)
        # initial position is 0
        pos = 0
        # add this to mapping, accompanied by the current position of 0
        # this is the open_entry and its id is the fh returned.
        open_entry = [fp, pos, idfe]
        # TODO ideally, the idfe is returned as the fh, but we need
        # additional tracking on all open handles.  Reference counting
        # should be used.
        fh = id(open_entry)
        self.open_entries[fh] = open_entry
        return fh

    def release(self, path, fh):
        # the handle may already be gone; guard against unpacking None.
        open_entry = self.open_entries.pop(fh, None)
        if open_entry:
            open_entry[0].close()

    def read(self, path, size, offset, fh):
        key = path[1:]
        logger.info('reading data for %s (fh:%#x, size:%d, offset:%d)', key,
                    fh, size, offset)
        open_entry = self.open_entries.get(fh)
        if not open_entry:
            raise FuseOSError(EIO)
        zf, pos, idfe = open_entry
        logger.debug('open_entry: zf: %s, pos: %d, idfe: %s', zf, pos, idfe)
        seek = offset - pos
        if seek < 0:
            # have to reopen...
            logger.info('seek position is %d, need reopening', seek)
            new_idfe, zf = self._mapping_open(key)
            if idfe != new_idfe:
                # a different file entry now occupies this path; bail out
                # with EIO rather than serve mismatched data.
                raise FuseOSError(EIO)
            # overwrite the open_entry's zipfile with the new one.
            open_entry[0] = zf
            # reset rest of the values
            seek = offset
            pos = 0
        # the decompressed stream cannot seek backwards, so discard bytes
        # until it reaches the requested offset.
        junk = zf.read(seek)
        data = zf.read(size)
        # record the new stream position: the requested offset plus the
        # number of bytes actually read.
        open_entry[1] = offset + len(data)
        return data

    def readdir(self, path, fh):
        key = path[1:]
        return ['.', '..'] + self.mapping.readdir(key)

    def statfs(self, path):
        # TODO report total size of the zips?
        return dict(f_bsize=1024, f_blocks=1024, f_bavail=0)
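To actually mount the filesystem, an instance of this Operations subclass is handed to fusepy's FUSE constructor. A minimal sketch; the mountpoint and archive names below are placeholders:

    from fuse import FUSE

    if __name__ == '__main__':
        # foreground=True keeps the process attached; unmount with Ctrl-C
        # (or `fusermount -u mnt` from another shell).
        FUSE(ExplosiveFUSE(['demo3.zip', 'demo4.zip']), 'mnt', foreground=True)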