Example #1
    def test_readdir_alt(self):
        m = DefaultMapper()
        m.mkdir(['1'])
        m.mapping['notdir'] = ('somezip.zip', 'afile', 1)
        self.assertEqual(sorted(m.readdir('')), ['1', 'notdir'])
        self.assertEqual(sorted(m.readdir('notdir')), [])
        self.assertEqual(sorted(m.readdir('nowhere')), [])
Example #2
    def test_readdir(self):
        m = DefaultMapper()
        self.assertEqual(m.readdir(''), [])
        m.mkdir(['1'])
        self.assertEqual(m.readdir(''), ['1'])
        m.mkdir(['2'])
        m.mkdir(['3', '4', '5'])
        self.assertEqual(sorted(m.readdir('')), ['1', '2', '3'])
        self.assertEqual(sorted(m.readdir('3')), ['4'])
        self.assertEqual(sorted(m.readdir('3/4')), ['5'])
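
Taken together, the two readdir tests above pin down a small contract: mkdir creates one nested dict per path fragment, file entries are (archive_name, member_name, size) tuples stored directly in mapping, and readdir returns the child names of a directory but an empty list for files and nonexistent paths. The following minimal sketch passes both tests as written; SketchMapper and its internals are illustrative stand-ins, not the project's actual DefaultMapper.

class SketchMapper(object):
    """Hypothetical stand-in for illustration only."""

    def __init__(self):
        self.mapping = {}

    def mkdir(self, fragments):
        # create one nested dict per path fragment
        node = self.mapping
        for frag in fragments:
            node = node.setdefault(frag, {})

    def traverse(self, path):
        # walk the nested dicts; None means the path does not exist
        node = self.mapping
        for frag in filter(None, path.split('/')):
            if not isinstance(node, dict) or frag not in node:
                return None
            node = node[frag]
        return node

    def readdir(self, path):
        # child names for directories, [] for files and missing paths
        node = self.traverse(path)
        return list(node) if isinstance(node, dict) else []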
Example #3
    def test_mapping_complex_multiple(self):
        demo3 = path('demo3.zip')
        demo4 = path('demo4.zip')
        m = DefaultMapper()
        # load order matters: new entries will not overwrite old ones.
        m.load_archive(demo3)
        m.load_archive(demo4)

        self.assertDemo3ThenDemo4(m, demo3, demo4)

        self.assertEqual(
            m.traverse('demo/dir1/file1'), (demo3, 'demo/dir1/file1', 33))
        self.assertEqual(
            m.readfile('demo/dir1/file1'),
            b'b026324c6904b2a9cb4b88d6d61c81d1\n')

        self.assertEqual(
            m.traverse('demo/dir1/file5'), (demo4, 'demo/dir1/file5', 26))
        self.assertEqual(
            m.traverse('demo/dir4/dir5/dir6/file6'),
            (demo3, 'demo/dir4/dir5/dir6/file6', 33))

        self.assertEqual(
            m.readfile('demo/dir1/file5'),
            b'demo4.zip demo/dir1/file5\n')

        self.assertEqual(sorted(m.readdir('')), ['demo', 'hello'])
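
The assertions above depend on load order: demo3 is loaded first and, per the comment, its entries are not overwritten, so demo/dir1/file1 still resolves to demo3 after demo4 is loaded, while demo/dir1/file5 comes from demo4. A sketch of that first-wins rule under the overwrite=False default constructed here (merge_entry and the flat mapping are illustrative, not the project's internals):

def merge_entry(mapping, path, entry, overwrite=False):
    # the first archive to claim a path wins unless overwrite is set
    if overwrite or path not in mapping:
        mapping[path] = entry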
Example #4
class ExplosiveFUSE(LoggingMixIn, Operations):
    """
    The interface between the mapping and the FUSE bindings (provided
    by the Operations class).
    """
    def __init__(self,
                 archive_paths,
                 pathmaker_name='default',
                 _pathmaker=None,
                 overwrite=False,
                 include_arcname=False):
        # if include_arcname is not defined, define it based on whether
        # there is a single archive or multiple archives.
        self.mapping = DefaultMapper(
            pathmaker_name=pathmaker_name,
            _pathmaker=_pathmaker,
            overwrite=overwrite,
            include_arcname=include_arcname,
        )
        loaded = sum(
            self.mapping.load_archive(abspath(p)) for p in archive_paths)
        logger.info('loaded %d archive(s).', loaded)

        self.open_entries = {}

    def getattr(self, path, fh=None):
        key = path[1:]

        info = self.mapping.traverse(key)
        if info is None:
            raise FuseOSError(ENOENT)

        if isinstance(info, dict):
            # dir_record and file_record are shared stat templates
            # defined at module level.
            return dir_record

        # file entries are (archive, member, size) tuples; report the
        # real size on top of the shared file template.
        result = {'st_size': info[2]}
        result.update(file_record)
        return result

    def _mapping_open(self, key):
        idfe_fp = self.mapping.open(key)
        if not idfe_fp:
            # should this errno instead be transport error? io error?
            raise FuseOSError(ENOENT)
        return idfe_fp

    def open(self, path, flags):
        # TODO implement memory usage tracking by reusing cache.
        key = path[1:]
        logger.info('opening for %s', key)

        # the idfe is the stable identifier for this "version" of the
        # given path (id of fileentry), fp is the file pointer.
        idfe, fp = self._mapping_open(key)
        # initial position is 0
        pos = 0
        # add this to mapping, accompanied by the current position of 0
        # this is the open_entry and its id is the fh returned.
        open_entry = [fp, pos, idfe]
        # TODO ideally, the idfe is returned as the fh, but we need
        # additional tracking on all open handles.  Reference counting
        # should be used.
        fh = id(open_entry)
        self.open_entries[fh] = open_entry
        return fh

    def release(self, path, fh):
        # pop returns None for an unknown fh; guard before unpacking
        open_entry = self.open_entries.pop(fh, None)
        if open_entry:
            open_entry[0].close()

    def read(self, path, size, offset, fh):
        key = path[1:]
        logger.info('reading data for %s (fh:%#x, size:%d, offset:%d)', key,
                    fh, size, offset)
        open_entry = self.open_entries.get(fh)
        if not open_entry:
            raise FuseOSError(EIO)
        zf, pos, idfe = open_entry
        logger.debug('open_entry: zf: %s, pos: %d, idfe: %s', zf, pos, idfe)
        seek = offset - pos
        if seek < 0:
            # the zip stream cannot seek backwards, so reopen it
            logger.info('seek position is %d, need to reopen', seek)
            new_idfe, zf = self._mapping_open(key)
            if idfe != new_idfe:
                # a different file entry now backs this path; kill this
                # handle rather than serve mismatched data
                raise FuseOSError(EIO)
            # overwrite the open_entry's zipfile with the new one.
            open_entry[0] = zf
            # reset the forward-seek distance from the start of the file
            seek = offset
        # discard bytes up to the requested offset (forward seek emulation)
        zf.read(seek)
        data = zf.read(size)
        # record the actual new stream position for the next read
        open_entry[1] = offset + len(data)
        return data

    def readdir(self, path, fh):
        key = path[1:]
        return ['.', '..'] + self.mapping.readdir(key)

    def statfs(self, path):
        # TODO report total size of the zips?
        return dict(f_bsize=1024, f_blocks=1024, f_bavail=0)
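
For context, a minimal way to mount this class, assuming the fusepy package that supplies the Operations and LoggingMixIn bases imported above (the archive names and mount point are hypothetical):

from fuse import FUSE

if __name__ == '__main__':
    # read-only foreground mount of two archives at /mnt/zips
    FUSE(ExplosiveFUSE(['demo3.zip', 'demo4.zip']), '/mnt/zips',
         foreground=True, ro=True)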