Example #1
    def add_from_engine(cls, engine, **kwargs):
        app = kwargs.get('app')
        session = kwargs.get('session', db.session)
        bind_name = kwargs.get('bind_name')
        backend_version = kwargs.get('backend_version')

        use_cache = kwargs.get('use_cache', False) and app
        if use_cache:
            flush_cache = kwargs.get('flush_cache', False)
            cache_path = kwargs.get('cache_path',
                                    app.config.get('DISKCACHE_PATH'))
            cache_timeout = kwargs.get(
                'cache_timeout', app.config.get('DISKCACHE_DBEX_DATABASE_TIMEOUT'))
            cache = Cache(cache_path)
            if flush_cache:
                cache.pop(engine)

        try:
            inspector = cache.get(engine) if use_cache else None
            if inspector is None:
                inspector = inspect(engine)
                if use_cache:
                    cache.set(engine, inspector, cache_timeout)
        except OperationalError:
            logger.error("can't inspect engine %s" % engine)
            raise
        db_name = db_engine_to_name(engine)
        try:
            info = get_discovered_db_engine_info(bind_name)
        except NoBindNameFoundError:
            logger.warning("No info found for engine bind name '%s'" % bind_name)
            info = None
        if not backend_version and info:
            try:
                backend_version = info['backend_version']
            except (TypeError, KeyError):
                pass

        d = Database(name=db_name, bind_name=bind_name, engine=engine.name,
                     driver=engine.driver, backend_version=backend_version)
        table_names = inspector.get_table_names()
        for table_name in table_names:
            logger.debug("table_name: %s" % (table_name,))
            t = Table(name=table_name)
            columns = inspector.get_columns(table_name)
            for column in columns:
                c = Column(**column)
                t.columns.append(c)
            d.tables.append(t)
        session.add(d)
        if kwargs.get('db_session_commit_enabled', True):
            session.commit()
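
A minimal sketch of the same cache-or-inspect pattern in isolation, assuming diskcache and SQLAlchemy; the database URL, cache directory, and timeout are illustrative, and plain metadata (a list of table names) is cached instead of the inspector object so the value is safely picklable:

from diskcache import Cache
from sqlalchemy import create_engine, inspect

engine = create_engine('sqlite:///example.db')  # hypothetical database
cache = Cache('/tmp/inspector-cache')           # hypothetical cache path
key = str(engine.url)  # engine objects may not pickle reliably as keys

table_names = cache.get(key)
if table_names is None:  # cache miss: inspect the database once
    table_names = inspect(engine).get_table_names()
    cache.set(key, table_names, expire=3600)  # re-inspect after an hour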
Example #2
class Cache(object):
    def __init__(self):
        try:
            self.cache = DC('./tmp')
        except Exception as ex:
            print('Got an exception opening diskcache: {}'.format(ex))
            self.cache = None

    def __del__(self):
        try:
            self.cache.close()
        except Exception as ex:
            print('Got an exception closing diskcache: {}'.format(ex))

    def set(self, key, value):
        if self.cache is not None:
            self.cache.set(key, BytesIO(value), read=True, tag=u'data')

    def get(self, key):
        if self.cache is not None:
            # with tag=True diskcache returns a (value, tag) tuple; with
            # read=True a hit yields a file-like reader, never b''
            value, _tag = self.cache.get(key, default=None, read=True, tag=True)
            if value is not None:
                return value
        return None

    def pop(self, key):
        if self.cache is not None:
            # Cache.pop() does not accept read=; it returns the stored bytes
            value, _tag = self.cache.pop(key, default=None, tag=True)
            if value is not None:
                return value
        return None

    def delete(self, key):
        if self.cache is not None:
            self.cache.delete(key)

    def create_index(self):
        if self.cache is not None:
            self.cache.create_tag_index()
            return self.cache.tag_index
        return None

    def clear_all(self):
        if self.cache is not None:
            self.cache.clear()
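
A hypothetical round-trip with the wrapper above: set() stores the raw bytes through a BytesIO so diskcache writes them as file content, and get() (which passes read=True) hands back a file-like reader rather than the bytes themselves:

c = Cache()
c.set('greeting', b'hello diskcache')
reader = c.get('greeting')
if reader is not None:
    print(reader.read())  # b'hello diskcache'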
Example #3
class DiskCachePersistence:
    def __init__(self, dbname, dbpassphrase):
        self.dbname = dbname
        self.dbpassphrase = dbpassphrase
        self.db = Cache(dbname)
        # TODO: create encrypted Cache with kdf dbpassphrase
        # TODO: purge expired skippedMessageKey based on store_time

    def save_conversation(self, conversation):
        # diskcache's set() signature is set(key, value, ...); it has no
        # 'prefix' keyword, and the conversation itself is the value to store
        return self.db.set(b'conv:' + conversation.ks['CONVid'], conversation,
                           tag='conv', retry=True)

    def load_conversation(self, conv_id):
        return self.db.get(b'conv:' + conv_id, None, retry=True)

    def delete_conversation(self, conversation):
        return self.db.pop(b'conv:' + conversation.ks['CONVid'], None, retry=True)

    def get_other_names(self, name):
        names = []
        for k in self.db:
            if k.startswith(b'conv:'):  # keys are bytes, so match a bytes prefix
                names.append(self.db[k].other_name)
        return names
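
A hypothetical usage sketch with a stand-in conversation type exposing the ks['CONVid'] bytes key and other_name attribute the class above expects; the store path is illustrative:

class FakeConversation:  # stand-in for the real conversation type
    def __init__(self, conv_id, other_name):
        self.ks = {'CONVid': conv_id}
        self.other_name = other_name

store = DiskCachePersistence('/tmp/conv-db', dbpassphrase=None)
store.save_conversation(FakeConversation(b'abc123', 'alice'))
print(store.load_conversation(b'abc123').other_name)  # 'alice'
print(store.get_other_names('me'))                    # ['alice']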
Example #4
class CloudFS(Operations):
    '''Baidu netdisk filesystem'''
    def __init__(self, *args, **kw):
        self.buffer = Cache('./cache/buffer')
        self.dir_buffer = Cache('./cache/dir_buffer')

        self.traversed_folder = {}
        self.disk = PCS()

        self.createLock = Lock()

        self.writing_files = {}
        self.downloading_files = {}

        # update all folders in another thread
        dirReaderDaemon.submit(self.readdirAsync, "/", 100, dirReaderDaemon)

    def _add_file_to_buffer(self, path, file_info):
        foo = File()
        foo['st_ctime'] = file_info['local_ctime']
        foo['st_mtime'] = file_info['local_mtime']
        foo['st_mode'] = (stat.S_IFDIR | 0o777) if file_info['isdir'] \
            else (stat.S_IFREG | 0o777)  # octal 0o777, not hex 0x777
        foo['st_nlink'] = 2 if file_info['isdir'] else 1
        foo['st_size'] = file_info['size']
        self.buffer[path] = foo

    def _del_file_from_buffer(self, path):
        self.buffer.pop(path)

    def getattr(self, path, fh=None):
        if path in self.writing_files:
            return self.writing_files[path]
        if path.split("/")[-1].startswith("."):
            raise FuseOSError(errno.ENOENT)

        if path not in self.buffer or self.buffer[path] is None:
            jdata = json.loads(self.disk.meta([path]))

            if 'info' not in jdata or jdata['errno'] != 0:
                raise FuseOSError(errno.ENOENT)

            file_info = jdata['info'][0]
            self._add_file_to_buffer(path, file_info)
        st = self.buffer[path].getDict()

#         logger.info(f'st: {st}')
        return st

    def readdirAsync(self, path, depth=2, threadPool=pool):
        try:
            foo = json.loads(self.disk.list_files(path))
        except Exception as e:
            logger.exception(e)
            return  # without a listing there is nothing to traverse

        files = ['.', '..']
        abs_files = []
        if 'errno' in foo:
            logger.error(
                "token may be invalid; try logging in to http://pan.baidu.com again in Chrome"
            )
        if "list" not in foo:
            #             logger.info("no list")
            return

        for file in foo['list']:
            if file['server_filename'].startswith("."):
                continue
            files.append(file['server_filename'])
            abs_files.append(file['path'])
#             logger.debug(file['path'])

        file_num = len(abs_files)
        group = int(math.ceil(file_num / 100.0))
        #         logger.debug(f"group: {group}")
        #         logger.debug(f"abs_files: {abs_files}")
        for i in range(group):
            obj = [f for n, f in enumerate(abs_files) if n % group == i]  # one batch of files
            while True:
                try:
                    ret = json.loads(self.disk.meta(obj))
                    #                     logger.debug(f'{ret}')
                    break
                except Exception as e:
                    logger.exception(e)
            for file_info in ret['info']:
                #                 logger.debug(file_info)
                self._add_file_to_buffer(file_info['path'], file_info)
                if depth > 0:
                    depth -= 1
                    if file_info['isdir']:
                        if file_info['path'] not in self.traversed_folder:
                            # mark the child folder (not the parent) as visited
                            self.traversed_folder[file_info['path']] = True
                            threadPool.submit(self.readdirAsync,
                                              file_info['path'], depth,
                                              threadPool)
        self.dir_buffer[path] = files

#     @funcLog

    def readdir(self, path, offset):
        #         if path not in self.traversed_folder:
        self.traversed_folder[path] = True
        pool.submit(self.readdirAsync, path, 2, pool)
        if path in self.dir_buffer:
            #             logger.info(f'{path},{self.dir_buffer[path]}')
            for r in self.dir_buffer[path]:
                yield r
        else:
            files = ['.', '..']
            for r in files:
                yield r

    # @funcLog
    def open(self, path, flags):
        if path in self.writing_files:
            return 0
        # no thread-race concern here: open() is invoked by a single thread
        try:
            if path not in self.downloading_files:
                url = self.disk.getRestUrl(path)
                x = Task(url, path, self.disk)
                x.start()
                self.downloading_files[path] = x
        except Baidu8Secs as e:
            logger.exception(e)
        except Exception as e:
            logger.exception(e)
        return 0

    def read(self, path, size, offset, fh):
        x = self.downloading_files[path]
        if x:
            data = x.get_cache(offset, size)

            filename = path[path.rfind("/") + 1:]
            if filename.startswith("enc."):
                if offset == 0:
                    if data and len(data) > encrpted_length:
                        data = bytes(cipher(data, 0, encrpted_length, 123))
                    else:
                        print("decrpt failed!")
            return data

        raise FuseOSError(errno.EIO)

    def updateCache(self, old, new):
        directory = old[:old.rfind("/")]
        filename = old[old.rfind("/") + 1:]
        if len(directory) == 0:
            directory = "/"
        if not new:
            oldCache = self.dir_buffer[directory]
            if filename in oldCache:
                oldCache.remove(filename)
                self.dir_buffer[directory] = oldCache
            if old in self.buffer:
                self.buffer.pop(old)
        else:
            oldCache = self.dir_buffer[directory]
            if filename in oldCache:
                oldCache.remove(filename)
                newfilename = new[new.rfind("/") + 1:]
                oldCache.append(newfilename)
                self.dir_buffer[directory] = oldCache
            if old in self.buffer:
                old_info = self.buffer.pop(old)
                self.buffer[new] = old_info

    def unlink(self, path):
        self.disk.delete([path])
        self.updateCache(path, None)

    def access(self, path, amode):
        return 0

    def rmdir(self, path):
        self.disk.delete([path])
        self.updateCache(path, None)

    def rename(self, old, new):
        self.disk.rename(old, new)
        self.updateCache(old, new)


#     @funcLog

    def mkdir(self, path, mode):
        directory = path[:path.rfind("/")]
        filename = path[path.rfind("/") + 1:]

        if directory in self.dir_buffer:  # avoid a KeyError on an uncached parent
            cache = self.dir_buffer[directory]
            cache.append(filename)
            self.dir_buffer[directory] = cache
        self.disk.mkdir(path)

    def create(self, path, mode, fh=None):
        with self.createLock:
            if path not in self.writing_files:
                t = time.time()
                self.writing_files[path] = {
                    'st_atime': t,
                    'st_ctime': t,
                    'st_gid': 20,
                    'st_mode': stat.S_IFREG | 0o777,
                    'st_mtime': t,
                    'st_nlink': 1,
                    'st_size': 0,
                    'st_uid': 502,
                    'uploading_tmp': tempfile.NamedTemporaryFile('wb')
                }
        return 0

    def flush(self, path, fh):
        with self.createLock:
            if path in self.writing_files:
                self.writing_files[path]["uploading_tmp"].flush()
        return 0

    def release(self, path, fh):
        with self.createLock:
            if path in self.writing_files:
                uploading_tmp = self.writing_files[path]['uploading_tmp']
                self.disk.upload(uploading_tmp.name, path)
                self.writing_files[path]['uploading_tmp'].close()

                if path in self.writing_files:
                    del self.writing_files[path]

                # prevent accidental reads while the upload may still be in progress
                if path in self.downloading_files:
                    del self.downloading_files[path]

                print("released", path)
                return
        # no thread-race concern here: release() is invoked by a single thread
        if path in self.downloading_files:
            #             self.downloading_files[path].terminate()
            #             del self.downloading_files[path]
            #             uploading_tmp = "./uploading_tmp"+path
            #             logger.info("delete uploading_tmp:", uploading_tmp)
            #             os.remove(uploading_tmp)
            pass

    def write(self, path, data, offset, fp):

        filename = path[path.rfind("/") + 1:]
        if filename.startswith("enc."):
            if offset == 0 and data and len(data) > encrpted_length:
                data = bytes(cipher(data, 0, encrpted_length, 123))

        length = len(data)
        self.writing_files[path]["st_size"] += length
        self.writing_files[path]["uploading_tmp"].write(data)

        return length

    def chmod(self, path, mode):
        pass

    def statfs(self, path):
        return {
            'f_bavail': int(85533433401 / 4096),
            'f_bfree': int(85533433401 / 4096),  # same value (free blocks)
            'f_favail': 4290675908,
            'f_ffree': 4290675908,  # same value (free inodes)
            'f_bsize': 104857,  # preferred block size
            'f_blocks': int(5611374772224 / 8),
            'f_files': 4294967279,
            'f_flag': 0,
            'f_frsize': 4096,
            'f_namemax': 255
        }
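
A minimal mount sketch for the class above, assuming fusepy (the library that provides the Operations base class used here) and that the supporting objects (PCS, Task, pool, dirReaderDaemon) are importable; the mountpoint is illustrative:

from fuse import FUSE

if __name__ == '__main__':
    # foreground=True keeps the process attached, which eases debugging
    FUSE(CloudFS(), '/mnt/baidu', foreground=True)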
Example #5
class CloudFS(Operations):
    '''Baidu netdisk filesystem'''
    def __init__(self, mainArgs, *args, **kw):
        logger.info(colored("- fuse 4 cloud driver -", 'red'))
        self.buffer = Cache('./cache/buffer-batchmeta')
        self.dir_buffer = Cache('./cache/dir_buffer-buffer-batchmeta')

        self.attr_requesting = Cache('./cache/attr-requesting')
        self.mainArgs = mainArgs

        self.traversed_folder = Cache('./cache/traversed-folder')
        self.disk = PCS(self.mainArgs)

        self.createLock = Lock()
        self.attrLock = Lock()

        self.writing_files = {}
        self.downloading_files = {}

        logger.info(f'mainArgs:{mainArgs}')

        q = json.loads(self.disk.quota())

        # only request once
        try:
            self.total_size = q['quota']
            self.used = q['used']
        except Exception as e:
            self.total_size = 100000000000
            self.used = 0
            logger.exception(e)
            logger.debug("can't load quota api, falling back to defaults")

        self.avail = self.total_size - self.used

        if mainArgs.debug:
            logger.setLevel(logging.DEBUG)
            logger.debug(colored("- debug mode -", 'red'))
            logger.debug(
                colored("- cach would not be the same after restart -", 'red'))
            self.buffer = Cache('./cache/buffer-batchmeta' + str(time.time()))
            self.dir_buffer = Cache('./cache/dir_buffer-buffer-batchmeta' +
                                    str(time.time()))
            self.traversed_folder = Cache('./cache/traversed-folder' +
                                          str(time.time()))
            self._readDirAsync("/", 2, dirReaderDaemon)
        else:
            logger.setLevel(logging.INFO)

            # update all folders in another thread
            self._readDirAsync("/", mainArgs.preload_level, dirReaderDaemon)

    @staticmethod
    def add_write_permission(st, permission='u'):
        """Add `w` (`write`) permission to specified targets."""
        mode_map = {
            'u': stat.S_IWUSR,
            'g': stat.S_IWGRP,
            'o': stat.S_IWOTH,
            'a': stat.S_IWUSR | stat.S_IWGRP | stat.S_IWOTH,
        }
        logger.info(
            f'st_mode types: {type(stat.S_IWUSR)}, {type(st["st_mode"])}')
        for t in permission:
            st['st_mode'] |= mode_map[t]

        return st

    def _baidu_file_attr_convert(self, path, file_info):
        foo = fileAttr.copy()
        try:
            foo['st_ctime'] = (file_info['local_ctime']
                               if 'local_ctime' in file_info else
                               file_info['ctime'])
            foo['st_mtime'] = (file_info['local_mtime']
                               if 'local_mtime' in file_info else
                               file_info['mtime'])
            # 16877 == 0o40755 (directory); 36279 == 0o106667 (regular file)
            foo['st_mode'] = 16877 if file_info['isdir'] else 36279
            foo['st_nlink'] = 2 if file_info['isdir'] else 1
            foo['st_size'] = int(file_info.get('size', 0))
            self.buffer[path] = foo
        except Exception as e:
            logger.debug(f'add buffer error {e}, {path}: {file_info}')

    def _del_file_from_buffer(self, path):
        self.buffer.pop(path)

    def _getRootAttr(self):
        path = "/"
        if path in self.buffer:
            return self.buffer[path]

        logger.debug(f'net root: {path}')
        jdata = json.loads(self.disk.meta([path]))

        f = fileAttr.copy()
        f["st_mode"] = 16877
        f["st_nlink"] = 2
        if 'error_code' in jdata and jdata["error_code"] != 0:
            logger.debug(f"error_code:{jdata}")
            #             logger.info(f'{error_map[str(jdata["error_code"])]} args: {path}')
            self.buffer.set(path, f, expire=60)
            return f

        if "list" not in jdata or len(jdata["list"]) == 0:
            logger.debug(f"{path} not list :{jdata}")
            self.buffer.set(path, f, expire=60)
            return f

        file_info = jdata["list"][0]
        self._baidu_file_attr_convert(path, file_info)
        return file_info

    @funcLog
    def getattr(self, path, fh=None):
        '''
        Returns a dictionary with keys identical to the stat C structure of
        stat(2).

        st_atime, st_mtime and st_ctime should be floats.

        NOTE: There is an incompatibility between Linux and Mac OS X
        concerning st_nlink of directories. Mac OS X counts all files inside
        the directory, while Linux counts only the subdirectories.
        '''
        if path in self.writing_files:
            return self.writing_files[path]

        if path.split("/")[-1].startswith("."):
            raise FuseOSError(errno.ENOENT)

        # special handle root Attr
        if path == "/":
            return self._getRootAttr()

        parentDir = os.path.dirname(path)
        if parentDir not in self.dir_buffer:
            self._readDir(parentDir, 1)

        if path in self.buffer:
            return self.buffer[path]

        raise FuseOSError(errno.ENOENT)

    @funcLog
    def truncate(self, path, length, fh=None):
        self.unlink(path)
        self.create(path, None)
        self.writing_files[path]["uploading_tmp"].truncate(length)

    def _readDirAsync(self, path, depth, p):
        p.submit(self._readDir, path, depth, p)

    def _readDir(self, path, depth=2, threadPool=pool):
        if path not in self.traversed_folder:
            self.traversed_folder.set(path,
                                      b'1',
                                      expire=self.mainArgs.cache_timeout)
            logger.debug(f'net dir {depth} - {path} ')
            try:
                foo = json.loads(self.disk.list_files(path))

                files = ['.', '..']
                if 'error_code' in foo and foo["error_code"] != 0:
                    logger.info(
                        f'{error_map[str(foo["error_code"])]} args: {path}')
                if "list" not in foo:
                    return

                depth -= 1
                for file in foo['list']:
                    if file['server_filename'].startswith("."):
                        continue
                    files.append(file['server_filename'])
                    #                 logger.debug(f'{file}')
                    self._baidu_file_attr_convert(file['path'], file)
                    if depth > 0:
                        if file['isdir']:
                            self._readDirAsync(file['path'], depth, threadPool)
#                             self._readDir(file['path'],depth,threadPool)

                self.dir_buffer[path] = files

            except Exception as s:
                logger.exception(s)

    @funcLog
    def readdir(self, path, offset):
        self._readDirAsync(path, 1, pool)
        if path in self.dir_buffer:
            for r in self.dir_buffer[path]:
                yield r
        else:
            files = ['.', '..']
            for r in files:
                yield r

    # @funcLog
    def open(self, path, flags):
        if path in self.writing_files:
            return 0
        # no thread-race concern here: open() is invoked by a single thread
        try:
            if path not in self.downloading_files:
                url = self.disk.getRestUrl(path)
                x = Task(url, self.mainArgs, path, self.disk)
                x.start()
                self.downloading_files[path] = x
        except Baidu8Secs as e:
            logger.exception(e)
        except Exception as e:
            logger.exception(e)
        return 0

    def read(self, path, size, offset, fh):
        x = self.downloading_files[path]
        if x:
            data = x.get_cache(offset, size)

            filename = path[path.rfind("/") + 1:]
            if filename.startswith("enc."):
                if offset == 0:
                    if data and len(data) > encrpted_length:
                        data = bytes(
                            cipher(data, 0, encrpted_length,
                                   self.mainArgs.key))
                    else:
                        print("decrpt failed!")
            return data

        raise FuseOSError(errno.EIO)

    def updateCache(self, path, newValue):
        '''
        add     updateCache(path, value)
        delete  updateCache(path, None)
        update  updateCache(path, value)
        '''
        pass

    def updateCacheKeyOnly(self, old, new):
        '''
        delete     updateCacheKeyOnly(old,None)
        add/update updateCacheKeyOnly(old,new) 
        '''
        try:
            old_parent_dir = os.path.dirname(old)
            old_name = os.path.basename(old)
            if not new:
                oldCache = self.dir_buffer.get(old_parent_dir)
                # remove
                if oldCache:
                    if old_name in oldCache:
                        oldCache.remove(old_name)
                        self.dir_buffer[old_parent_dir] = oldCache
                    if old in self.buffer:
                        self.buffer.pop(old)
            else:
                print("updateCache", old, new)
                oldCache = self.dir_buffer[old_parent_dir]
                new_parent_dir = os.path.dirname(new)
                if old_name in oldCache:
                    # dir old remove
                    oldCache.remove(old_name)
                    self.dir_buffer[old_parent_dir] = oldCache
                    # dir new add
                    newfilename = new[new.rfind("/") + 1:]
                    newCache = self.dir_buffer.get(new_parent_dir, [])
                    newCache.append(newfilename)
                    self.dir_buffer[new_parent_dir] = newCache

                if old in self.buffer:
                    old_info = self.buffer.pop(old)
                    self.buffer[new] = old_info
        except Exception as e:
            logger.info(e)

    def updateDir(self, old, new):
        pass

    def unlink(self, path):
        '''
        deletes a file only (directories go through rmdir)
        '''
        print("unlink", path)

        self.disk.delete([path])
        self.updateCacheKeyOnly(path, None)

    def rmdir(self, path):
        '''
        deletes a directory only
        '''

        self.disk.delete([path])
        self.updateCacheKeyOnly(path, None)

    def access(self, path, amode):
        return 0

    def rename(self, old, new):
        '''
        affects both directories and files
        '''
        logger.info(f'rename {old}, {new}')
        self.disk.rename(old, new)
        self.updateCacheKeyOnly(old, new)

    @funcLog
    def mkdir(self, path, mode):
        logger.info(f'making dir {path}')

        r = json.loads(self.disk.mkdir(path))

        if 'error_code' in r:
            logger.info(f'{r}')
            logger.info(
                f'{error_map[str(r["error_code"])]} args: {path}, response:{r}'
            )
            return

        directory = path[:path.rfind("/")]
        filename = path[path.rfind("/") + 1:]

        if directory in self.dir_buffer:
            cache = self.dir_buffer[directory]
            cache.append(filename)
            self.dir_buffer[directory] = cache  # only write back a cached listing, never None

        self._baidu_file_attr_convert(path, r)

    @funcLog
    def create(self, path, mode, fh=None):
        logger.debug(f'create {path}')
        with self.createLock:
            if path not in self.writing_files:
                attr = fileAttr.copy()
                t = time.time()

                attr['uploading_tmp'] = tempfile.NamedTemporaryFile('wb')
                attr['st_mode'] = attr[
                    'st_mode'] | stat.S_IFREG | stat.S_ISUID | stat.S_ISGID

                self.writing_files[path] = attr
            else:
                logger.debug(f'{path} is writing on, wait another turn..')
        return 0

    def flush(self, path, fh):
        with self.createLock:
            if path in self.writing_files:
                self.writing_files[path]["uploading_tmp"].flush()
        return 0

    def release(self, path, fh):
        with self.createLock:
            if path in self.writing_files:
                uploading_tmp = self.writing_files[path]['uploading_tmp']
                r = json.loads(self.disk.upload(uploading_tmp.name, path))
                logger.info(f'upload response: {r}')

                self.writing_files[path]['uploading_tmp'].close()
                #                 if path in self.buffer:
                #                     del self.buffer[path]

                if path in self.writing_files:
                    del self.writing_files[path]

                # prevent accidental reads while the upload may still be in progress
                if path in self.downloading_files:
                    del self.downloading_files[path]

                # update the cached file attributes
                self._baidu_file_attr_convert(path, r)

                # update parent dir
                parentDir = os.path.dirname(path)
                filename = path[path.rfind("/") + 1:]

                if parentDir in self.dir_buffer:
                    parentDirCache = self.dir_buffer[parentDir]
                    parentDirCache.append(filename)
                    self.dir_buffer[parentDir] = parentDirCache
                    logger.info(f'{self.dir_buffer[parentDir]}')

                print("released", path)
                return
        # no thread-race concern here: release() is invoked by a single thread
        if path in self.downloading_files:
            #             self.downloading_files[path].terminate()
            #             del self.downloading_files[path]
            #             uploading_tmp = "./uploading_tmp"+path
            #             logger.info("delete uploading_tmp:", uploading_tmp)
            #             os.remove(uploading_tmp)
            pass

    def write(self, path, data, offset, fp):

        filename = path[path.rfind("/") + 1:]
        if filename.startswith("enc."):
            if offset == 0 and data and len(data) > encrpted_length:
                data = bytes(
                    cipher(data, 0, encrpted_length, self.mainArgs.key))

        length = len(data)
        self.writing_files[path]["st_size"] += length
        self.writing_files[path]["uploading_tmp"].write(data)

        return length

    def chmod(self, path, mode):
        pass

    def statfs(self, path):

        # TODO read from cloud disk
        return {
            'f_bavail': int((self.avail) / 4096),
            'f_bfree': int((self.avail) / 4096),  # same value (free blocks)
            'f_favail': 4290675908,
            'f_ffree': 4290675908,  # same value (free inodes)
            'f_bsize': 104857,  # preferred block size
            'f_blocks': int(self.total_size / 8),
            'f_files': 4294967279,
            'f_flag': 0,
            'f_frsize': 4096,
            'f_namemax': 255
        }
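
The traversed_folder cache above leans on diskcache's per-key expiry so directory listings go stale on their own; a minimal sketch of that pattern, with an illustrative path and timeout:

from diskcache import Cache

visited = Cache('./cache/traversed-folder-demo')
visited.set('/some/dir', b'1', expire=300)  # evicted automatically after 5 minutes

if '/some/dir' not in visited:  # True again once the entry expires
    pass  # ... re-scan the directory here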