class BaiduFS(Fuse):
    """Baidu netdisk filesystem (python-fuse bindings).

    Read-only view of the Baidu PCS tree rooted at ``baidu_rootdir``.
    Every operation maps the FUSE path onto the remote root and calls the
    ``BaiduPan`` REST client.
    """

    def __init__(self, *args, **kw):
        Fuse.__init__(self, *args, **kw)
        # PCS client authenticated with the configured OAuth token.
        self.disk = BaiduPan(Baidufuseconf.baidu_token)

    def get_abs_path(self, path):
        """Map a FUSE path (always absolute, e.g. "/foo") onto the remote root.

        NOTE(review): uses a bare ``baidu_rootdir`` global while __init__
        reads ``Baidufuseconf.baidu_token`` -- confirm the global exists in
        this module (a later revision reads Baidufuseconf.baidu_rootdir).
        """
        return "%s%s" % (baidu_rootdir, path)

    def getattr(self, path):
        """Build a stat result from PCS ``meta``; -ENOENT when missing."""
        logger.error("getattr is: " + path)
        abs_path = self.get_abs_path(path)
        st = MyStat()
        jdata = json.loads(self.disk.meta(abs_path))
        if 'list' not in jdata:
            logger.error("getattr is None")
            return -errno.ENOENT
        meta = jdata['list'][0]
        st.st_ctime = meta['ctime']
        st.st_mtime = meta['mtime']
        # Directories get drwxr-xr-x; everything else is a regular file.
        st.st_mode = (stat.S_IFDIR | 0o755) if meta['isdir'] \
            else (stat.S_IFREG | 0o755)
        st.st_nlink = 2 if meta['isdir'] else 1
        st.st_size = meta['size']
        return st

    def readdir(self, path, offset):
        """Yield '.', '..' and each child entry (remote prefix stripped)."""
        logger.error("readdir is: " + path)
        abs_path = self.get_abs_path(path)
        jdata = json.loads(self.disk.ls(abs_path))
        files = ['.', '..']
        for entry in jdata['list']:
            # PCS returns absolute remote paths; keep only the leaf part.
            files.append(entry['path'].encode('ascii', 'ignore')[len(abs_path):])
        logger.error(files)
        for name in files:
            yield fuse.Direntry(name)

    def open(self, path, flags):
        """Permit read-only opens; reject any write access with -EACCES."""
        logger.error("open is: " + path)
        accmode = os.O_RDONLY | os.O_WRONLY | os.O_RDWR
        if (flags & accmode) != os.O_RDONLY:
            return -errno.EACCES

    def mkdir(self, path, mode):
        """Create a remote directory (``mode`` is ignored by PCS)."""
        logger.error("mkdir is:" + path)
        abs_path = self.get_abs_path(path)
        self.disk.mkdir(abs_path)

    def rmdir(self, path):
        """Remove a remote directory."""
        logger.error("rmdir is:" + path)
        abs_path = self.get_abs_path(path)
        self.disk.rm(abs_path)

    def read(self, path, size, offset):
        """Read ``size`` bytes at ``offset`` via an HTTP Range request."""
        logger.error("read is: " + path)
        abs_path = self.get_abs_path(path)
        # HTTP Range end is inclusive (RFC 7233): offset..offset+size-1
        # yields exactly ``size`` bytes; the original asked for size+1.
        paras = {'Range': 'bytes=%s-%s' % (offset, offset + size - 1)}
        # NOTE(review): headers passed positionally; the later revision of
        # this class calls download(..., headers=paras) -- confirm that
        # BaiduPan.download's second positional parameter is the headers.
        return self.disk.download(abs_path, paras)
class BaiduFS(Fuse):
    """Baidu netdisk filesystem (python-fuse bindings).

    Read-only view of the Baidu PCS tree rooted at
    ``Baidufuseconf.baidu_rootdir``; all operations delegate to the
    ``BaiduPan`` REST client.
    """

    def __init__(self, *args, **kw):
        Fuse.__init__(self, *args, **kw)
        # PCS client authenticated with the configured OAuth token.
        self.disk = BaiduPan(Baidufuseconf.baidu_token)

    def get_abs_path(self, path):
        """Prefix the FUSE path (always absolute) with the remote root."""
        return "%s%s" % (Baidufuseconf.baidu_rootdir, path)

    def getattr(self, path):
        """Build a stat result from PCS ``meta``; -ENOENT when missing."""
        logger.error("getattr is: " + path)
        abs_path = self.get_abs_path(path)
        st = MyStat()
        jdata = json.loads(self.disk.meta(abs_path))
        if 'list' not in jdata:
            logger.error("getattr is None")
            return -errno.ENOENT
        meta = jdata['list'][0]
        st.st_ctime = meta['ctime']
        st.st_mtime = meta['mtime']
        # Directories get drwxr-xr-x; everything else is a regular file.
        st.st_mode = (stat.S_IFDIR | 0o755) if meta['isdir'] \
            else (stat.S_IFREG | 0o755)
        st.st_nlink = 2 if meta['isdir'] else 1
        st.st_size = meta['size']
        return st

    def readdir(self, path, offset):
        """Yield '.', '..' and each child entry (remote prefix stripped)."""
        logger.error("readdir is: " + path)
        abs_path = self.get_abs_path(path)
        jdata = json.loads(self.disk.ls(abs_path))
        files = ['.', '..']
        for entry in jdata['list']:
            # PCS returns absolute remote paths; keep only the leaf part.
            files.append(entry['path'].encode('ascii', 'ignore')[len(abs_path):])
        logger.error(files)
        for name in files:
            yield fuse.Direntry(name)

    def open(self, path, flags):
        """Permit read-only opens; reject any write access with -EACCES."""
        logger.error("open is: " + path)
        accmode = os.O_RDONLY | os.O_WRONLY | os.O_RDWR
        if (flags & accmode) != os.O_RDONLY:
            return -errno.EACCES

    def mkdir(self, path, mode):
        """Create a remote directory (``mode`` is ignored by PCS)."""
        logger.error("mkdir is:" + path)
        abs_path = self.get_abs_path(path)
        self.disk.mkdir(abs_path)

    def rmdir(self, path):
        """Remove a remote directory."""
        logger.error("rmdir is:" + path)
        abs_path = self.get_abs_path(path)
        self.disk.rm(abs_path)

    def read(self, path, size, offset):
        """Read ``size`` bytes at ``offset`` via an HTTP Range request."""
        logger.error("read is: " + path)
        abs_path = self.get_abs_path(path)
        # The header VALUE must not repeat the header name (the original sent
        # "Range: Range: bytes=..."), and the Range end is inclusive
        # (RFC 7233), so offset..offset+size-1 yields exactly ``size`` bytes.
        paras = {'Range': 'bytes=%s-%s' % (offset, offset + size - 1)}
        return self.disk.download(abs_path, headers=paras)
def upload_bpcs(upload_dir, upload_file, del_file):
    """Upload ``upload_file`` into ``upload_dir`` on Baidu PCS, overwriting
    any existing copy, then delete the stale ``del_file`` if present.

    NOTE: remote names are built by plain concatenation, so ``upload_dir``
    is expected to end with a path separator.
    """
    access_token = "access"  # obtained from the Baidu cloud console
    disk = BaiduPan(access_token)
    bpcs_dir = '/apps/bpcs_uploader/'
    remote_dir = bpcs_dir + upload_dir
    # Create the remote directory on first use.
    if not disk.meta(remote_dir):
        disk.mkdir(remote_dir)
    # Quota inspection, if ever needed:
    # print disk.quota()
    # Upload the file; overwrite silently when it already exists.
    disk.upload(upload_file, path=remote_dir + upload_file, ondup='overwrite')
    # Drop the historical file, if one is there.
    stale = remote_dir + del_file
    if disk.meta(stale):
        disk.rm(stale)
def upload_bpcs(upload_dir, upload_file, del_file):
    """Push ``upload_file`` to the PCS app folder ``upload_dir`` (creating it
    when absent, overwriting on name collision) and remove the old
    ``del_file`` from that folder when it exists.

    Remote paths are assembled by string concatenation, so ``upload_dir``
    should carry its own trailing separator.
    """
    access_token = "access"  # token issued by the Baidu cloud console
    pan = BaiduPan(access_token)
    base = '/apps/bpcs_uploader/'
    target = base + upload_dir
    if not pan.meta(target):  # first run: remote folder does not exist yet
        pan.mkdir(target)
    # For usage statistics: print pan.quota()
    pan.upload(upload_file, path=target + upload_file, ondup='overwrite')
    obsolete = target + del_file
    if pan.meta(obsolete):  # purge the previous generation, if any
        pan.rm(obsolete)
}] } print disk.mmv(json.dumps(par)) #cp print disk.cp("/apps/appname/hello.txt.bak", "/apps/appname/hello.txt") #mcp par = { "list": [{ "path": "/apps/appname/hello.txt1" }, { "path": "/apps/appname/dirs" }] } print disk.mcp(json.dumps(par)) #rm print disk.rm('/apps/appname/hello.txt.bak') #mrm par = { "list": [{ "path": "/apps/appname/hello.txt1" }, { "path": "/apps/appname/dirs" }] } print disk.mrm(json.dumps(par)) #search print disk.grep('hello', '/apps/appname/') print disk.search('hello', '/apps/appname/') #thumb print disk.thumb('/apps/appname/1.png', 100, 100) #diff
class BaiFuse(LoggingMixIn, Operations):
    """Baidu PCS filesystem for fusepy (``fuse.Operations``).

    Maps each FUSE path under the module-level ``BAIDUPATH`` remote root and
    delegates to the ``BaiduPan`` REST client. Write support is a stub.
    """

    def __init__(self, root, token):
        self.root = root
        self.token = token
        self.api = BaiduPan(self.token)

    def get_path(self, path):
        """Map a FUSE path onto the remote root.

        FUSE always hands us absolute paths ("/foo"); os.path.join discards
        every earlier component when a later one is absolute, so the original
        ``os.path.join(BAIDUPATH, path)`` silently dropped BAIDUPATH. Strip
        the leading slash before joining.
        """
        return os.path.join(BAIDUPATH, path.lstrip('/'))

    def getattr(self, path, fh=None):
        """Stat dict built from PCS ``meta``; ``{}`` when the path is unknown.

        NOTE(review): fusepy convention is to raise FuseOSError(ENOENT) for
        a missing path; returning {} produces an all-zero stat -- confirm
        before changing, as callers may rely on the current behaviour.
        """
        resp = json.loads(self.api.meta(self.get_path(path)))
        if 'list' not in resp:
            return {}
        info = resp['list'][0]
        is_dir = bool(info['isdir'])
        return {
            'st_ino': 0,
            'st_dev': 0,
            'st_atime': 0,
            'st_mtime': info['mtime'],
            'st_ctime': info['ctime'],
            'st_gid': os.getgid(),
            'st_uid': os.getuid(),
            # drwxr-xr-x for directories, -rwxr-xr-x for files.
            'st_mode': ((stat.S_IFDIR | 0o755) if is_dir
                        else (stat.S_IFREG | 0o755)),
            'st_size': info['size'],
            'st_nlink': 2 if is_dir else 1,
        }

    def mkdir(self, path, mode):
        """Create a remote directory (``mode`` is ignored by PCS)."""
        self.api.mkdir(self.get_path(path))

    def read(self, path, size, offset, fh):
        """Read ``size`` bytes at ``offset`` via an HTTP Range request."""
        # The header VALUE must not repeat the header name (the original sent
        # "Range: Range: bytes=..."), and the Range end is inclusive
        # (RFC 7233): offset..offset+size-1 is exactly ``size`` bytes.
        rng = {'Range': 'bytes=%d-%d' % (offset, offset + size - 1)}
        return self.api.download(self.get_path(path), headers=rng)

    def readdir(self, path, fh):
        """Return '.', '..' and the entry *names* of the directory.

        fusepy expects bare entry names; PCS returns absolute remote paths,
        so keep only the final component of each.
        """
        resp = json.loads(self.api.ls(self.get_path(path)))
        names = [os.path.basename(entry['path']).encode('utf-8')
                 for entry in resp['list']]
        return ['.', '..'] + names

    def rmdir(self, path):
        """Remove a remote directory."""
        self.api.rm(self.get_path(path))

    def write(self, path, data, offset, fh):
        """Stub write support.

        NOTE(review): PCS has no partial-write API -- a correct
        implementation needs chunked upload plus a superfile merge. This
        uploads the ``path`` argument as-is and ignores ``data``/``offset``,
        which is almost certainly not what a caller expects.
        """
        return self.api.upload(path)
#mv print disk.mv("/apps/appname/hello.txt", "/apps/appname/hello.txt.bak") #mmv par = {"list": [{"from": "/apps/appname/hello.txt.bak", "to": "/apps/appname/hello.txt.bak.bak"}, {"from": "/apps/appname/dirs", "to": "/apps/appname/dirsbak"}]} print disk.mmv(json.dumps(par)) #cp print disk.cp("/apps/appname/hello.txt.bak", "/apps/appname/hello.txt") #mcp par = {"list": [{"path": "/apps/appname/hello.txt1"}, {"path": "/apps/appname/dirs"}]} print disk.mcp(json.dumps(par)) #rm print disk.rm('/apps/appname/hello.txt.bak') #mrm par = {"list": [{"path": "/apps/appname/hello.txt1"}, {"path": "/apps/appname/dirs"}]} print disk.mrm(json.dumps(par)) #search print disk.grep('hello', '/apps/appname/') print disk.search('hello', '/apps/appname/') #thumb print disk.thumb('/apps/appname/1.png', 100, 100) #diff print disk.diff() #streaming print disk.streaming('/apps/appname/1.mkv') #stream print disk.stream(type='doc')