def init_media(self):
    if 'media' not in self.filesystems:
        return
    if not self.media_urls:
        if not self.media_app:
            raise errors.StartupFailedError("no 'url' or 'app' specified in [media] section")
        if self.media_app not in self.apps:
            startup_log.warning('app set in [media]/app has not been installed ({})'.format(self.media_app))
            return
        try:
            self.media_urls = [self.apps[self.media_app].mounts[0][1]]
        except:
            raise errors.StartupFailedError('unable to detect media url! (specify in [media]/url)')
    for i, _url in enumerate(self.media_urls):
        startup_log.debug('media url #%s is %s', i, _url)

    media_fs = self.filesystems['media']
    media_mount_fs = MountFS()
    for app in itervalues(self.apps):
        for media_name, media_sub_fs in iteritems(app.lib.media):
            name = "%s_%s" % (app.name, media_name)
            media_path = "%s-%s" % (app.name, media_name)
            app.media[media_name] = media_path
            if name in self.filesystems:
                mount_media = self.filesystems[name]
            else:
                mount_media = media_sub_fs
            if name not in self.filesystems:
                self.filesystems[name] = mount_media
            media_mount_fs.mountdir(media_path, mount_media)
    media_fs.addfs("media", media_mount_fs)
def copydir(fs1, fs2, create_destination=True, ignore_errors=False, chunk_size=64*1024):
    """Copies contents of a directory from one filesystem to another.

    :param fs1: Source filesystem, or a tuple of (<filesystem>, <directory path>)
    :param fs2: Destination filesystem, or a tuple of (<filesystem>, <directory path>)
    :param create_destination: If True, the destination will be created if it doesn't exist
    :param ignore_errors: If True, exceptions from file moves are ignored
    :param chunk_size: Size of chunks to move if a simple copy is used

    """
    if isinstance(fs1, tuple):
        fs1, dir1 = fs1
        fs1 = fs1.opendir(dir1)
    if isinstance(fs2, tuple):
        fs2, dir2 = fs2
        if create_destination:
            fs2.makedir(dir2, allow_recreate=True, recursive=True)
        fs2 = fs2.opendir(dir2)

    mount_fs = MountFS(auto_close=False)
    mount_fs.mount('src', fs1)
    mount_fs.mount('dst', fs2)
    mount_fs.copydir('src', 'dst',
                     overwrite=True,
                     ignore_errors=ignore_errors,
                     chunk_size=chunk_size)
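A minimal usage sketch for the copydir helper above, assuming the legacy pyfilesystem (fs 0.x) API it is written against; the MemoryFS objects and the 'photos'/'backup' directory names are made up for illustration:

from fs.memoryfs import MemoryFS

src = MemoryFS()
src.makedir('photos')
src.setcontents('photos/a.txt', b'hello')

dst = MemoryFS()
# copy the 'photos' directory of src into a new 'backup' directory on dst
copydir((src, 'photos'), (dst, 'backup'))
print(dst.listdir('backup'))  # ['a.txt']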
def test_empty(self):
    """Test MountFS with nothing mounted."""
    mount_fs = MountFS()
    self.assertEqual(mount_fs.getinfo(''), {})
    self.assertEqual(mount_fs.getxattr('', 'yo'), None)
    self.assertEqual(mount_fs.listdir(), [])
    self.assertEqual(list(mount_fs.ilistdir()), [])
def movedir(fs1, fs2, overwrite=False, ignore_errors=False, chunk_size=64*1024):
    """Moves contents of a directory from one filesystem to another.

    :param fs1: Source filesystem, or a tuple of (<filesystem>, <directory path>)
    :param fs2: Destination filesystem, or a tuple of (<filesystem>, <directory path>)
    :param overwrite: If True, existing files in the destination will be overwritten
    :param ignore_errors: If True, exceptions from file moves are ignored
    :param chunk_size: Size of chunks to move if a simple copy is used

    """
    if isinstance(fs1, tuple):
        fs1, dir1 = fs1
        fs1 = fs1.opendir(dir1)
    if isinstance(fs2, tuple):
        fs2, dir2 = fs2
        fs2.makedir(dir2, allow_recreate=True)
        fs2 = fs2.opendir(dir2)

    mount_fs = MountFS()
    mount_fs.mount('src', fs1)
    mount_fs.mount('dst', fs2)
    mount_fs.movedir('src', 'dst',
                     overwrite=overwrite,
                     ignore_errors=ignore_errors,
                     chunk_size=chunk_size)
def main():
    parser = argparse.ArgumentParser(description='SLF Unpacker')
    parser.add_argument('slf_file', help="path to the SLF file")
    parser.add_argument(
        '-o', '--output-folder', default=None,
        help="folder for extracted files. By default, files are extracted alongside the slf file "
             "in a subdirectory. For example, the content of foo/bar/maps.slf is extracted into "
             "the folder foo/bar/maps"
    )
    parser.add_argument(
        '-v', '--verbose', action='store_true', default=False,
        help="be verbose, e.g. print names of the extracted files"
    )
    args = parser.parse_args()

    slf_file = args.slf_file
    slf_file = os.path.expanduser(os.path.expandvars(slf_file))
    slf_file = os.path.normpath(os.path.abspath(slf_file))
    if not os.path.exists(slf_file):
        print("Error: '{}' is not found".format(args.slf_file), file=sys.stderr)
        exit(1)

    output_folder = args.output_folder
    if output_folder is None:
        output_folder = os.path.join(os.path.dirname(slf_file),
                                     os.path.splitext(os.path.basename(slf_file))[0])
    output_folder = os.path.expanduser(os.path.expandvars(output_folder))
    output_folder = os.path.normpath(os.path.abspath(output_folder))
    if not os.path.exists(output_folder):
        os.mkdir(output_folder)

    if args.verbose:
        print("Input file: {}".format(slf_file))
        print("Output folder: {}".format(output_folder))

    slf_fs = SlfFS(slf_file)
    out_fs = OSFS(output_folder)

    if args.verbose:
        print("Extracting Files:")
        slf_fs.printtree()

    combined_fs = MountFS()
    combined_fs.mountdir('slf', slf_fs)
    combined_fs.mountdir('out', out_fs)
    combined_fs.copydir('/slf', '/out', overwrite=True)

    if args.verbose:
        print("Done")
def test_auto_close(self):
    """Test MountFS auto close is working"""
    multi_fs = MountFS()
    m1 = MemoryFS()
    m2 = MemoryFS()
    multi_fs.mount('/m1', m1)
    multi_fs.mount('/m2', m2)
    self.assert_(not m1.closed)
    self.assert_(not m2.closed)
    multi_fs.close()
    self.assert_(m1.closed)
    self.assert_(m2.closed)
def test_no_auto_close(self):
    """Test MountFS auto close can be disabled"""
    multi_fs = MountFS(auto_close=False)
    m1 = MemoryFS()
    m2 = MemoryFS()
    multi_fs.mount('/m1', m1)
    multi_fs.mount('/m2', m2)
    self.assert_(not m1.closed)
    self.assert_(not m2.closed)
    multi_fs.close()
    self.assert_(not m1.closed)
    self.assert_(not m2.closed)
def open_slf_for_copy(src_path, dest_path, slf_name):
    """Opens an SLF file for reading and returns a MountFS"""
    slf_file = src_path / (slf_name + ".slf")
    slf_fs = SlfFS(str(slf_file))

    output_dir = dest_path / slf_name
    output_dir.mkdir(exist_ok=True)
    out_fs = OSFS(output_dir)

    combined_fs = MountFS()
    combined_fs.mountdir('slf', slf_fs)
    combined_fs.mountdir('out', out_fs)
    return combined_fs
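A minimal usage sketch for open_slf_for_copy above; the 'install'/'extracted' directories and the 'maps' archive name are hypothetical, and the copy call mirrors what the unpacker's main() does:

from pathlib import Path

# assumes ./install/maps.slf and the ./extracted directory already exist
combined_fs = open_slf_for_copy(Path('install'), Path('extracted'), 'maps')
# /slf exposes the archive contents, /out is the extraction directory
combined_fs.copydir('/slf', '/out', overwrite=True)
combined_fs.close()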
def __init__(self, datapath='data', log=None, level=None):
    if not log:
        log = 'stream'
    if not level:
        level = 1
    if log == 'stream':
        logginghandler = Logger()
        logginghandler.create_stream_logger(level=level)

    # Defining paths
    workingpath = os.path.dirname(os.path.realpath(__file__))
    self.workingpath = workingpath
    logger.info('Startup Core')
    logger.info('Workingpath: %s' % workingpath)

    ### Config
    if datapath:
        self.config = Config(datapath, 'MediaProxy')
    else:
        self.config = Config(workingpath, 'MediaProxy')
    if not self.config.mode:
        logger.info('Fresh Start, No Config File')
    else:
        logger.info('Config in state: %s' % self.config.mode)

    ### Import dependencies (requires extlibs)
    from .plugin_manager import Plugins
    from .database import Database
    from fs.mountfs import MountFS

    ### Plugins
    if self.config.mode:
        self.plugins = Plugins(
            self.config.cfg,
            loadpathes=[os.path.join(datapath, 'plugins')])

    self.filesystem = MountFS()

    ### Database
    self.database = Database(self.config)
    self.database.load()
    self.channels_populate()
def get_fs(cls, registry, fs_name, fs_name_params, fs_path, writeable, create_dir):
    from fs.mountfs import MountFS
    from ConfigParser import ConfigParser
    cfg = ConfigParser()
    if '#' in fs_path:
        path, section = fs_path.split('#', 1)
    else:
        path = fs_path
        section = 'fs'
    cfg.readfp(registry.open(path))

    mount_fs = MountFS()
    for mount_point, mount_path in cfg.items(section):
        mount_fs.mount(mount_point, registry.opendir(mount_path, create_dir=create_dir))
    return mount_fs, ''
def get_fs(cls, registry, fs_name, fs_name_params, fs_path, writeable, create_dir):
    from fs.mountfs import MountFS
    from configparser import ConfigParser
    cfg = ConfigParser()
    if '#' in fs_path:
        path, section = fs_path.split('#', 1)
    else:
        path = fs_path
        section = 'fs'
    cfg.read_file(registry.open(path))

    mount_fs = MountFS()
    for mount_point, mount_path in cfg.items(section):
        mount_fs.mount(mount_point, registry.opendir(mount_path, create_dir=create_dir))
    return mount_fs, ''
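A sketch of the INI layout the opener above expects, assuming the default 'fs' section; the mount names and paths are hypothetical, and a real file would be read through registry.open() rather than from a string:

from configparser import ConfigParser
from io import StringIO

cfg = ConfigParser()
cfg.read_file(StringIO("[fs]\ndata = ./data\ncache = ./cache\n"))
# each (mount_point, path) pair becomes a mount in the MountFS
print(cfg.items('fs'))  # [('data', './data'), ('cache', './cache')]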
def movedir(fs1, fs2, overwrite=False, ignore_errors=False, chunk_size=64 * 1024):
    """Moves contents of a directory from one filesystem to another.

    :param fs1: Source filesystem, or a tuple of (<filesystem>, <directory path>)
    :param fs2: Destination filesystem, or a tuple of (<filesystem>, <directory path>)
    :param overwrite: If True, existing files in the destination will be overwritten
    :param ignore_errors: If True, exceptions from file moves are ignored
    :param chunk_size: Size of chunks to move if a simple copy is used

    """
    if isinstance(fs1, tuple):
        fs1, dir1 = fs1
        fs1 = fs1.opendir(dir1)
    if isinstance(fs2, tuple):
        fs2, dir2 = fs2
        fs2.makedir(dir2, allow_recreate=True)
        fs2 = fs2.opendir(dir2)

    mount_fs = MountFS()
    mount_fs.mount('src', fs1)
    mount_fs.mount('dst', fs2)
    mount_fs.movedir('src', 'dst',
                     overwrite=overwrite,
                     ignore_errors=ignore_errors,
                     chunk_size=chunk_size)
def copydir(fs1, fs2, create_destination=True, ignore_errors=False, chunk_size=64 * 1024):
    """Copies contents of a directory from one filesystem to another.

    :param fs1: Source filesystem, or a tuple of (<filesystem>, <directory path>)
    :param fs2: Destination filesystem, or a tuple of (<filesystem>, <directory path>)
    :param create_destination: If True, the destination will be created if it doesn't exist
    :param ignore_errors: If True, exceptions from file moves are ignored
    :param chunk_size: Size of chunks to move if a simple copy is used

    """
    if isinstance(fs1, tuple):
        fs1, dir1 = fs1
        fs1 = fs1.opendir(dir1)
    if isinstance(fs2, tuple):
        fs2, dir2 = fs2
        if create_destination:
            fs2.makedir(dir2, allow_recreate=True, recursive=True)
        fs2 = fs2.opendir(dir2)

    mount_fs = MountFS(auto_close=False)
    mount_fs.mount('src', fs1)
    mount_fs.mount('dst', fs2)
    mount_fs.copydir('src', 'dst',
                     overwrite=True,
                     ignore_errors=ignore_errors,
                     chunk_size=chunk_size)
def channel_get(self, ctype=None):
    if not ctype:
        return self.filesystem
    else:
        from fs.mountfs import MountFS
        channel = MountFS()
        for path, fs in self.filesystem.mounts:
            if fs.exists(u'/'):
                if 'mediaproxy.media' in fs.getinfo(u'/').raw:
                    logger.info(
                        'Check Channel: %s - %s' %
                        (path, fs.getinfo(u'/').raw['mediaproxy.media']['type']))
                    if fs.getinfo(u'/').raw['mediaproxy.media']['type'] == ctype:
                        channel.mount(path, fs)
        return channel
def movedir(fs1, fs2, create_destination=True, ignore_errors=False, chunk_size=64*1024):
    """Moves contents of a directory from one filesystem to another.

    :param fs1: A tuple of (<filesystem>, <directory path>)
    :param fs2: Destination filesystem, or a tuple of (<filesystem>, <directory path>)
    :param create_destination: If True, the destination will be created if it doesn't exist
    :param ignore_errors: If True, exceptions from file moves are ignored
    :param chunk_size: Size of chunks to move if a simple copy is used

    """
    if not isinstance(fs1, tuple):
        raise ValueError("first argument must be a tuple of (<filesystem>, <path>)")

    fs1, dir1 = fs1
    parent_fs1 = fs1
    parent_dir1 = dir1
    fs1 = fs1.opendir(dir1)

    if parent_dir1 in ('', '/'):
        raise RemoveRootError(dir1)

    if isinstance(fs2, tuple):
        fs2, dir2 = fs2
        if create_destination:
            fs2.makedir(dir2, allow_recreate=True, recursive=True)
        fs2 = fs2.opendir(dir2)

    mount_fs = MountFS(auto_close=False)
    mount_fs.mount('src', fs1)
    mount_fs.mount('dst', fs2)
    mount_fs.copydir('src', 'dst',
                     overwrite=True,
                     ignore_errors=ignore_errors,
                     chunk_size=chunk_size)
    parent_fs1.removedir(parent_dir1, force=True)
def test_auto_close(self):
    """Test MountFS auto close is working"""
    multi_fs = MountFS()
    m1 = MemoryFS()
    m2 = MemoryFS()
    multi_fs.mount('/m1', m1)
    multi_fs.mount('/m2', m2)
    self.assert_(not m1.closed)
    self.assert_(not m2.closed)
    multi_fs.close()
    self.assert_(m1.closed)
    self.assert_(m2.closed)
def test_no_auto_close(self):
    """Test MountFS auto close can be disabled"""
    mount_fs = MountFS(auto_close=False)
    m1 = MemoryFS()
    m2 = MemoryFS()
    mount_fs.mount("/m1", m1)
    mount_fs.mount("/m2", m2)
    self.assertFalse(m1.isclosed())
    self.assertFalse(m2.isclosed())
    mount_fs.close()
    self.assertFalse(m1.isclosed())
    self.assertFalse(m2.isclosed())
def test_auto_close(self):
    """Test MountFS auto close is working"""
    mount_fs = MountFS()
    m1 = MemoryFS()
    m2 = MemoryFS()
    mount_fs.mount("/m1", m1)
    mount_fs.mount("/m2", m2)
    self.assertFalse(m1.isclosed())
    self.assertFalse(m2.isclosed())
    mount_fs.close()
    self.assertTrue(m1.isclosed())
    self.assertTrue(m2.isclosed())
def test_no_auto_close(self):
    """Test MountFS auto close can be disabled"""
    multi_fs = MountFS(auto_close=False)
    m1 = MemoryFS()
    m2 = MemoryFS()
    multi_fs.mount('/m1', m1)
    multi_fs.mount('/m2', m2)
    self.assert_(not m1.closed)
    self.assert_(not m2.closed)
    multi_fs.close()
    self.assert_(not m1.closed)
    self.assert_(not m2.closed)
def smart_mount(self, file_path=None, request=None):
    """
    Mounts only the fs which stores the file at file_path.
    If file_path isn't set, mounts all fs.
    :param file_path: (string) relative path to the file (from the pyfs root)
    :param request: (django request object) if passed, we can add messages about errors while connecting
    """
    self.fs = MountFS()
    local_fs = OSFS(self.UPLOAD_ROOT)
    self.fs.mountdir(".", local_fs)
    if not file_path or not self.fs.exists(file_path):
        for storage in self.lab.storages.all():
            if not file_path or file_path.startswith(storage.get_folder_name()):
                try:
                    if storage.public_key:
                        pkey = paramiko.RSAKey.from_private_key(StringIO.StringIO(storage.public_key))
                        remote_fs = SFTPFS(
                            connection=storage.host,
                            username=storage.username,
                            pkey=pkey,
                            root_path=storage.get_path(),
                        )
                    elif storage.password:
                        remote_fs = SFTPFS(
                            connection=storage.host,
                            username=storage.username,
                            password=storage.password,
                            root_path=storage.get_path(),
                        )
                    # else raise
                    if storage.readonly:
                        remote_fs = ReadOnlyFS(remote_fs)
                    self.fs.mountdir(storage.get_folder_name(), remote_fs)
                except Exception:
                    storage.active = False
                    storage.save()
                    if request:
                        messages.add_message(
                            request,
                            messages.ERROR,
                            _("Error while connecting to %(storage_name)s.") % {"storage_name": storage},
                        )
def movedir(fs1, fs2, create_destination=True, ignore_errors=False, chunk_size=64 * 1024):
    """Moves contents of a directory from one filesystem to another.

    :param fs1: A tuple of (<filesystem>, <directory path>)
    :param fs2: Destination filesystem, or a tuple of (<filesystem>, <directory path>)
    :param create_destination: If True, the destination will be created if it doesn't exist
    :param ignore_errors: If True, exceptions from file moves are ignored
    :param chunk_size: Size of chunks to move if a simple copy is used

    """
    if not isinstance(fs1, tuple):
        raise ValueError(
            "first argument must be a tuple of (<filesystem>, <path>)")

    fs1, dir1 = fs1
    parent_fs1 = fs1
    parent_dir1 = dir1
    fs1 = fs1.opendir(dir1)

    if parent_dir1 in ('', '/'):
        raise RemoveRootError(dir1)

    if isinstance(fs2, tuple):
        fs2, dir2 = fs2
        if create_destination:
            fs2.makedir(dir2, allow_recreate=True, recursive=True)
        fs2 = fs2.opendir(dir2)

    mount_fs = MountFS(auto_close=False)
    mount_fs.mount('src', fs1)
    mount_fs.mount('dst', fs2)
    mount_fs.copydir('src', 'dst',
                     overwrite=True,
                     ignore_errors=ignore_errors,
                     chunk_size=chunk_size)
    parent_fs1.removedir(parent_dir1, force=True)
def test_desc(self):
    mount_fs = MountFS()
    mount_fs.desc("/")
def test_empty(self):
    """Test MountFS with nothing mounted."""
    mount_fs = MountFS()
    self.assertEqual(mount_fs.listdir("/"), [])
def test_mount_self(self):
    mount_fs = MountFS()
    with self.assertRaises(ValueError):
        mount_fs.mount("/", mount_fs)
def test_bad_mount(self):
    mount_fs = MountFS()
    with self.assertRaises(TypeError):
        mount_fs.mount('foo', 5)
    with self.assertRaises(TypeError):
        mount_fs.mount('foo', b'bar')
class FileManagerMixin(object):

    def dispatch(self, request, *args, **kwargs):
        self.UPLOAD_URL, self.UPLOAD_ROOT = self.get_upload(request, *args, **kwargs)
        self.lab = Lab.objects.get(pk=request.session.get("lab"))
        if not self.lab.is_assistant(request.user):
            raise PermissionDenied

    def smart_mount(self, file_path=None, request=None):
        """
        Mounts only the fs which stores the file at file_path.
        If file_path isn't set, mounts all fs.
        :param file_path: (string) relative path to the file (from the pyfs root)
        :param request: (django request object) if passed, we can add messages about errors while connecting
        """
        self.fs = MountFS()
        local_fs = OSFS(self.UPLOAD_ROOT)
        self.fs.mountdir(".", local_fs)
        if not file_path or not self.fs.exists(file_path):
            for storage in self.lab.storages.all():
                if not file_path or file_path.startswith(storage.get_folder_name()):
                    try:
                        if storage.public_key:
                            pkey = paramiko.RSAKey.from_private_key(StringIO.StringIO(storage.public_key))
                            remote_fs = SFTPFS(
                                connection=storage.host,
                                username=storage.username,
                                pkey=pkey,
                                root_path=storage.get_path(),
                            )
                        elif storage.password:
                            remote_fs = SFTPFS(
                                connection=storage.host,
                                username=storage.username,
                                password=storage.password,
                                root_path=storage.get_path(),
                            )
                        # else raise
                        if storage.readonly:
                            remote_fs = ReadOnlyFS(remote_fs)
                        self.fs.mountdir(storage.get_folder_name(), remote_fs)
                    except Exception:
                        storage.active = False
                        storage.save()
                        if request:
                            messages.add_message(
                                request,
                                messages.ERROR,
                                _("Error while connecting to %(storage_name)s.") % {"storage_name": storage},
                            )

    def get_upload(self, request, *args, **kwargs):
        lab = request.session.get("lab")
        if not lab:
            lab = unicode(Lab.objects.get(pk=kwargs.get("lab_pk")).id)
            request.session["lab"] = lab
        return (
            os.path.join(settings.FILEMANAGER_UPLOAD_URL, lab + "/"),
            os.path.join(settings.FILEMANAGER_UPLOAD_ROOT, lab + "/"),
        )
def test_bad_mount(self):
    mount_fs = MountFS()
    with self.assertRaises(TypeError):
        mount_fs.mount("foo", 5)
    with self.assertRaises(TypeError):
        mount_fs.mount("foo", b"bar")
def test_listdir(self):
    mount_fs = MountFS()
    self.assertEqual(mount_fs.listdir("/"), [])
    m1 = MemoryFS()
    m3 = MemoryFS()
    m4 = TempFS()
    mount_fs.mount("/m1", m1)
    mount_fs.mount("/m2", "temp://")
    mount_fs.mount("/m3", m3)
    with self.assertRaises(MountError):
        mount_fs.mount("/m3/foo", m4)
    self.assertEqual(sorted(mount_fs.listdir("/")), ["m1", "m2", "m3"])
    m3.makedir("foo")
    self.assertEqual(sorted(mount_fs.listdir("/m3")), ["foo"])
def DEFAULT_FILE_STORAGE_FS():
    _m = MountFS()
    _m.mount('pytigon', OSFS(settings.ROOT_PATH))
    _m.mount('static', OSFS(settings.STATIC_ROOT))
    _m.mount('app', OSFS(settings.LOCAL_ROOT_PATH))
    _m.mount('data', OSFS(settings.DATA_PATH))
    try:
        _m.mount('temp', OSFS(settings.TEMP_PATH))
    except:
        pass
    try:
        _m.mount('media', OSFS(settings.MEDIA_ROOT))
    except:
        pass
    try:
        _m.mount('upload', OSFS(settings.UPLOAD_PATH))
        _m.mount('filer_public', OSFS(os.path.join(settings.UPLOAD_PATH, "filer_public")))
        _m.mount('filer_private', OSFS(os.path.join(settings.UPLOAD_PATH, "filer_private")))
        _m.mount(
            'filer_public_thumbnails',
            OSFS(os.path.join(settings.UPLOAD_PATH, "filer_public_thumbnails")))
        _m.mount(
            'filer_private_thumbnails',
            OSFS(os.path.join(settings.UPLOAD_PATH, "filer_private_thumbnails")))
    except:
        pass
    if sys.argv and sys.argv[0].endswith('pytigon'):
        if platform_name() == 'Windows':
            _m.mount('osfs', OSFS("c:\\"))
        else:
            _m.mount('osfs', OSFS("/"))
    return _m
            'id/'||id AS path, length(testscript) AS size FROM tasks_task)
        UNION
            (SELECT id AS fid, 'task/'||url AS path, length(testscript) AS size FROM tasks_task);
    """,
    readcmd="SELECT testscript FROM tasks_task WHERE id=%s",
    writecmd="UPDATE tasks_task SET testscript=%s WHERE id=%s",
)

# create putkafs -- a virtual filesystem with filesystems above mounted onto its folders
putkafs = MountFS()
putkafs.mountdir('uploads', fs_uploads)
putkafs.mountdir('att', fs_attachments)
putkafs.mountdir('testscript', fs_testscripts)

if opts.unmount:
    print 'Unmounting putkafs from %s' % opts.mountpoint
    ret = os.system('fusermount -u %s' % opts.mountpoint)
    sys.exit(ret)
else:
    # redirect stdout, stderr to a temporary file
    if opts.bg:
        sys.stdout.flush()
        sys.stderr.flush()
def DEFAULT_FILE_STORAGE_FS():
    _m = MountFS()
    _m.mount('pytigon', OSFS(settings.ROOT_PATH))
    _m.mount('static', OSFS(settings.STATIC_ROOT))
    _m.mount('app', OSFS(settings.LOCAL_ROOT_PATH))
    _m.mount('data', OSFS(settings.DATA_PATH))
    try:
        _m.mount('temp', OSFS(settings.TEMP_PATH))
    except:
        pass
    try:
        _m.mount('media', OSFS(settings.MEDIA_ROOT))
    except:
        pass
    try:
        _m.mount('upload', OSFS(settings.UPLOAD_PATH))
        _m.mount('filer_public', OSFS(os.path.join(settings.UPLOAD_PATH, "filer_public")))
        _m.mount('filer_private', OSFS(os.path.join(settings.UPLOAD_PATH, "filer_private")))
        _m.mount('filer_public_thumbnails', OSFS(os.path.join(settings.UPLOAD_PATH, "filer_public_thumbnails")))
        _m.mount('filer_private_thumbnails', OSFS(os.path.join(settings.UPLOAD_PATH, "filer_private_thumbnails")))
    except:
        pass
    if sys.argv and sys.argv[0].endswith('pytigon'):
        if platform_name() == 'Windows':
            _m.mount('osfs', OSFS("c:\\"))
        else:
            _m.mount('osfs', OSFS("/"))
    return _m
def test_mountfile(self):
    """Test mounting a file"""
    quote = b"""If you wish to make an apple pie from scratch, you must first invent the universe."""
    mem_fs = MemoryFS()
    mem_fs.makedir('foo')
    mem_fs.setcontents('foo/bar.txt', quote)
    foo_dir = mem_fs.opendir('foo')

    mount_fs = MountFS()
    mount_fs.mountfile('bar.txt', foo_dir.open, foo_dir.getinfo)

    self.assert_(mount_fs.isdir('/'))
    self.assert_(mount_fs.isdir('./'))
    self.assert_(mount_fs.isdir(''))

    # Check we can see the mounted file in the dir list
    self.assertEqual(mount_fs.listdir(), ["bar.txt"])
    self.assert_(not mount_fs.exists('nobodyhere.txt'))
    self.assert_(mount_fs.exists('bar.txt'))
    self.assert_(mount_fs.isfile('bar.txt'))
    self.assert_(not mount_fs.isdir('bar.txt'))

    # Check open and getinfo callables
    self.assertEqual(mount_fs.getcontents('bar.txt'), quote)
    self.assertEqual(mount_fs.getsize('bar.txt'), len(quote))

    # Check changes are written back
    mem_fs.setcontents('foo/bar.txt', 'baz')
    self.assertEqual(mount_fs.getcontents('bar.txt'), b'baz')
    self.assertEqual(mount_fs.getsize('bar.txt'), len('baz'))

    # Check changes are written to the original fs
    self.assertEqual(mem_fs.getcontents('foo/bar.txt'), b'baz')
    self.assertEqual(mem_fs.getsize('foo/bar.txt'), len('baz'))

    # Check unmount
    self.assert_(mount_fs.unmount("bar.txt"))
    self.assertEqual(mount_fs.listdir(), [])
    self.assert_(not mount_fs.exists('bar.txt'))

    # Check unmounting a second time is a null op, and returns False
    self.assertFalse(mount_fs.unmount("bar.txt"))
def test_listdir(self):
    mount_fs = MountFS()
    self.assertEqual(mount_fs.listdir('/'), [])
    m1 = MemoryFS()
    m3 = MemoryFS()
    m4 = TempFS()
    mount_fs.mount('/m1', m1)
    mount_fs.mount('/m2', 'temp://')
    mount_fs.mount('/m3', m3)
    with self.assertRaises(MountError):
        mount_fs.mount('/m3/foo', m4)
    self.assertEqual(
        sorted(mount_fs.listdir('/')),
        ['m1', 'm2', 'm3']
    )
    m3.makedir('foo')
    self.assertEqual(
        sorted(mount_fs.listdir('/m3')),
        ['foo']
    )
            'id/'||id AS path, length(testscript) AS size FROM tasks_task)
        UNION
            (SELECT id AS fid, 'task/'||url AS path, length(testscript) AS size FROM tasks_task);
    """,
    readcmd="SELECT testscript FROM tasks_task WHERE id=%s",
    writecmd="UPDATE tasks_task SET testscript=%s WHERE id=%s",
)

# create putkafs -- a virtual filesystem with filesystems above mounted onto its folders
putkafs = MountFS()
putkafs.mountdir('uploads', fs_uploads)
putkafs.mountdir('att', fs_attachments)
putkafs.mountdir('testscript', fs_testscripts)

if opts.unmount:
    print 'Unmounting putkafs from %s' % opts.mountpoint
    ret = os.system('fusermount -u %s' % opts.mountpoint)
    sys.exit(ret)
else:
    # redirect stdout, stderr to a temporary file
    if opts.bg:
        sys.stdout.flush()
        sys.stderr.flush()
        _out, _err = os.dup(sys.stdout.fileno()), os.dup(sys.stderr.fileno())
def make_fs(self):
    fs = MountFS()
    mem_fs = MemoryFS()
    fs.mount("/", mem_fs)
    return fs
def make_fs(self):
    fs = MountFS()
    mem_fs = MemoryFS()
    fs.mount("/foo", mem_fs)
    return fs.opendir("foo")
def make_fs(self):
    fs = MountFS()
    mem_fs = MemoryFS()
    fs.mount('/foo', mem_fs)
    return fs.opendir('foo')
class Core:

    plugins = None

    def __init__(self, datapath='data', log=None, level=None):
        if not log:
            log = 'stream'
        if not level:
            level = 1
        if log == 'stream':
            logginghandler = Logger()
            logginghandler.create_stream_logger(level=level)

        # Defining paths
        workingpath = os.path.dirname(os.path.realpath(__file__))
        self.workingpath = workingpath
        logger.info('Startup Core')
        logger.info('Workingpath: %s' % workingpath)

        ### Config
        if datapath:
            self.config = Config(datapath, 'MediaProxy')
        else:
            self.config = Config(workingpath, 'MediaProxy')
        if not self.config.mode:
            logger.info('Fresh Start, No Config File')
        else:
            logger.info('Config in state: %s' % self.config.mode)

        ### Import dependencies (requires extlibs)
        from .plugin_manager import Plugins
        from .database import Database
        from fs.mountfs import MountFS

        ### Plugins
        if self.config.mode:
            self.plugins = Plugins(
                self.config.cfg,
                loadpathes=[os.path.join(datapath, 'plugins')])

        self.filesystem = MountFS()

        ### Database
        self.database = Database(self.config)
        self.database.load()
        self.channels_populate()

    def channel_get(self, ctype=None):
        if not ctype:
            return self.filesystem
        else:
            from fs.mountfs import MountFS
            channel = MountFS()
            for path, fs in self.filesystem.mounts:
                if fs.exists(u'/'):
                    if 'mediaproxy.media' in fs.getinfo(u'/').raw:
                        logger.info(
                            'Check Channel: %s - %s' %
                            (path, fs.getinfo(u'/').raw['mediaproxy.media']['type']))
                        if fs.getinfo(u'/').raw['mediaproxy.media']['type'] == ctype:
                            channel.mount(path, fs)
            return channel

    def channels(self):
        return self.filesystem.listdir(u'/')

    def load_meta(self, path, filesystem=None):
        logger.info('path: %s' % path)
        if filesystem:
            meta = filesystem.getinfo(path, 'media')
        else:
            meta = self.filesystem.getinfo(path, 'media')
        #~ print self.filesystem.getinfo(path, 'media').raw
        filemeta = {}
        filemeta['path'] = path
        if meta.get('media', 'files'):
            filemeta['files'] = meta.get('media', 'files')
        else:
            filemeta['files'] = [path]
        filemeta['id'] = meta.get('media', 'id')
        filemeta['title'] = meta.get('media', 'title')
        filemeta['sorttitle'] = meta.get('media', 'sorttitle')
        filemeta['originaltitle'] = meta.get('media', 'originaltitle')
        filemeta['year'] = meta.get('media', 'year')
        filemeta['rating'] = meta.get('media', 'rating')
        filemeta['outline'] = meta.get('media', 'outline')
        filemeta['plot'] = meta.get('media', 'plot')
        filemeta['runtime'] = meta.get('media', 'runtime')
        filemeta['genre'] = meta.get('media', 'genre')
        filemeta['set'] = meta.get('media', 'set')
        if meta.get('media', 'thumb'):
            filemeta['thumb'] = base64.b64encode(meta.get('media', 'thumb'))
        else:
            filemeta['thumb'] = None
        if meta.get('media', 'plakat'):
            filemeta['plakat'] = base64.b64encode(meta.get('media', 'plakat'))
        else:
            filemeta['plakat'] = None
        filemeta['size'] = meta.get('media', 'size')
        if meta.get('media', 'extension') and meta.get('media', 'mime'):
            filemeta['mime'] = meta.get('media', 'mime')
            filemeta['extension'] = meta.get('media', 'extension')
        elif meta.get('media', 'extension'):
            filemeta['extension'] = meta.get('media', 'extension')
            filemeta['mime'] = mime.guess_type(filemeta['extension'])
        elif meta.get('media', 'mime'):
            filemeta['mime'] = meta.get('media', 'mime')
            filemeta['extension'] = mime.guess_extension(filemeta['mime'])
        else:
            filemeta['mime'] = None
            filemeta['extension'] = None
        return filemeta

    def channels_populate(self):
        if not self.database.db:
            return
        channellist = list(self.database.db.select('channels'))
        for c in channellist:
            name = c.NAME
            cid = c.CID
            categorie, pluginname = c.ID.split('.')
            cplugin = self.plugins.getPluginByName(pluginname, categorie)
            if c.TYPE:
                categorie, pluginname = c.TYPE.split('.')
                splugin = self.plugins.getPluginByName(pluginname, categorie)
            try:
                fs = cplugin.plugin_object.load(c.PATH, c.LOGIN)
                if c.TYPE:
                    gfs = splugin.plugin_object.load(fs)
                    #~ gfs.setinfo(u'', {'cid': cid})
                    #~ gfs.setinfo(u'', {'status': 'online'})
                else:
                    gfs = fs
                self.filesystem.mount('/' + name, gfs)
            except:
                self.filesystem.makedir(u'/%s' % name)
                pass

    def setup_check(self):
        logger.info('Installed, %s' % self.config.mode)
        if not self.config.mode:
            return False
        if not self.config.get('database', 'dbtype'):
            return False
        return True

    def setup(self, mode):
        if self.setup_check():
            raise IOError('Already installed')
        if mode == 'portable':
            self.config.set('database', 'dbtype', 'sqlite')
            self.config.save('portable')
        self.plugins = Plugins(self.config.cfg)
        self.database.load()

    def plugins_list(self, categorie=None, only_active=False):
        return self.plugins.list_plugins(categorie=categorie, only_active=only_active)

    def plugin_activate(self, id):
        self.plugins.activatePluginByID(id)

    def plugin_deactivate(self, id):
        self.plugins.deactivatePluginByID(id)

    def channel_add(self, name, pluginid, scraperid, path, login):
        #~ assert type(name) == unicode
        #~ assert type(path) == unicode
        categorie, pluginname = pluginid.split('.')
        cplugin = self.plugins.getPluginByName(pluginname, categorie)
        if scraperid:
            categorie, pluginname = scraperid.split('.')
            splugin = self.plugins.getPluginByName(pluginname, categorie)
        cid = str(uuid.uuid4())
        fs = cplugin.plugin_object.load(path, login)
        if scraperid:
            gfs = splugin.plugin_object.load(fs)
        else:
            gfs = fs
        #~ fs.setinfo(u'', {'cid': cid})
        if not gfs.isdir(u'/'):
            raise IOError('Filesystem not accessible')
        try:
            self.filesystem.mount(name, gfs)
        except:
            fs.close()
            raise IOError('Filesystem not mountable')
        with self.database.db.transaction():
            self.database.db.insert('channels',
                                    CID=cid,
                                    NAME=name,
                                    TYPE=scraperid,
                                    ID=pluginid,
                                    PATH=path,
                                    LOGIN=login,
                                    ACTIVE=1)

    def channel_info(self, path):
        return self.filesystem.getinfo(path).raw['mediaproxy']