def process_view(self, request, view_func, view_args, view_kwargs):
    """
    Middleware hook: attach the configured filesystem (request.fs,
    request.fs_ref) and the default MR cluster (request.jt) to every request.
    """
    hadoop_available = apputil.has_hadoop()

    # The filesystem may be named in the query string, the POST body,
    # or the URL kwargs (checked in that order of precedence).
    fs_name = request.GET.get('fs', request.POST.get('fs', view_kwargs.get('fs')))
    # Consume the kwarg so the view function does not receive it.
    view_kwargs.pop('fs', None)

    if fs_name is None:
        # No explicit filesystem requested: fall back to the default HDFS.
        request.fs_ref, request.fs = fsmanager.get_default_hdfs()
    else:
        try:
            request.fs = fsmanager.get_filesystem(fs_name)
            request.fs_ref = fs_name
        except KeyError:
            raise KeyError('Cannot find filesystem called "%s"' % (fs_name,))

    authenticated = request.user.is_authenticated()
    if authenticated and request.fs is not None:
        # Impersonate the logged-in user on the filesystem.
        request.fs.setuser(request.user.username)

    if authenticated and hadoop_available:
        request.jt = cluster.get_default_mrcluster()
        if request.jt is not None:
            request.jt.setuser(request.user.username)
    else:
        # Anonymous request or no Hadoop configured: no job tracker.
        request.jt = None
def process_view(self, request, view_func, view_args, view_kwargs):
    """
    Sets request.fs and request.jt on every request to point to the
    configured filesystem and job tracker.

    NOTE(review): this appears to duplicate the process_view defined
    just above — presumably two revisions pasted together; confirm
    which one the class should keep.
    """
    has_hadoop = apputil.has_hadoop()

    # Filesystem reference resolution order: GET param, POST param, URL kwarg.
    fs_ref = request.GET.get('fs', request.POST.get('fs', view_kwargs.get('fs')))
    if "fs" in view_kwargs:
        # Strip the kwarg so it is not forwarded to the view.
        del view_kwargs["fs"]

    if fs_ref is None:
        request.fs_ref, request.fs = fsmanager.get_default_hdfs()
    else:
        try:
            request.fs = fsmanager.get_filesystem(fs_ref)
            request.fs_ref = fs_ref
        except KeyError:
            raise KeyError('Cannot find filesystem called "%s"' % (fs_ref,))

    if request.user.is_authenticated() and request.fs is not None:
        request.fs.setuser(request.user.username)

    if request.user.is_authenticated() and has_hadoop:
        request.jt = cluster.get_default_mrcluster()
        if request.jt is not None:
            request.jt.setuser(request.user.username)
    else:
        request.jt = None
def process_view(self, request, view_func, view_args, view_kwargs):
    """
    Sets request.fs and request.jt on every request to point to the
    configured filesystem, passing the user's groups through to setuser.

    Falls back to the filesystem named "default"; when Hadoop is not
    configured and "default" is unknown, request.fs is set to None
    instead of raising.
    """
    hadoop_available = apputil.has_hadoop()

    # Look for an explicit filesystem name: query string, POST body, URL kwarg.
    fs_name = request.GET.get("fs", request.POST.get("fs", view_kwargs.get("fs")))
    # Remove the kwarg so the view function never sees it.
    view_kwargs.pop("fs", None)

    # Empty string or missing both mean "use the default filesystem".
    fs_name = fs_name or "default"

    try:
        request.fs = fsmanager.get_filesystem(fs_name)
        request.fs_ref = fs_name
    except KeyError:
        if fs_name == "default" and not hadoop_available:
            # Hadoop-less deployment: tolerate the missing default FS.
            # NOTE(review): request.fs_ref is left unset on this path —
            # confirm downstream code guards against that.
            request.fs = None
        else:
            raise

    user = request.user
    if user.is_authenticated() and request.fs is not None:
        request.fs.setuser(user.username, user.get_groups())

    if user.is_authenticated() and hadoop_available:
        request.jt = cluster.get_mrcluster()
        if request.jt is not None:
            request.jt.setuser(user.username, user.get_groups())
    else:
        # Anonymous user or no Hadoop: no job tracker on this request.
        request.jt = None
def _init_filesystems():
    """
    Initialize the module-scoped filesystem dictionary (idempotent).

    Populates _filesystems with every configured HDFS (when Hadoop is
    available) plus every configured local filesystem, raising on a
    name collision between the two.
    """
    global _filesystems

    # Already initialized — nothing to do.
    if _filesystems is not None:
        return

    _filesystems = {}

    # Load HDFSes first, if Hadoop is configured at all.
    if has_hadoop():
        _filesystems.update(get_all_hdfs())

    # Then load the locally-mounted filesystems from configuration.
    for name in conf.LOCAL_FILESYSTEMS.keys():
        fs = LocalSubFileSystem(conf.LOCAL_FILESYSTEMS[name].PATH.get())
        if name in _filesystems:
            raise Exception(
                "Filesystem '%s' configured twice. First is %s, second is local FS %s"
                % (name, _filesystems[name], fs))
        _filesystems[name] = fs