def process_view(self, request, view_func, view_args, view_kwargs):
    """
    Sets request.fs and request.jt on every request, pointing them at the
    configured filesystem and the default MapReduce cluster respectively.
    """
    has_hadoop = apputil.has_hadoop()

    # The filesystem reference may arrive via the query string, the POST body,
    # or a URL keyword argument. Strip it from view_kwargs so the view itself
    # is never called with an unexpected "fs" argument.
    fs_ref = request.GET.get('fs', request.POST.get('fs', view_kwargs.get('fs')))
    if "fs" in view_kwargs:
        del view_kwargs["fs"]

    if fs_ref is None:
        request.fs_ref, request.fs = fsmanager.get_default_hdfs()
    else:
        try:
            request.fs = fsmanager.get_filesystem(fs_ref)
            request.fs_ref = fs_ref
        except KeyError:
            raise KeyError('Cannot find filesystem called "%s"' % (fs_ref,))

    if request.user.is_authenticated() and request.fs is not None:
        request.fs.setuser(request.user.username)

    if request.user.is_authenticated() and has_hadoop:
        request.jt = cluster.get_default_mrcluster()
        if request.jt is not None:
            request.jt.setuser(request.user.username)
    else:
        request.jt = None
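# A minimal sketch (not part of the middleware) of how a view might rely on the
# attributes set above once the middleware is installed. The view name and the
# listdir() call are illustrative assumptions about the filesystem object's API.
def list_home_dir(request):
    # By the time a view runs, process_view() has attached request.fs (and
    # request.jt when Hadoop is available), already bound to the requesting
    # user, so the view can use the filesystem directly.
    home = '/user/%s' % request.user.username
    return request.fs.listdir(home)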
def __init__(self, request, name):
    self.name = name
    self.size = None
    self._do_cleanup = False
    try:
        self._fs = request.fs
    except AttributeError:
        _, self._fs = fsmanager.get_default_hdfs()

    # Don't want to handle this upload if we don't have an HDFS.
    if not self._fs:
        raise HDFSerror("No HDFS found")

    # We want to set the user to be the superuser. But any operation
    # in the fs needs a username, including the retrieval of the superuser.
    # So we first set it to the DEFAULT_USER to break this chicken-&-egg.
    self._fs.setuser(hadoop.fs.hadoopfs.DEFAULT_USER)
    self._fs.setuser(self._fs.superuser)

    self._path = self._fs.mktemp(
        subdir='hue-uploads',
        prefix='tmp.%s' % (request.environ['REMOTE_ADDR'],))

    # Make the tmp dir 0777
    self._fs.chmod(self._fs.dirname(self._path), 0777)

    hadoop.fs.hadoopfs.FileUpload.__init__(self, self._fs, self._path)
    self._do_cleanup = True
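# A minimal sketch, not from the original module: how a Django upload handler
# might construct the HDFS-backed upload object defined above. The names
# "HDFSTemporaryUploadedFile" (for the class whose __init__ is shown) and
# "HDFSFileUploadHandler" are assumptions used purely for illustration.
from django.core.files.uploadhandler import FileUploadHandler

class HDFSFileUploadHandler(FileUploadHandler):
    def new_file(self, field_name, file_name, *args, **kwargs):
        super(HDFSFileUploadHandler, self).new_file(field_name, file_name,
                                                    *args, **kwargs)
        # self.request is set by FileUploadHandler; the HDFS temp file is
        # created as soon as Django announces a new uploaded file.
        self.file = HDFSTemporaryUploadedFile(self.request, file_name)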
def get_filesys(fs_ref):
    """
    Looks up the filesystem for the given reference, falling back to the
    default HDFS when no reference is given.
    """
    if fs_ref is None:
        fs_ref, fs = fsmanager.get_default_hdfs()
    else:
        try:
            fs = fsmanager.get_filesystem(fs_ref)
        except KeyError:
            raise KeyError('Cannot find filesystem called "%s"' % (fs_ref,))
    return fs
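# A minimal usage sketch for get_filesys(); the 'hdfs' reference name below is
# an assumption that simply illustrates the two code paths.
#
#   fs = get_filesys(None)      # None falls back to the configured default HDFS
#   fs = get_filesys('hdfs')    # named reference; raises KeyError if unknown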