def addRepo(repo_id):
    """Fetch repo `repo_id` via seafile_api and cache it in all_repos.

    Already-cached ids are skipped; RPC failures are logged, not raised.
    """
    try:
        # `in` replaces dict.has_key(), which was removed in Python 3
        if repo_id in all_repos:
            return
        repo = seafile_api.get_repo(repo_id)
        if repo:
            all_repos[repo_id] = repo
    except SearpcError as e:  # `as` syntax works on Py2.6+ and Py3
        # %.8s: first 8 chars of the repo id are enough to identify it
        util.warn("Failed to get repo %.8s: %s" % (repo_id, e.msg))
def addRepo(repo_id):
    """Fetch repo `repo_id` and cache it in all_repos, skipping encrypted repos.

    The repo name is UTF-8 encoded for the WebDAV layer. RPC failures are
    logged, not raised.
    """
    try:
        # `in` replaces dict.has_key(), which was removed in Python 3
        if repo_id in all_repos:
            return
        repo = seafile_api.get_repo(repo_id)
        # Encrypted repos are unusable over WebDAV without the passphrase
        if repo and not repo.encrypted:
            repo.name = repo.name.encode('utf-8')
            all_repos[repo_id] = repo
    except SearpcError as e:  # `as` syntax works on Py2.6+ and Py3
        util.warn("Failed to get repo %.8s: %s" % (repo_id, e.msg))
def davmain(config, args):
    """Entry point: build the WsgiDAVApp and run it under the selected server.

    args[0] == "runfcgi" selects flup FastCGI (threaded when args[1] is
    "method=threaded", forking otherwise); with no args the bundled
    CherryPy server is used. Unknown commands warn and exit with status 1.
    """
    app = WsgiDAVApp(config)
    if len(args) > 0:
        if args[0] == "runfcgi":
            if len(args) >= 2 and args[1] == "method=threaded":
                _runFlup(app, config, "flup-fcgi")
            else:
                _runFlup(app, config, "flup-fcgi_fork")
        else:
            util.warn("Unknown command %s" % args[0])
            # sys.exit() instead of the site-module exit(): exit() is an
            # interactive helper and may be absent (e.g. under python -S)
            sys.exit(1)
    else:
        _runCherryPy(app, config, "cherrypy-bundled")
def addRepo(repo):
    """Cache a repo in all_repos, given either a repo id string or an object.

    Encrypted repos are skipped; the repo name is UTF-8 encoded for the
    WebDAV layer. RPC failures are logged, not raised.
    """
    # Accept either an id (fetch lazily below) or an already-fetched object.
    # NOTE: basestring kept — this branch runs under Python 2.
    if isinstance(repo, basestring):
        repo_id = repo
        repo = None
    else:
        repo_id = repo.id
    try:
        # `in` replaces dict.has_key(), which was removed in Python 3
        if repo_id in all_repos:
            return
        repo = repo or seafile_api.get_repo(repo_id)
        if repo and not repo.encrypted:
            repo.name = repo.name.encode('utf-8')
            all_repos[repo_id] = repo
    except SearpcError as e:  # `as` syntax works on Py2.6+ and Py3
        util.warn("Failed to get repo %.8s: %s" % (repo_id, e.msg))
def _dump(self, msg="", out=None):
    """Write a human-readable dump of every stored property to `out`
    (stdout when `out` is None)."""
    target = out if out is not None else sys.stdout
    header = "%s(%s): %s" % (self.__class__.__name__, self.__repr__(), msg)
    print(header, file=target)
    if not self._loaded:
        self._lazyOpen()
        if self._verbose >= 2:
            # _lazyOpen already produced a dump at this verbosity level
            return
    try:
        for url, props in self._dict.items():
            print(" ", url, file=target)
            for name, value in props.items():
                try:
                    print(" %s: '%s'" % (name, value), file=target)
                except Exception as e:
                    print(" %s: ERROR %s" % (name, e), file=target)
        target.flush()
    except Exception as e:
        util.warn("PropertyManager._dump() ERROR: %s" % e)
def handle_error(self, request, client_address):
    """Handle an error gracefully.  May be overridden.

    The default is to print a traceback and continue.
    """
    ei = sys.exc_info()
    e = ei[1]  # the exception instance currently being handled
    # Suppress stack trace when client aborts connection disgracefully:
    # 10053: Software caused connection abort
    # 10054: Connection reset by peer
    # NOTE(review): e[0] relies on Python-2-only exception indexing (the
    # errno of socket.error); under Python 3 this would need e.args[0].
    if e[0] in (10053, 10054):
        util.warn("*** Caught socket.error: %s" % e)
        return
    # This is what BaseHTTPServer.HTTPServer.handle_error does, but with
    # added thread ID and using stderr
    print >>sys.stderr, '-'*40
    print >>sys.stderr, '<%s> Exception happened during processing of request from %s' % (threading._get_ident(), client_address)
    print >>sys.stderr, client_address
    traceback.print_exc()
    print >>sys.stderr, '-'*40
    print >>sys.stderr, request
def __init__(self, application, config):
    """HTTP-authenticator middleware: wraps `application` and enforces
    basic/digest authentication according to `config`.

    Recognized config keys: verbose, user_mapping, domaincontroller,
    acceptbasic, acceptdigest, defaultdigest, trusted_auth_header.
    """
    self._verbose = config.get("verbose", 2)
    self._application = application
    self._user_mapping = config.get("user_mapping", {})
    self._domaincontroller = config.get(
        "domaincontroller") or WsgiDAVDomainController(self._user_mapping)
    self._acceptbasic = config.get("acceptbasic", True)
    self._acceptdigest = config.get("acceptdigest", True)
    self._defaultdigest = config.get("defaultdigest", True)
    self._trusted_auth_header = config.get("trusted_auth_header", None)
    self._noncedict = dict([])  # per-client digest nonces

    self._headerparser = re.compile(r"([\w]+)=([^,]*),")
    # Note: extra parser to handle digest auth requests from certain
    # clients, that leave commas un-encoded to interfere with the above.
    self._headerfixparser = re.compile(r'([\w]+)=("[^"]*,[^"]*"),')
    self._headermethod = re.compile(r"^([\w]+)")

    wdcName = "NTDomainController"
    if self._domaincontroller.__class__.__name__ == wdcName:
        # BUG FIX: the original tested self._authacceptdigest,
        # self._authdefaultdigest and self._authacceptbasic — attributes
        # that are never assigned (the real ones have no "auth" prefix) —
        # so this branch raised AttributeError instead of warning.
        if self._acceptdigest or self._defaultdigest or not self._acceptbasic:
            util.warn(
                "WARNING: %s requires basic authentication.\n\tSet acceptbasic=True, "
                "acceptdigest=False, defaultdigest=False" % wdcName)
def getAccessibleRepos(username, org_id, is_guest):
    """Return all non-encrypted repos `username` can access.

    Aggregates owned, shared-in, group, and inner-public repos, de-duplicated
    by repo_id. An RPC failure for one category is logged and that category
    is skipped instead of aborting the whole listing.
    """
    all_repos = {}

    def addRepo(repo):
        # De-duplicate by repo_id; skip encrypted repos (unusable over
        # WebDAV without the passphrase).
        if all_repos.get(repo.repo_id):
            return
        if not repo.encrypted:
            all_repos[repo.repo_id] = repo

    try:
        owned_repos = get_owned_repos(username, org_id)
    except SearpcError as e:
        util.warn("Failed to list owned repos: %s" % e.msg)
        owned_repos = []  # BUG FIX: was left undefined -> NameError below
    for orepo in owned_repos:
        if orepo:
            # store_id is used by seafobj to access fs object.
            # repo's store_id is equal to repo_id except virtual_repo.
            orepo.store_id = orepo.repo_id
            addRepo(orepo)

    try:
        shared_repos = get_share_in_repo_list(username, org_id)
    except SearpcError as e:
        util.warn("Failed to list shared repos: %s" % e.msg)
        shared_repos = []  # BUG FIX: was left undefined -> NameError below
    for srepo in shared_repos:
        if srepo:
            addRepo(srepo)

    try:
        repos = get_group_repos(username, org_id)
    except SearpcError as e:
        util.warn("Failed to get groups for %s" % username)
        repos = []  # BUG FIX: was left undefined -> NameError below
    for grepo in repos:
        if grepo:
            addRepo(grepo)

    for prepo in list_inner_pub_repos(username, org_id, is_guest):
        if prepo:
            addRepo(prepo)

    return all_repos.values()
def __init__(self, path, environ, filePath):
    """Initialize a non-collection resource backed by `filePath`.

    A missing backing file is only warned about, not treated as an error.
    """
    file_missing = not os.path.exists(filePath)
    if file_missing:
        util.warn("FileResource(%r) does not exist." % filePath)
    _VirtualNonCollection.__init__(self, path, environ)
    self.filePath = filePath
def addRepo(repo_id):
    # Fetch the repo by id and cache it in all_repos; encrypted repos are
    # skipped and RPC failures only produce a warning.
    try:
        if all_repos.has_key(repo_id):  # Python-2-only dict.has_key
            return
        repo = seafile_api.get_repo(repo_id)
        if repo and not repo.encrypted:
            # WebDAV layer works with byte strings under Python 2
            repo.name = repo.name.encode('utf-8')
            all_repos[repo_id] = repo
    except SearpcError, e:
        util.warn("Failed to get repo %.8s: %s" % (repo_id, e.msg))

# Collect repos owned by the user; a failed RPC only warns.
# NOTE(review): on failure owned_repos stays undefined, so the loop below
# would raise NameError — mirrors the bug fixed elsewhere in this file.
try:
    owned_repos = get_owned_repos(username, org_id)
except SearpcError, e:
    util.warn("Failed to list owned repos: %s" % e.msg)
for orepo in owned_repos:
    addRepo(orepo.id)

# Collect repos shared to this user.
try:
    shared_repos = get_share_in_repo_list(username, org_id)
except SearpcError, e:
    util.warn("Failed to list shared repos: %s" % e.msg)
for srepo in shared_repos:
    addRepo(srepo.repo_id)

# Collect group repos.
# NOTE(review): this try-block is truncated in the visible source — the
# except clause and the rest of the group handling are missing.
try:
    groups = get_groups_by_user(username, org_id)
    repo_ids = get_group_repos(username, org_id, groups)
def __init__(self, path, environ, data, filePath):
    """Initialize a virtual (non-collection) resource.

    `data` holds the content served; `filePath` names the backing file,
    whose absence is logged as a warning only.
    """
    backing_missing = not os.path.exists(filePath)
    if backing_missing:
        util.warn("VirtualResFile(%r) does not exist." % filePath)
    _VirtualNonCollection.__init__(self, path, environ)
    self.data = data
    self.filePath = filePath
# Accumulator for every repo id -> repo object visible to `username`.
all_repos = {}

def addRepo(repo_id):
    # Fetch a repo by id via seafile_api and cache it; errors only warn.
    try:
        if all_repos.has_key(repo_id):  # Python-2-only dict.has_key
            return
        repo = seafile_api.get_repo(repo_id)
        if repo:
            all_repos[repo_id] = repo
    except SearpcError, e:
        util.warn("Failed to get repo %.8s: %s" % (repo_id, e.msg))

# Collect repos owned by the user; a failed RPC only warns.
# NOTE(review): on failure owned_repos stays undefined, so the loop below
# would raise NameError.
try:
    owned_repos = seafile_api.get_owned_repo_list(username)
except SearpcError, e:
    util.warn("Failed to list owned repos: %s" % e.msg)
for orepo in owned_repos:
    addRepo(orepo.id)

# Collect repos shared to this user (-1, -1: no paging limits —
# presumably start/limit; verify against seafile_api docs).
try:
    shared_repos = seafile_api.get_share_in_repo_list(username, -1, -1)
except SearpcError, e:
    util.warn("Failed to list shared repos: %s" % e.msg)
for srepo in shared_repos:
    addRepo(srepo.repo_id)

# Collect group repos.
# NOTE(review): truncated in the visible source — the except clause below
# has no body and the rest of the group-repo handling is missing.
try:
    joined_groups = seaserv.get_personal_groups_by_user(username)
except SearpcError, e:
def __call__(self, environ, start_response):
    """WSGI middleware entry point (generator).

    Iterates the wrapped application so exceptions raised mid-response can
    be caught here; DAVErrors are rendered as proper HTTP error responses,
    other exceptions are optionally converted to 500 Internal Error.
    """
    # Intercept start_response: the sub-app's status/headers are buffered
    # and only forwarded once we know no exception occurred.
    sub_app_start_response = util.SubAppStartResponse()
    try:
        try:
            # request_server app may be a generator (for example the GET handler)
            # So we must iterate - not return self._application(..)!
            # Otherwise the we could not catch exceptions here.
            response_started = False
            app_iter = self._application(environ, sub_app_start_response)
            for v in app_iter:
                # Start response (the first time)
                if not response_started:
                    # Success!
                    start_response(sub_app_start_response.status,
                                   sub_app_start_response.response_headers,
                                   sub_app_start_response.exc_info)
                response_started = True
                yield v
            # Close out iterator
            if hasattr(app_iter, "close"):
                app_iter.close()
            # Start response (if it hasn't been done yet)
            if not response_started:
                # Success! (e.g. the app yielded nothing at all)
                start_response(sub_app_start_response.status,
                               sub_app_start_response.response_headers,
                               sub_app_start_response.exc_info)
            return
        except DAVError as e:
            # DAVErrors are handled by the outer except below
            _logger.debug("re-raising %s" % e)
            raise
        except Exception as e:
            # Caught a non-DAVError
            if self._catch_all_exceptions:
                # Catch all exceptions to return as 500 Internal Error
                traceback.print_exc(10, environ.get(
                    "wsgi.errors") or sys.stderr)
                raise asDAVError(e)
            else:
                util.warn("ErrorPrinter: caught Exception")
                traceback.print_exc(10, sys.stderr)
                raise
    except DAVError as e:
        _logger.debug("caught %s" % e)
        status = getHttpStatusString(e)
        # Dump internal errors to console
        if e.value == HTTP_INTERNAL_ERROR:
            print("ErrorPrinter: caught HTTPRequestException("
                  "HTTP_INTERNAL_ERROR)", file=sys.stdout)
            traceback.print_exc(10, environ.get(
                "wsgi.errors") or sys.stdout)
            print("e.srcexception:\n%s" % e.srcexception, file=sys.stdout)
        elif e.value in (HTTP_NOT_MODIFIED, HTTP_NO_CONTENT):
            # util.log("ErrorPrinter: forcing empty error response for %s"
            #          % e.value)
            # See paste.lint: these code don't have content
            start_response(status, [("Content-Length", "0"),
                                    ("Date", util.getRfc1123Time()),
                                    ])
            yield b""
            return
        # If exception has pre-/post-condition: return as XML response,
        # else return as HTML
        content_type, body = e.getResponsePage()
        # TODO: provide exc_info=sys.exc_info()?
        start_response(status, [("Content-Type", content_type),
                                ("Content-Length", str(len(body))),
                                ("Date", util.getRfc1123Time()),
                                ])
        yield body
        return
def _start_response_wrapper(status, response_headers, exc_info=None):
    """Wrap the upstream start_response: post-process response headers,
    decide whether to force connection close, and log the request.

    NOTE(review): closure — `environ`, `self`, `start_time` and
    `start_response` come from the enclosing (non-visible) scope.
    """
    # Postprocess response headers
    headerDict = {}
    for header, value in response_headers:
        if header.lower() in headerDict:
            util.warn("Duplicate header in response: %s" % header)
        headerDict[header.lower()] = value

    # Check if we should close the connection after this request.
    # http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.4
    forceCloseConnection = False
    currentContentLength = headerDict.get("content-length")
    statusCode = int(status.split(" ", 1)[0])
    # Content-Length is required except for HEAD, 1xx, 204 and 304
    contentLengthRequired = (environ["REQUEST_METHOD"] != "HEAD"
                             and statusCode >= 200
                             and not statusCode in (204, 304))
    # print(environ["REQUEST_METHOD"], statusCode, contentLengthRequired)
    if contentLengthRequired and currentContentLength in (None, ""):
        # A typical case: a GET request on a virtual resource, for which
        # the provider doesn't know the length
        util.warn(
            "Missing required Content-Length header in %s-response: closing connection" % statusCode)
        forceCloseConnection = True
    elif not type(currentContentLength) is str:
        # Header values must be native strings per the WSGI spec
        util.warn("Invalid Content-Length header in response (%r): closing connection" % headerDict.get("content-length"))
        forceCloseConnection = True

    # HOTFIX for Vista and Windows 7 (GC issue 13, issue 23)
    # It seems that we must read *all* of the request body, otherwise
    # clients may miss the response.
    # For example Vista MiniRedir didn't understand a 401 response,
    # when trying an anonymous PUT of big files. As a consequence, it
    # doesn't retry with credentials and the file copy fails.
    # (XP is fine however).
    util.readAndDiscardInput(environ)

    # Make sure the socket is not reused, unless we are 100% sure all
    # current input was consumed
    if(util.getContentLength(environ) != 0
       and not environ.get("wsgidav.all_input_read")):
        util.warn(
            "Input stream not completely consumed: closing connection")
        forceCloseConnection = True

    if forceCloseConnection and headerDict.get("connection") != "close":
        util.warn("Adding 'Connection: close' header")
        response_headers.append(("Connection", "close"))

    # Log request
    if self._verbose >= 1:
        userInfo = environ.get("http_authenticator.username")
        if not userInfo:
            userInfo = "(anonymous)"
        threadInfo = ""
        if self._verbose >= 1:
            threadInfo = "<%s> " % threading.currentThread().ident
        # Collect noteworthy request attributes for the log line
        extra = []
        if "HTTP_DESTINATION" in environ:
            extra.append('dest="%s"' % environ.get("HTTP_DESTINATION"))
        if environ.get("CONTENT_LENGTH", "") != "":
            extra.append("length=%s" % environ.get("CONTENT_LENGTH"))
        if "HTTP_DEPTH" in environ:
            extra.append("depth=%s" % environ.get("HTTP_DEPTH"))
        if "HTTP_RANGE" in environ:
            extra.append("range=%s" % environ.get("HTTP_RANGE"))
        if "HTTP_OVERWRITE" in environ:
            extra.append("overwrite=%s" % environ.get("HTTP_OVERWRITE"))
        if self._verbose >= 1 and "HTTP_EXPECT" in environ:
            extra.append('expect="%s"' % environ.get("HTTP_EXPECT"))
        if self._verbose >= 2 and "HTTP_CONNECTION" in environ:
            extra.append('connection="%s"' % environ.get("HTTP_CONNECTION"))
        if self._verbose >= 2 and "HTTP_USER_AGENT" in environ:
            extra.append('agent="%s"' % environ.get("HTTP_USER_AGENT"))
        if self._verbose >= 2 and "HTTP_TRANSFER_ENCODING" in environ:
            extra.append('transfer-enc=%s' % environ.get("HTTP_TRANSFER_ENCODING"))
        if self._verbose >= 1:
            extra.append('elap=%.3fsec' % (time.time() - start_time))
        extra = ", ".join(extra)

        # This is the CherryPy format:
        # 127.0.0.1 - - [08/Jul/2009:17:25:23] "GET /loginPrompt?redirect=/renderActionList%3Frelation%3Dpersonal%26key%3D%26filter%3DprivateSchedule&reason=0 HTTP/1.1" 200 1944 "http://127.0.0.1:8002/command?id=CMD_Schedule" "Mozilla/5.0 (Windows; U; Windows NT 6.0; de; rv:1.9.1) Gecko/20090624 Firefox/3.5"
        # print >>sys.stderr, '%s - %s - [%s] "%s" %s -> %s' % (
        print('%s - %s - [%s] "%s" %s -> %s' % (
            threadInfo + environ.get("REMOTE_ADDR", ""),
            userInfo,
            util.getLogTime(),
            environ.get("REQUEST_METHOD") + " " + safeReEncode(environ.get("PATH_INFO", ""), sys.stdout.encoding or "ASCII"),
            extra,
            status,
            # response_headers.get(""), # response Content-Length
            # referer
        ), file=sys.stdout)

    return start_response(status, response_headers, exc_info)
class PropertyManager(object):
    """
    An in-memory property manager implementation using a dictionary.

    This is obviously not persistent, but should be enough in some cases.
    For a persistent implementation, see property_manager.ShelvePropertyManager().
    """

    def __init__(self):
        # _dict maps resource URL -> {property name: value}; created lazily
        self._dict = None
        self._loaded = False
        self._lock = ReadWriteLock()
        self._verbose = 2

    def __repr__(self):
        return "PropertyManager"

    def __del__(self):
        # Run the consistency check only in debug builds at verbosity >= 2
        if __debug__ and self._verbose >= 2:
            self._check()
        self._close()

    def _lazyOpen(self):
        # Create the backing dict on first use, under the write lock.
        _logger.debug("_lazyOpen()")
        self._lock.acquireWrite()
        try:
            self._dict = {}
            self._loaded = True
        finally:
            self._lock.release()

    def _sync(self):
        # Nothing to flush for the in-memory implementation.
        pass

    def _close(self):
        # Drop the dict and mark unloaded, under the write lock.
        _logger.debug("_close()")
        self._lock.acquireWrite()
        try:
            self._dict = None
            self._loaded = False
        finally:
            self._lock.release()

    def _check(self, msg=""):
        # Sanity check: formatting every entry must succeed; returns False
        # (after logging) if iteration or formatting raises.
        try:
            if not self._loaded:
                return True
            # for k in self._dict.keys():
            #     print "%s" % k
            #     print " -> %s" % self._dict[k]
            # self._dump()
            for k, v in self._dict.items():
                _ = "%s, %s" % (k, v)
            # _logger.debug("%s checks ok %s" % (self.__class__.__name__, msg))
            return True
        except Exception:
            _logger.exception("%s _check: ERROR %s" % (self.__class__.__name__, msg))
            # traceback.print_exc()
            # raise
            # sys.exit(-1)
            return False

    def _dump(self, msg="", out=None):
        # Dump all properties to `out` (stdout by default).
        # NOTE(review): Python-2-only `print >>` chevron syntax.
        if out is None:
            out = sys.stdout
        print >> out, "%s(%s): %s" % (self.__class__.__name__, self.__repr__(), msg)
        if not self._loaded:
            self._lazyOpen()
            if self._verbose >= 2:
                return  # Already dumped in _lazyOpen
        try:
            for k, v in self._dict.items():
                print >> out, " ", k
                for k2, v2 in v.items():
                    try:
                        print >> out, " %s: '%s'" % (k2, v2)
                    except Exception, e:
                        print >> out, " %s: ERROR %s" % (k2, e)
            out.flush()
        except Exception, e:
            util.warn("PropertyManager._dump() ERROR: %s" % e)