def parseLockRequest(stream): """ @return a twisted.web2.dav.element.WebDAVElement corresponding to the root element of the request body. Raises an error if the root element is not a lockinfo element, or if the request body (the stream) is empty. The latter is not quite correct, since according to RFC 2518, Section 7.8, a client may submit a LOCK request with an empty body (and an appropriate If: header) in order to refresh a lock, but it should be good enough for now. """ # obtain a DOM representation of the xml on the stream document = waitForDeferred(davXMLFromStream(stream)) yield document document = document.getResult() if document is None: # No request body makes no sense. # this is actually not quite correct, error = "Empty LOCK request." log.error(error) raise HTTPError(StatusResponse(responsecode.BAD_REQUEST, error)) if not isinstance(document.root_element, LockInfo): error = "LOCK request must have lockinfo element as root element." raise HTTPError(StatusResponse(responsecode.BAD_REQUEST, error)) yield document.root_element
def getOpaqueLockToken(request):
    """
    Extract the opaque lock token from the If: header.

    TODO: We currently assume the header looks exactly like
    If: (<opaquelocktoken:UUID>) which is certainly overly simplistic.
    Should work for now, though.  THIS MUST BE CLEANED UP!!!  See:
    http://www.webdav.org/specs/rfc2518.html#HEADER_If and
    twisted.web2.http_headers, parser_dav_headers, generator_dav_headers
    for a proper implementation of the If: header.

    @return: the opaque lock token on the If:-header, if it exists, None
        otherwise.
    @raise HTTPError: BAD_REQUEST if the header is malformed.
    """
    # NOTE(review): the header name "If:" includes the colon -- twisted's
    # header API usually takes the bare name "If"; confirm against callers.
    if not request.headers.hasHeader("If:"):
        return None

    ifh = request.headers.getRawHeaders("If:")[0]

    # The string representation of a UUID has 8 + 3 * 4 + 12 + 4 == 36
    # characters, "opaquelocktoken" has 15, plus one ":" and four padding
    # characters "(<" ">)" this yields a value of exactly 56 characters.
    if len(ifh) != 56:
        # BUG FIX: the original tested "not len(ifh) != 56" (i.e. flagged
        # only well-formed values) and built the HTTPError without raising.
        error = "invalid opaque lock token"
        raise HTTPError(StatusResponse(responsecode.BAD_REQUEST, error))

    # remove the "(<" and ">)" padding characters:
    oplt = ifh[2:-2]

    if not oplt.startswith("opaquelocktoken"):
        # BUG FIX: the original also forgot to raise here.
        error = "invalid opaque lock token"
        raise HTTPError(StatusResponse(responsecode.BAD_REQUEST, error))

    return oplt
def __checkSpot(self):
    """
    Verify that a new collection may be created at this resource's path.

    Checks, in order: the path is not already taken, a parent resource
    exists, the parent is a collection, and the parent is writable.

    @raise HTTPError: NOT_ALLOWED, INTERNAL_SERVER_ERROR, CONFLICT or
        UNAUTHORIZED, depending on which check fails.
    """
    log.debug("calling __checkSpot")
    if os.path.exists(self.fp.path):
        log.warn("Attempt to create collection where resource exists: %s" %
                 (self.fp.path, ))
        raise HTTPError(responsecode.NOT_ALLOWED)

    # BUG FIX: the original called self.parent().isCollection() before
    # checking that self.parent() is not None, so a missing parent crashed
    # with AttributeError instead of producing the intended 500 response.
    if not self.parent():
        log.error(
            "Attempt to create collection with no parent directory: %s" %
            (self.fp.path, ))
        raise HTTPError(
            StatusResponse(
                responsecode.INTERNAL_SERVER_ERROR,
                "The requested resource is not backed by a parent directory."
            ))

    if not self.parent().isCollection():
        log.error(
            "Attempt to create collection with non-collection parent: %s" %
            (self.parent().fp.path, ))
        raise HTTPError(
            StatusResponse(responsecode.CONFLICT,
                           "Parent resource is not a collection."))

    if not self.parent().isWritableFile():
        errorMessage = "You don't have sufficient privileges to create a collection in this location."
        log.error(errorMessage)
        raise HTTPError(
            StatusResponse(responsecode.UNAUTHORIZED, errorMessage))

    log.debug("done __checkSpot")
def _prepareForCopy(destination, destination_uri, request, depth):
    """
    Validate the destination of a COPY/MOVE request.

    @param destination: the resource located at C{destination_uri}.
    @param destination_uri: the request's Destination header value.
    @param request: the COPY or MOVE request being serviced.
    @param depth: the already-validated Depth header value.
    @return: the (destination, destination_uri, depth) triple.
    @raise HTTPError: FORBIDDEN, NOT_IMPLEMENTED, PRECONDITION_FAILED or
        CONFLICT when the destination cannot be written to.
    """
    # The destination must be adaptable to IDAVResource.
    try:
        destination = IDAVResource(destination)
    except TypeError:
        log.err("Attempt to %s to a non-DAV resource: (%s) %s"
                % (request.method, destination.__class__, destination_uri))
        raise HTTPError(StatusResponse(
            responsecode.FORBIDDEN,
            "Destination %s is not a WebDAV resource." % (destination_uri, )))

    # FIXME: Right now we don't know how to copy to a non-DAVFile resource.
    # We may need some more API in IDAVResource.
    # So far, we need: .exists(), .fp.parent()
    if not isinstance(destination, twisted.web2.dav.static.DAVFile):
        log.err("DAV copy between non-DAVFile DAV resources isn't implemented")
        raise HTTPError(StatusResponse(
            responsecode.NOT_IMPLEMENTED,
            "Destination %s is not a DAVFile resource." % (destination_uri, )))

    # Refuse to clobber an existing destination unless the Overwrite
    # header permits it (a missing header defaults to True).
    overwrite = request.headers.getHeader("overwrite", True)
    if destination.exists() and not overwrite:
        log.err("Attempt to %s onto existing file without overwrite flag enabled: %s"
                % (request.method, destination.fp.path))
        raise HTTPError(StatusResponse(
            responsecode.PRECONDITION_FAILED,
            "Destination %s already exists." % (destination_uri, )))

    # The destination's parent collection must exist on disk.
    if not destination.fp.parent().isdir():
        log.err("Attempt to %s to a resource with no parent: %s"
                % (request.method, destination.fp.path))
        raise HTTPError(StatusResponse(responsecode.CONFLICT,
                                       "No parent collection."))

    return destination, destination_uri, depth
def defer():
    """
    Store the property in the dead-property store, refusing protected
    properties and properties in the server-private namespace.
    """
    if property.protected:
        msg = "Protected property %s may not be set." % (property.sname(), )
        raise HTTPError(StatusResponse(responsecode.FORBIDDEN, msg))

    if property.namespace == twisted_private_namespace:
        msg = ("Properties in the %s namespace are private to the server."
               % (property.sname(), ))
        raise HTTPError(StatusResponse(responsecode.FORBIDDEN, msg))

    return self.deadProperties().set(property)
def __http_copy(self, request):
    """
    Handle COPY: assert that the destination resource is not locked, then
    delegate the actual copy to the superclass's http_COPY.

    @return: (via deferredGenerator) the superclass's COPY response.
    @raise HTTPError: BAD_REQUEST if no Destination header is present.
    """
    destination_uri = request.headers.getHeader("destination")

    # a Destination header is mandatory
    if not destination_uri:
        msg = "No destination header in %s request." % (
            request.method, )
        log.error(msg)
        raise HTTPError(StatusResponse(responsecode.BAD_REQUEST, msg))

    # assert that the destination is not locked
    dest = waitForDeferred(request.locateResource(destination_uri))
    yield dest
    dest = dest.getResult()

    ignore = waitForDeferred(
        deferredGenerator(dest.assertNotLocked)(request))
    yield ignore
    ignore = ignore.getResult()

    # delegate the actual COPY to the superclass
    dd = waitForDeferred(super(Lockable, self).http_COPY(request))
    yield dd
    # deferredGenerator convention: the last yielded value is the result.
    yield dd.getResult()
def doMove(r):
    """
    Carry out the MOVE once the destination has been validated.

    @param r: the (destination, destination_uri, depth) triple produced by
        the preparation step.
    @raise HTTPError: BAD_REQUEST for an illegal depth on a collection.
    """
    destination, destination_uri, depth = r

    # RFC 2518, section 8.9 says that we must act as if the Depth header
    # is set to infinity, and that the client must omit the Depth header
    # or set it to infinity.  That is somewhat at odds with the notion
    # that a bad request should be rejected outright (section 8), so we
    # play it safe and reject collection moves with any other depth.
    if self.fp.isdir() and depth != "infinity":
        msg = "Client sent illegal depth header value for MOVE: %s" % (
            depth, )
        log.err(msg)
        raise HTTPError(StatusResponse(responsecode.BAD_REQUEST, msg))

    # May need to add a location header
    addLocation(request, destination_uri)

    return move(self.fp, request.uri, destination.fp, destination_uri, depth)
def do(action, property): """ Perform action(property, request) while maintaining an undo queue. """ has = waitForDeferred(self.hasProperty(property, request)) yield has has = has.getResult() if has: oldProperty = waitForDeferred( self.readProperty(property, request)) yield oldProperty oldProperty.getResult() def undo(): return self.writeProperty(oldProperty, request) else: def undo(): return self.removeProperty(property, request) try: x = waitForDeferred(action(property, request)) yield x x.getResult() except ValueError, e: # Convert ValueError exception into HTTPError responses.add( Failure(exc_value=HTTPError( StatusResponse(responsecode.FORBIDDEN, str(e)))), property) yield False return
def forbidden(method):
    """
    Log and raise a FORBIDDEN response for the given HTTP method.

    @param method: the name of the HTTP method being denied.
    @raise HTTPError: always, with status FORBIDDEN.
    """
    message = "Denying " + method + " request."
    log.warn(message)
    raise HTTPError(StatusResponse(responsecode.FORBIDDEN, message))
def assertExistence(self):
    """
    Raise and log an appropriate error if the resource does not exist
    on the file system.

    @raise HTTPError: NOT_FOUND if the backing file is absent.
    """
    path = self.resource.fp.path
    if os.path.exists(path):
        return
    message = "Resource %s not found on file system." % path
    log.warn(message)
    raise HTTPError(StatusResponse(responsecode.NOT_FOUND, message))
def __lockPreconditions(self, request):
    """
    Preconditions for a LOCK request: the file must exist, be writable,
    and not be locked by someone else.

    @return: (via deferredGenerator) the request object.
    @raise HTTPError: NOT_FOUND, UNAUTHORIZED or (via assertNotLocked)
        LOCKED as appropriate.
    """
    if not self.exists():
        error = "File not found in LOCK request: %s" % (self.fp.path, )
        raise HTTPError(StatusResponse(responsecode.NOT_FOUND, error))

    if not self.isWritableFile():
        error = "No write permission for file."
        raise HTTPError(StatusResponse(responsecode.UNAUTHORIZED, error))

    # raises LOCKED if the resource is locked with a token we don't hold
    ignore = waitForDeferred(
        deferredGenerator(self.assertNotLocked)(request))
    yield ignore
    ignore = ignore.getResult()

    # for some reason, the result of preconditions_LOCK is handed as an argument to http_LOCK
    # (i guess so that the request can be modified during the preconditions call). anyway,
    # we must yield the request at the end.
    yield request
def get(self, qname):
    """
    Look up the stored WebDAV property for the given qualified name.

    @param qname: a (namespace, name) tuple identifying the property.
    @return: the root element of the stored property document.
    @raise HTTPError: NOT_FOUND if no such property is stored.
    """
    encoded = self._encode(qname)
    try:
        raw = self.attrs[encoded]
    except KeyError:
        raise HTTPError(
            StatusResponse(responsecode.NOT_FOUND,
                           "No such property: {%s}%s" % qname))
    return davxml.WebDAVDocument.fromString(raw).root_element
def readRequestBody(request): """ Read XML body from request stream. """ try: doc = waitForDeferred(davXMLFromStream(request.stream)) yield doc doc = doc.getResult() except ValueError, e: log.error("Error while reading PROPPATCH body", exc_info=e) raise HTTPError(StatusResponse(responsecode.BAD_REQUEST, str(e)))
def preconditions_PUT(self, request):
    """
    Validate that a PUT may proceed: an existing target must be a regular
    file, a new target needs an existing parent directory, and every
    Content-* header must be one we know how to handle.

    @raise HTTPError: FORBIDDEN, CONFLICT or NOT_IMPLEMENTED as
        appropriate.
    """
    if self.fp.exists():
        # Overwriting something that is not a plain file is refused.
        if not self.fp.isfile():
            log.err("Unable to PUT to non-file: %s" % (self.fp.path, ))
            raise HTTPError(StatusResponse(
                responsecode.FORBIDDEN,
                "The requested resource exists but is not backed by a regular file."
            ))
        resource_is_new = False
    else:
        # Creating a new resource requires an existing parent collection.
        if not self.fp.parent().isdir():
            log.err("No such directory: %s" % (self.fp.path, ))
            raise HTTPError(StatusResponse(
                responsecode.CONFLICT,
                "Parent collection resource does not exist."))
        resource_is_new = True

    # HTTP/1.1 (RFC 2068, section 9.6) requires that we respond with a
    # Not Implemented error if we get a Content-* header which we don't
    # recognize and handle properly.
    recognized = (
        #"Content-Base",     # Doesn't make sense in PUT?
        #"Content-Encoding", # Requires that we decode it?
        "Content-Language",
        "Content-Length",
        #"Content-Location", # Doesn't make sense in PUT?
        "Content-MD5",
        #"Content-Range",    # FIXME: Need to implement this
        "Content-Type",
    )
    for header, value in request.headers.getAllRawHeaders():
        if header.startswith("Content-") and header not in recognized:
            log.err(
                "Client sent unrecognized content header in PUT request: %s"
                % (header, ))
            raise HTTPError(StatusResponse(
                responsecode.NOT_IMPLEMENTED,
                "Unrecognized content header %r in request." % (header, )))
def assertWriteLock(lockInfo):
    """
    Require that the lockinfo requests a write lock.

    RFC 2518, Section 7: the write lock is the only lock type described in
    the specification, so we require LOCK requests to ask for one.

    @raise HTTPError: NOT_IMPLEMENTED for any other lock type.
    """
    lockType = lockInfo.childOfType(davxml.LockType)
    if lockType.childOfType(davxml.Write) is None:
        raise HTTPError(StatusResponse(
            responsecode.NOT_IMPLEMENTED,
            "Only write locks supported so far."))
def assertZeroLockDepth(depth):
    """
    Refuse LOCK requests whose Depth is anything but "0".

    Recursive locking of collection contents is not implemented, which is
    NOT in agreement with RFC 2518, Section 8.10.4 (all resources that
    support the LOCK method MUST support the Depth header).  Whatever.

    @raise HTTPError: NOT_IMPLEMENTED for any non-zero depth.
    """
    if depth == "0":
        return
    raise HTTPError(StatusResponse(
        responsecode.NOT_IMPLEMENTED,
        "LOCK operations with depth 'infinity' are not yet supported."))
def assertNotLocked(self, request):
    """
    Raise LOCKED unless the resource is unlocked for this request.

    @return: (via deferredGenerator) the request, so that callbacks can
        chain on it.
    @raise HTTPError: LOCKED if isLocked() reports the resource as locked.
    """
    il = waitForDeferred(deferredGenerator(self.isLocked)(request))
    yield il
    il = il.getResult()

    # only an explicit True from isLocked counts as "locked"
    if il is True:
        error = "Resource is locked and you don't have the proper token handy."
        log.error(error)
        raise HTTPError(StatusResponse(responsecode.LOCKED, error))

    # we must forward the request to possible callbacks
    yield request
def defer():
    """
    Delete a dead property, refusing live properties and properties in
    the server-private namespace.  Accepts either a (namespace, name)
    tuple or a property element.
    """
    if type(property) is tuple:
        qname = property
        sname = "{%s}%s" % property
    else:
        qname = property.qname()
        sname = property.sname()

    if qname in self.liveProperties:
        raise HTTPError(StatusResponse(
            responsecode.FORBIDDEN,
            "Live property %s cannot be deleted." % (sname, )))

    if qname[0] == twisted_private_namespace:
        raise HTTPError(StatusResponse(
            responsecode.FORBIDDEN,
            "Properties in the %s namespace are private to the server."
            % (sname, )))

    return self.deadProperties().delete(qname)
def http_MKCOL(self, request):
    """
    Respond to a MKCOL request. (RFC 2518, section 8.3)
    """
    parent = waitForDeferred(request.locateResource(parentForURL(request.uri)))
    yield parent
    parent = parent.getResult()

    # MKCOL on an existing resource is not allowed
    if self.fp.exists():
        log.err("Attempt to create collection where file exists: %s" % (self.fp.path, ))
        raise HTTPError(responsecode.NOT_ALLOWED)

    if not parent.isCollection():
        # NOTE(review): the log message prints self.fp.path rather than
        # the parent's path -- possibly misleading; left unchanged here.
        log.err("Attempt to create collection with non-collection parent: %s" % (self.fp.path, ))
        raise HTTPError(
            StatusResponse(responsecode.CONFLICT,
                           "Parent resource is not a collection."))

    if not self.fp.parent().isdir():
        # the DAV parent exists but has no backing directory on disk --
        # a server-side inconsistency, hence the 500 response
        log.err("Attempt to create collection with no parent directory: %s" % (self.fp.path, ))
        raise HTTPError(
            StatusResponse(
                responsecode.INTERNAL_SERVER_ERROR,
                "The requested resource is not backed by a parent directory."))

    #
    # Read request body; any body content at all is rejected with
    # UNSUPPORTED_MEDIA_TYPE.
    #
    x = waitForDeferred(noDataFromStream(request.stream))
    yield x
    try:
        x.getResult()
    except ValueError, e:
        log.err("Error while handling MKCOL body: %s" % (e, ))
        raise HTTPError(responsecode.UNSUPPORTED_MEDIA_TYPE)
def prepareForCopy(self, request):
    """
    Validate the source side of a COPY/MOVE request, then locate the
    destination and hand off to _prepareForCopy.

    @return: a deferred firing with _prepareForCopy's result.
    @raise HTTPError: BAD_REQUEST for an illegal Depth header or a missing
        Destination header, NOT_FOUND if the source does not exist.
    """
    # Depth must be "0" or "infinity" (the default when absent).
    depth = request.headers.getHeader("depth", "infinity")
    if depth not in ("0", "infinity"):
        msg = ("Client sent illegal depth header value: %s" % (depth, ))
        log.err(msg)
        raise HTTPError(StatusResponse(responsecode.BAD_REQUEST, msg))

    # The source resource has to exist.
    if not self.exists():
        log.err("File not found: %s" % (self.fp.path, ))
        raise HTTPError(StatusResponse(
            responsecode.NOT_FOUND,
            "Source resource %s not found." % (request.uri, )))

    # A Destination header is mandatory.
    destination_uri = request.headers.getHeader("destination")
    if not destination_uri:
        msg = "No destination header in %s request." % (request.method, )
        log.err(msg)
        raise HTTPError(StatusResponse(responsecode.BAD_REQUEST, msg))

    deferred = request.locateResource(destination_uri)
    deferred.addCallback(_prepareForCopy, destination_uri, request, depth)
    return deferred
def __proppatch(self, request):
    """
    Handle a PROPPATCH body: read and parse it, then validate it and
    extract the clone field to be updated.

    @raise HTTPError: FORBIDDEN if the request body does not validate.
    """
    # read the body
    doc = waitForDeferred(deferredGenerator(readRequestBody)(request))
    yield doc
    doc = doc.getResult()

    # perform basic validation, and extract the clone field to be updated
    try:
        cloneField = validateBodyXML(doc)
    except AssertionError, e:
        error = "PROPPATCH request body does not validate. Error: " + str(
            e)
        log.info(error)
        raise HTTPError(StatusResponse(responsecode.FORBIDDEN, error))
    # NOTE(review): cloneField is unused within this visible span -- the
    # method presumably continues beyond this excerpt; confirm.
def getDepth(headers):
    """
    Extract and validate the Depth header of a LOCK request.

    RFC 2518, Section 8.10.4: values other than 0 or infinity MUST NOT be
    used with the Depth header on a LOCK method; if no Depth header is
    submitted the request MUST act as if "Depth:infinity" had been sent.

    @param headers: the request headers.
    @return: the depth value, either "0" or "infinity".
    @raise HTTPError: BAD_REQUEST for any other value.
    """
    depth = headers.getHeader("depth", "infinity")
    if depth in ("0", "infinity"):
        return depth
    error = "Values other than 0 or infinity MUST NOT be used with the Depth header on a LOCK method."
    raise HTTPError(StatusResponse(responsecode.BAD_REQUEST, error))
def http_PROPPATCH(self, request):
    """
    Respond to a PROPPATCH request. (RFC 2518, section 8.2)
    """
    if not self.fp.exists():
        log.err("File not found: %s" % (self.fp.path, ))
        raise HTTPError(responsecode.NOT_FOUND)

    #
    # Read request body
    #
    try:
        doc = waitForDeferred(davXMLFromStream(request.stream))
        yield doc
        doc = doc.getResult()
    except ValueError, e:
        # the XML parser reports malformed documents as ValueError
        log.err("Error while handling PROPPATCH body: %s" % (e, ))
        raise HTTPError(StatusResponse(responsecode.BAD_REQUEST, str(e)))
def checkDepthHeader(request):
    """
    Reject DELETE requests whose Depth header is anything but "infinity".

    RFC 2518, section 8.6 says that we must act as if the Depth header is
    set to infinity, and that the client must omit the Depth header or set
    it to infinity, meaning that for collections we will delete all
    members.  This seems somewhat at odds with the notion that a bad
    request should be rejected outright (section 8), so let's play it safe
    for now and refuse other values.

    @raise HTTPError: BAD_REQUEST for any Depth other than "infinity".
    """
    depth = request.headers.getHeader("depth", "infinity")
    if depth == "infinity":
        return
    msg = ("Client sent illegal depth header value for DELETE: %s" %
           (depth,))
    log.debug(msg)
    raise HTTPError(StatusResponse(responsecode.BAD_REQUEST, msg))
def defaultHandler(property, store): """ Add an individual property. @param property: the property to be stored @param store: where to store the property @return: the response for this property """ try: if store.contains(property.qname()) and (store.get(property.qname()) == property): pass else: store.set(property) return responsecode.OK except ValueError, e: return Failure(exc_value=HTTPError( StatusResponse(responsecode.BAD_REQUEST, str(e))))
def assertExclusiveLock(lockInfo):
    """
    Require an exclusive lock scope.

    RFC 2518, Section 15.2: a class 2 compliant resource MUST support the
    LOCK method, the supportedlock and lockdiscovery properties, the
    Time-Out response header and the Lock-Token request header.  RFC 2518,
    Section 6.1: a server that supports locking may choose any combination
    of exclusive and shared locks.  In other words, supporting only
    exclusive locks is sufficient for class 2 compliance, which is
    convenient.

    @raise HTTPError: NOT_IMPLEMENTED for non-exclusive lock scopes.
    """
    scope = lockInfo.childOfType(davxml.LockScope)
    if scope.childOfType(davxml.Exclusive) is None:
        raise HTTPError(StatusResponse(
            responsecode.NOT_IMPLEMENTED,
            "Only exclusive locks supported so far."))
def _put(self, stream):
    """
    Store the request body in the backing file.

    Deletes any pre-existing file, writes the stream contents, registers
    the resource with its parent and updates the resource metadata.

    @param stream: the request body stream to be written.
    @return: (via deferredGenerator) the result of the delete step.
    @raise HTTPError: UNAUTHORIZED if the file is not writable.
    """
    if not self.isWritableFile():
        message = "http_PUT: not authorized to put file: " + self.fp.path
        log.error(message)
        raise HTTPError(StatusResponse(responsecode.UNAUTHORIZED, message))

    response = waitForDeferred(deferredGenerator(self.__putDelete)())
    yield response
    response = response.getResult()

    xx = waitForDeferred(deferredGenerator(self.__putFile)(stream))
    yield xx
    xx = xx.getResult()

    self._registerWithParent()

    xx = waitForDeferred(deferredGenerator(self._updateMetadata)())
    yield xx
    # NOTE(review): getResult() is not called on the metadata deferred, so
    # failures from _updateMetadata are silently dropped -- confirm intended.

    # deferredGenerator convention: the last yielded value is the result.
    yield response
def cloneHandler(property, store, request, lresource):
    """
    Register a clone announced by a remote host.

    The announcing host must be reachable (via pingBack) before the clone
    is accepted; a clone that is already registered is acknowledged
    without rewriting the store.
    """
    from angel_app.resource.remote.clone import clonesFromElement, clonesToElement

    clonesKey = elements.Clones.qname()
    if store.contains(clonesKey):
        known = clonesFromElement(store.get(clonesKey))
    else:
        known = []
    #if len(known) >= maxclones:
    #    failWith("Too many clones. Not adding.")

    announced = clonesFromElement(property)
    if len(announced) == 0:
        failWith("Received malformed clone property:" + repr(property) + ".")

    candidate = pingBack(announced[0], request,
                         lresource.publicKeyString(),
                         lresource.resourceID(), lresource)
    if not candidate:
        # the announcing host could not be reached; refuse registration
        response = StatusResponse(responsecode.BAD_REQUEST,
                                  "Can't connect to you. I will ignore you.")
        return Failure(exc_value=HTTPError(response))

    # check if this clone is already registered _after_ doing the
    # potential IP resolution performed by pingBack
    if candidate in known:
        log.info("clone %r already registered.", candidate)
        # nothing needs to be done, pretend everything is fine
        return responsecode.OK

    # prepend the new clone, capping the list at maxclones entries
    updated = ([candidate] + known)[:maxclones]
    return defaultHandler(clonesToElement(updated), store)
# # Read request body # try: doc = waitForDeferred(davXMLFromStream(request.stream)) yield doc doc = doc.getResult() except ValueError, e: log.err("Error while handling PROPPATCH body: %s" % (e, )) raise HTTPError(StatusResponse(responsecode.BAD_REQUEST, str(e))) if doc is None: error = "Request XML body is required." log.err(error) raise HTTPError(StatusResponse(responsecode.BAD_REQUEST, error)) # # Parse request # update = doc.root_element if not isinstance(update, davxml.PropertyUpdate): error = ("Request XML body must be a propertyupdate element." % (davxml.PropertyUpdate.sname(), )) log.err(error) raise HTTPError(StatusResponse(responsecode.BAD_REQUEST, error)) responses = PropertyStatusResponseQueue("PROPPATCH", request.uri, responsecode.NO_CONTENT) undoActions = [] gotError = False
def delete(uri, filepath, depth="infinity"): """ Perform a X{DELETE} operation on the given URI, which is backed by the given filepath. @param filepath: the L{FilePath} to delete. @param depth: the recursion X{Depth} for the X{DELETE} operation, which must be "infinity". @raise HTTPError: (containing a response with a status code of L{responsecode.BAD_REQUEST}) if C{depth} is not "infinity". @raise HTTPError: (containing an appropriate response) if the delete operation fails. If C{filepath} is a directory, the response will be a L{MultiStatusResponse}. @return: a deferred response with a status code of L{responsecode.NO_CONTENT} if the X{DELETE} operation succeeds. """ # # Remove the file(s) # # FIXME: defer if filepath.isdir(): # # RFC 2518, section 8.6 says that we must act as if the Depth header is # set to infinity, and that the client must omit the Depth header or set # it to infinity, meaning that for collections, we will delete all # members. # # This seems somewhat at odds with the notion that a bad request should # be rejected outright; if the client sends a bad depth header, the # client is broken, and RFC 2518, section 8 suggests that a bad request # should be rejected... # # Let's play it safe for now and ignore broken clients. # if depth != "infinity": msg = ("Client sent illegal depth header value for DELETE: %s" % (depth, )) log.err(msg) raise HTTPError(StatusResponse(responsecode.BAD_REQUEST, msg)) # # Recursive delete # # RFC 2518, section 8.6 says that if we get an error deleting a resource # other than the collection in the request-URI, that we must respond # with a multi-status response containing error statuses for each # resource that we fail to delete. It also says we should not return # no-content (success) status, which means that we should continue after # errors, rather than aborting right away. This is interesting in that # it's different from how most operating system tools act (eg. rm) when # recursive filsystem deletes fail. 
# uri_path = urllib.unquote(urlsplit(uri)[2]) if uri_path[-1] == "/": uri_path = uri_path[:-1] log.msg("Deleting directory %s" % (filepath.path, )) # NOTE: len(uri_path) is wrong if os.sep is not one byte long... meh. request_basename = filepath.path[:-len(uri_path)] request_basename_len = len(request_basename) errors = ResponseQueue(request_basename, "DELETE", responsecode.NO_CONTENT) # FIXME: defer this for dir, subdirs, files in os.walk(filepath.path, topdown=False): for filename in files: path = os.path.join(dir, filename) try: os.remove(path) except: errors.add(path, Failure()) for subdir in subdirs: path = os.path.join(dir, subdir) if os.path.islink(path): try: os.remove(path) except: errors.add(path, Failure()) else: try: os.rmdir(path) except: errors.add(path, Failure()) try: os.rmdir(filepath.path) except: raise HTTPError( statusForFailure(Failure(), "deleting directory: %s" % (filepath.path, ))) response = errors.response() else: # # Delete a file; much simpler, eh? # log.msg("Deleting file %s" % (filepath.path, )) try: os.remove(filepath.path) except: raise HTTPError( statusForFailure(Failure(), "deleting file: %s" % (filepath.path, ))) response = responsecode.NO_CONTENT # Restat filepath since we deleted the backing file filepath.restat(False) return succeed(response)