def loseConnection(self):
    """
    Finish an attachment upload: enforce the per-attachment size limit and
    the home's overall quota, move the temporary upload file into its final
    location, record content-type and MD5 properties, and adjust quota usage.

    @return: a Deferred that fires with C{None} on success, or fails with
        L{AttachmentSizeTooLarge} or L{QuotaExceeded}.
    """
    # Home object is needed for the quota checks/adjustment below.
    home = self._attachment._calendarObject._calendar._home
    oldSize = self._attachment.size()
    # tell() gives the number of bytes written to the temp file.
    newSize = self._file.tell()
    # FIXME: do anything
    self._file.close()

    # Check max size for attachment
    if newSize > config.MaximumAttachmentSize:
        self._path.remove()
        return fail(AttachmentSizeTooLarge())

    # Check overall user quota; only the delta vs. the previous attachment
    # size counts against the home.
    allowed = home.quotaAllowedBytes()
    if allowed is not None and allowed < (home.quotaUsedBytes() + (newSize - oldSize)):
        self._path.remove()
        return fail(QuotaExceeded())

    # Atomically replace the attachment content with the uploaded file.
    self._path.moveTo(self._attachment._path)

    # NOTE: this re-reads the whole file to compute the MD5.
    md5 = hashlib.md5(self._attachment._path.getContent()).hexdigest()
    props = self._attachment.properties()
    props[contentTypeKey] = GETContentType(
        generateContentType(self._contentType))
    props[md5key] = TwistedGETContentMD5.fromString(md5)

    # Adjust quota
    home.adjustQuotaUsedBytes(newSize - oldSize)
    props.flush()
    return succeed(None)
def loseConnection(self):
    """
    Complete an attachment upload.

    Validates the uploaded size against the configured attachment limit and
    the home quota, then promotes the temporary file to the attachment's
    real path, stores content-type/MD5 properties, and accounts for the
    change in quota usage.

    @return: a Deferred firing C{None} on success, or failing with
        L{AttachmentSizeTooLarge} or L{QuotaExceeded}.
    """
    home = self._attachment._calendarObject._calendar._home
    previousSize = self._attachment.size()
    uploadedSize = self._file.tell()
    # FIXME: do anything
    self._file.close()

    sizeDelta = uploadedSize - previousSize

    # Reject uploads larger than the configured per-attachment maximum.
    if uploadedSize > config.MaximumAttachmentSize:
        self._path.remove()
        return fail(AttachmentSizeTooLarge())

    # Reject uploads that would push the home over its quota; only the
    # growth relative to the old attachment counts.
    quotaLimit = home.quotaAllowedBytes()
    if quotaLimit is not None:
        if home.quotaUsedBytes() + sizeDelta > quotaLimit:
            self._path.remove()
            return fail(QuotaExceeded())

    self._path.moveTo(self._attachment._path)

    digest = hashlib.md5(self._attachment._path.getContent()).hexdigest()
    properties = self._attachment.properties()
    properties[contentTypeKey] = GETContentType(
        generateContentType(self._contentType)
    )
    properties[md5key] = TwistedGETContentMD5.fromString(digest)

    # Account for the change in stored bytes.
    home.adjustQuotaUsedBytes(sizeDelta)
    properties.flush()
    return succeed(None)
def readProperty(self, property, request):
    """
    Read a WebDAV property for this directory-backed vCard result.

    Live DAV-namespace properties are synthesized from the vCard data;
    twisted-dav-namespace properties defer to the superclass; everything
    else is delegated to the backing address book resource.

    @param property: either a C{(namespace, name)} tuple or a property
        element providing C{qname()}.
    @param request: the current request (passed through to delegates).
    """
    # Normalize to a (namespace, name) qname tuple.
    if type(property) is tuple:
        qname = property
    else:
        qname = property.qname()
    namespace, name = qname

    if namespace == dav_namespace:
        if name == "resourcetype":
            result = davxml.ResourceType.empty  # @UndefinedVariable
            return result
        elif name == "getetag":
            # ETag is derived from an MD5 of the serialized vCard text.
            result = davxml.GETETag(
                ETag(hashlib.md5(self.vCardText()).hexdigest()).generate())
            return result
        elif name == "getcontenttype":
            mimeType = MimeType('text', 'vcard', {})
            result = davxml.GETContentType(generateContentType(mimeType))
            return result
        elif name == "getcontentlength":
            result = davxml.GETContentLength.fromString(
                str(len(self.vCardText())))
            return result
        elif name == "getlastmodified":
            if self.vCard().hasProperty("REV"):
                modDatetime = parse_date(self.vCard().propertyValue("REV"))
            else:
                modDatetime = datetime.datetime.utcnow()
            # strip time zone because time zones are unimplemented in davxml.GETLastModified.fromDate
            d = modDatetime.date()
            t = modDatetime.time()
            modDatetimeNoTZ = datetime.datetime(d.year, d.month, d.day, t.hour,
                                                t.minute, t.second,
                                                t.microsecond, None)
            result = davxml.GETLastModified.fromDate(modDatetimeNoTZ)
            return result
        elif name == "creationdate":
            if self.vCard().hasProperty(
                    "REV"):  # use modification date property if it exists
                creationDatetime = parse_date(
                    self.vCard().propertyValue("REV"))
            else:
                creationDatetime = datetime.datetime.utcnow()
            result = davxml.CreationDate.fromDate(creationDatetime)
            return result
        elif name == "displayname":
            # AddressBook.app uses N. Use FN or UID instead?
            result = davxml.DisplayName.fromString(
                self.vCard().propertyValue("N"))
            return result
    elif namespace == twisted_dav_namespace:
        return super(ABDirectoryQueryResult, self).readProperty(property,
                                                                request)

    # Unknown dav-namespace names and all other namespaces fall through to
    # the backing address book.
    return self._directoryBackedAddressBook.readProperty(property, request)
def serialize(self):
    """
    Create a dictionary mapping key attributes so this object can be sent
    over a cross-pod call and reconstituted at the other end. Note that the
    other end may have a different schema so the attributes may not match
    exactly and will need to be processed accordingly.

    @return: a C{dict} of attribute name (without the leading underscore)
        to value, with timestamps rendered as ISO-format strings and the
        content type rendered via L{generateContentType}.
    """
    # Strip the leading underscore from each row attribute name.
    result = {
        attr[1:]: getattr(self, attr, None)
        for attr in self._rowAttributes()
    }
    # Timestamps and the MIME type are not directly serializable, so
    # convert them to plain strings. Guard against a missing timestamp so
    # serialization of a partially-populated row does not raise.
    for key in ("created", "modified"):
        if result[key] is not None:
            result[key] = result[key].isoformat(" ")
    result["contentType"] = generateContentType(result["contentType"])
    return result
def readProperty(self, property, request):
    """
    Resolve a WebDAV property for this vCard-backed query result.

    DAV-namespace properties are computed from the vCard; properties in the
    twisted DAV namespace go to the superclass; anything else is delegated
    to the backing directory address book.

    @param property: a C{(namespace, name)} tuple or an element with
        C{qname()}.
    @param request: the current request, forwarded to delegates.
    """
    qname = property if type(property) is tuple else property.qname()
    namespace, name = qname

    if namespace == dav_namespace:
        if name == "resourcetype":
            return davxml.ResourceType.empty  # @UndefinedVariable

        if name == "getetag":
            digest = hashlib.md5(self.vCardText()).hexdigest()
            return davxml.GETETag(ETag(digest).generate())

        if name == "getcontenttype":
            return davxml.GETContentType(
                generateContentType(MimeType('text', 'vcard', {})))

        if name == "getcontentlength":
            return davxml.GETContentLength.fromString(
                str(len(self.vCardText())))

        if name == "getlastmodified":
            vcard = self.vCard()
            if vcard.hasProperty("REV"):
                modified = parse_date(vcard.propertyValue("REV"))
            else:
                modified = datetime.datetime.utcnow()
            # Drop any time zone: davxml.GETLastModified.fromDate does not
            # implement zone-aware datetimes.
            return davxml.GETLastModified.fromDate(
                modified.replace(tzinfo=None))

        if name == "creationdate":
            vcard = self.vCard()
            if vcard.hasProperty("REV"):
                # Use the modification date as the creation date when
                # one is available.
                created = parse_date(vcard.propertyValue("REV"))
            else:
                created = datetime.datetime.utcnow()
            return davxml.CreationDate.fromDate(created)

        if name == "displayname":
            # AddressBook.app uses N. Use FN or UID instead?
            return davxml.DisplayName.fromString(
                self.vCard().propertyValue("N"))

    elif namespace == twisted_dav_namespace:
        return super(ABDirectoryQueryResult, self).readProperty(
            property, request)

    # Fall through for unknown names/namespaces.
    return self._directoryBackedAddressBook.readProperty(property, request)
def recv_get_attachment_data(self, txn, request, stream):
    """
    Process an getAttachmentData cross-pod request. Request arguments as per
    L{send_get_attachment_data}.

    NOTE(review): written in generator style (yield/returnValue) — presumably
    decorated with C{inlineCallbacks} at the definition site; the decorator
    is not visible in this chunk.

    @param txn: the transaction to use for store lookups
    @param request: request arguments
    @type request: C{dict}
    @param stream: stream to which the attachment content is written
    @return: (via returnValue) a 2-tuple of the generated content-type
        string and the attachment name
    @raise InvalidAttachmentOperation: if no attachment exists for the
        requested ID
    """
    home, _ignore = yield self._getStoreObjectForRequest(txn, request)
    attachment = yield home.getAttachmentByID(request["attachmentID"])
    if attachment is None:
        raise InvalidAttachmentOperation(
            "Attachment is missing: {}".format(request["attachmentID"]))
    # Stream the attachment body to the caller-supplied stream.
    attachment.retrieve(stream)
    returnValue((generateContentType(attachment.contentType()), attachment.name(),))
def setData(self, uid, notificationtype, notificationdata, inserting=False):
    """
    Store notification data for this resource as JSON, registering the write
    (and its undo) with the current transaction and updating the index and
    resource properties.

    @param uid: the notification UID; the on-disk resource name is
        C{uid + ".xml"}
    @param notificationtype: the notification type, stored in the index and
        in the NotificationType property (JSON-encoded)
    @param notificationdata: the payload, JSON-encoded into the file body
    @param inserting: unused here — presumably significant to callers or
        overrides; confirm before removing
    """
    rname = uid + ".xml"
    # Keep the collection index in sync with this record.
    self._parentCollection.retrieveOldIndex().addOrUpdateRecord(
        NotificationRecord(uid, rname, notificationtype)
    )

    self._notificationdata = notificationdata
    notificationtext = json.dumps(self._notificationdata)
    md5 = hashlib.md5(notificationtext).hexdigest()

    def do():
        # Transactional write: move any existing file aside as a hidden
        # backup, write the new content, and return an undo callable that
        # restores (or removes) the file if the transaction aborts.
        backup = None
        if self._path.exists():
            backup = hidden(self._path.temporarySibling())
            self._path.moveTo(backup)
        fh = self._path.open("w")
        try:
            # FIXME: concurrency problem; if this write is interrupted
            # halfway through, the underlying file will be corrupt.
            fh.write(notificationtext)
        finally:
            fh.close()

        def undo():
            if backup:
                backup.moveTo(self._path)
            else:
                self._path.remove()
        return undo
    self._transaction.addOperation(do, "set notification data %r" % (self.name(),))

    # Mark all properties as dirty, so they will be re-added to the
    # temporary file when the main file is deleted. NOTE: if there were a
    # temporary file and a rename() as there should be, this should really
    # happen after the write but before the rename.
    self.properties().update(self.properties())

    props = self.properties()
    props[PropertyName(*GETContentType.qname())] = GETContentType.fromString(generateContentType(MimeType("text", "xml", params={"charset": "utf-8"})))
    props[PropertyName.fromElement(NotificationType)] = NotificationType(json.dumps(notificationtype))
    props[PropertyName.fromElement(TwistedGETContentMD5)] = TwistedGETContentMD5.fromString(md5)

    # FIXME: the property store's flush() method may already have been
    # added to the transaction, but we need to add it again to make sure it
    # happens _after_ the new file has been written. we may end up doing
    # the work multiple times, and external callers to property-
    # manipulation methods won't work.
    self._transaction.addOperation(self.properties().flush, "post-update property flush")
def changed(self, contentType, dispositionName, md5, size):
    """
    Record new attachment metadata after the content changed.

    Dropbox attachments never change their path, so dispositionName is
    ignored. Updates the in-memory attributes and the attachment row, and
    refreshes the created/modified timestamps from the database.
    """
    self._contentType = contentType
    self._md5 = md5
    self._size = size

    att = self._attachmentSchema
    # Persist the new metadata and read back the server-side timestamps.
    rows = yield Update(
        {
            att.CONTENT_TYPE: generateContentType(self._contentType),
            att.SIZE: self._size,
            att.MD5: self._md5,
            att.MODIFIED: utcNowSQL,
        },
        Where=(att.ATTACHMENT_ID == self._attachmentID),
        Return=(att.CREATED, att.MODIFIED),
    ).on(self._txn)
    createdValue, modifiedValue = rows[0]
    self._created = parseSQLTimestamp(createdValue)
    self._modified = parseSQLTimestamp(modifiedValue)
def recv_get_attachment_data(self, txn, request, stream):
    """
    Handle a cross-pod getAttachmentData request (arguments as documented
    on L{send_get_attachment_data}): look up the attachment in the target
    home, stream its content to C{stream}, and report its content type and
    name back to the caller.

    @param request: request arguments
    @type request: C{dict}
    @raise InvalidAttachmentOperation: when the attachment ID is unknown
    """
    home, _ignore = yield self._getStoreObjectForRequest(txn, request)
    attachmentID = request["attachmentID"]
    attachment = yield home.getAttachmentByID(attachmentID)
    if attachment is None:
        raise InvalidAttachmentOperation(
            "Attachment is missing: {}".format(attachmentID))
    attachment.retrieve(stream)
    returnValue(
        (generateContentType(attachment.contentType()), attachment.name(),)
    )
def changed(self, contentType, dispositionName, md5, size):
    """
    Update attachment metadata after its content changed. Always update
    name to current disposition name.

    Writes the new content type, size, MD5, path (disposition name) and a
    fresh modification time to the attachment row, then refreshes the
    cached created/modified timestamps from the returned row.
    """
    self._contentType = contentType
    self._name = dispositionName
    self._md5 = md5
    self._size = size
    att = self._attachmentSchema
    # Single UPDATE that also returns CREATED/MODIFIED so the in-memory
    # timestamps match what the database now holds.
    self._created, self._modified = map(parseSQLTimestamp, (yield Update(
        {
            att.CONTENT_TYPE: generateContentType(self._contentType),
            att.SIZE: self._size,
            att.MD5: self._md5,
            att.MODIFIED: utcNowSQL,
            att.PATH: self._name,
        },
        Where=(att.ATTACHMENT_ID == self._attachmentID),
        Return=(att.CREATED, att.MODIFIED)).on(self._txn))[0])
def changed(self, contentType, dispositionName, md5, size):
    """
    Update attachment metadata after its content changed. Dropbox
    attachments never change their path - ignore dispositionName.

    Writes content type, size, MD5 and a fresh modification time to the
    attachment row, then refreshes the cached created/modified timestamps
    from the returned row.
    """
    self._contentType = contentType
    self._md5 = md5
    self._size = size
    att = self._attachmentSchema
    # Single UPDATE returning CREATED/MODIFIED so the cached timestamps
    # reflect the database state.
    self._created, self._modified = map(
        parseSQLTimestamp,
        (yield Update(
            {
                att.CONTENT_TYPE : generateContentType(self._contentType),
                att.SIZE : self._size,
                att.MD5 : self._md5,
                att.MODIFIED : utcNowSQL,
            },
            Where=(att.ATTACHMENT_ID == self._attachmentID),
            Return=(att.CREATED, att.MODIFIED)).on(self._txn))[0]
    )
def changed(self, contentType, dispositionName, md5, size):
    """
    Record new attachment metadata after the content changed, always
    updating the stored path to the current disposition name.

    Updates the in-memory attributes and the attachment row, then refreshes
    the created/modified timestamps from the database.
    """
    self._contentType = contentType
    self._name = dispositionName
    self._md5 = md5
    self._size = size

    att = self._attachmentSchema
    # Persist the new metadata and read back the server-side timestamps.
    rows = yield Update(
        {
            att.CONTENT_TYPE: generateContentType(self._contentType),
            att.SIZE: self._size,
            att.MD5: self._md5,
            att.MODIFIED: utcNowSQL,
            att.PATH: self._name,
        },
        Where=(att.ATTACHMENT_ID == self._attachmentID),
        Return=(att.CREATED, att.MODIFIED),
    ).on(self._txn)
    createdValue, modifiedValue = rows[0]
    self._created = parseSQLTimestamp(createdValue)
    self._modified = parseSQLTimestamp(modifiedValue)
def storeResource(
    request,
    source=None, source_uri=None, data=None,
    destination=None, destination_uri=None,
    deletesource=False,
    depth="0"
):
    """
    Function that does common PUT/COPY/MOVE behaviour.

    NOTE(review): written in old-style Twisted deferredGenerator form
    (waitForDeferred/yield/getResult) — presumably decorated at the
    definition site; the decorator is not visible in this chunk.

    @param request: the L{txweb2.server.Request} for the current HTTP request.
    @param source: the L{DAVFile} for the source resource to copy from, or None if source data
        is to be read from the request.
    @param source_uri: the URI for the source resource.
    @param data: a C{str} to copy data from instead of the request stream.
    @param destination: the L{DAVFile} for the destination resource to copy into.
    @param destination_uri: the URI for the destination resource.
    @param deletesource: True if the source resource is to be deleted on successful completion, False otherwise.
    @param depth: a C{str} containing the COPY/MOVE Depth header value.
    @return: status response.
    """
    # Validate the argument combinations; log everything on failure since
    # this indicates a programming error at a call site.
    try:
        assert request is not None and destination is not None and destination_uri is not None
        assert (source is None) or (source is not None and source_uri is not None)
        assert not deletesource or (deletesource and source is not None)
    except AssertionError:
        log.error("Invalid arguments to storeResource():")
        log.error("request=%s\n" % (request,))
        log.error("source=%s\n" % (source,))
        log.error("source_uri=%s\n" % (source_uri,))
        log.error("data=%s\n" % (data,))
        log.error("destination=%s\n" % (destination,))
        log.error("destination_uri=%s\n" % (destination_uri,))
        log.error("deletesource=%s\n" % (deletesource,))
        log.error("depth=%s\n" % (depth,))
        raise

    class RollbackState(object):
        """
        This class encapsulates the state needed to rollback the entire
        PUT/COPY/MOVE transaction, leaving the server state the same as it
        was before the request was processed. The DoRollback method will
        actually execute the rollback operations.
        """

        def __init__(self):
            # active: False once either Rollback or Commit has run.
            self.active = True
            # Backup copies of the source/destination files, if made.
            self.source_copy = None
            self.destination_copy = None
            # Whether the destination was newly created by this request.
            self.destination_created = False
            # Whether the source was deleted (MOVE semantics).
            self.source_deleted = False

        def Rollback(self):
            """
            Rollback the server state. Do not allow this to raise another
            exception. If rollback fails then we are going to be left in an
            awkward state that will need to be cleaned up eventually.
            """
            if self.active:
                self.active = False
                log.error("Rollback: rollback")
                try:
                    # Restore a deleted source from its backup.
                    if self.source_copy and self.source_deleted:
                        self.source_copy.moveTo(source.fp)
                        log.error("Rollback: source restored %s to %s" % (self.source_copy.path, source.fp.path))
                        self.source_copy = None
                        self.source_deleted = False
                    # Restore an overwritten destination from its backup,
                    # or remove a destination we created.
                    if self.destination_copy:
                        destination.fp.remove()
                        log.error("Rollback: destination restored %s to %s" % (self.destination_copy.path, destination.fp.path))
                        self.destination_copy.moveTo(destination.fp)
                        self.destination_copy = None
                    elif self.destination_created:
                        destination.fp.remove()
                        log.error("Rollback: destination removed %s" % (destination.fp.path,))
                        self.destination_created = False
                except:
                    # Deliberately swallow everything: rollback must not
                    # raise (see docstring above).
                    log.error("Rollback: exception caught and not handled: %s" % Failure())

        def Commit(self):
            """
            Commit the resource changes by wiping the rollback state.
            """
            if self.active:
                log.error("Rollback: commit")
                self.active = False
                # Remove the now-unneeded backups.
                if self.source_copy:
                    self.source_copy.remove()
                    log.error("Rollback: removed source backup %s" % (self.source_copy.path,))
                    self.source_copy = None
                if self.destination_copy:
                    self.destination_copy.remove()
                    log.error("Rollback: removed destination backup %s" % (self.destination_copy.path,))
                    self.destination_copy = None
                self.destination_created = False
                self.source_deleted = False

    rollback = RollbackState()

    try:
        """
        Handle validation operations here.
        """

        """
        Handle rollback setup here.
        """

        # Do quota checks on destination and source before we start messing with adding other files
        destquota = waitForDeferred(destination.quota(request))
        yield destquota
        destquota = destquota.getResult()
        if destquota is not None and destination.exists():
            old_dest_size = waitForDeferred(destination.quotaSize(request))
            yield old_dest_size
            old_dest_size = old_dest_size.getResult()
        else:
            old_dest_size = 0

        if source is not None:
            sourcequota = waitForDeferred(source.quota(request))
            yield sourcequota
            sourcequota = sourcequota.getResult()
            if sourcequota is not None and source.exists():
                old_source_size = waitForDeferred(source.quotaSize(request))
                yield old_source_size
                old_source_size = old_source_size.getResult()
            else:
                old_source_size = 0
        else:
            sourcequota = None
            old_source_size = 0

        # We may need to restore the original resource data if the PUT/COPY/MOVE fails,
        # so rename the original file in case we need to rollback.
        overwrite = destination.exists()
        if overwrite:
            rollback.destination_copy = FilePath(destination.fp.path)
            rollback.destination_copy.path += ".rollback"
            destination.fp.copyTo(rollback.destination_copy)
        else:
            rollback.destination_created = True

        if deletesource:
            rollback.source_copy = FilePath(source.fp.path)
            rollback.source_copy.path += ".rollback"
            source.fp.copyTo(rollback.source_copy)

        """
        Handle actual store operations here.
        """

        # Do put or copy based on whether source exists
        if source is not None:
            response = maybeDeferred(copy, source.fp, destination.fp, destination_uri, depth)
        else:
            # PUT path: stream either the supplied data or the request body
            # through an MD5-computing wrapper.
            datastream = request.stream
            if data is not None:
                datastream = MemoryStream(data)
            md5 = MD5Stream(datastream)
            response = maybeDeferred(put, md5, destination.fp)

        response = waitForDeferred(response)
        yield response
        response = response.getResult()

        # Update the MD5 value on the resource
        if source is not None:
            # Copy MD5 value from source to destination
            if source.hasDeadProperty(TwistedGETContentMD5):
                md5 = source.readDeadProperty(TwistedGETContentMD5)
                destination.writeDeadProperty(md5)
        else:
            # Finish MD5 calc and write dead property
            md5.close()
            md5 = md5.getMD5()
            destination.writeDeadProperty(TwistedGETContentMD5.fromString(md5))

        # Update the content-type value on the resource if it is not been copied or moved
        if source is None:
            content_type = request.headers.getHeader("content-type")
            if content_type is not None:
                destination.writeDeadProperty(davxml.GETContentType.fromString(generateContentType(content_type)))

        response = IResponse(response)

        # Do quota check on destination
        if destquota is not None:
            # Get size of new/old resources
            new_dest_size = waitForDeferred(destination.quotaSize(request))
            yield new_dest_size
            new_dest_size = new_dest_size.getResult()
            diff_size = new_dest_size - old_dest_size
            if diff_size >= destquota[0]:
                log.error("Over quota: available %d, need %d" % (destquota[0], diff_size))
                raise HTTPError(ErrorResponse(
                    responsecode.INSUFFICIENT_STORAGE_SPACE,
                    (dav_namespace, "quota-not-exceeded")
                ))
            d = waitForDeferred(destination.quotaSizeAdjust(request, diff_size))
            yield d
            d.getResult()

        if deletesource:
            # Delete the source resource
            if sourcequota is not None:
                delete_size = 0 - old_source_size
                d = waitForDeferred(source.quotaSizeAdjust(request, delete_size))
                yield d
                d.getResult()

            delete(source_uri, source.fp, depth)
            rollback.source_deleted = True

        # Can now commit changes and forget the rollback details
        rollback.Commit()

        yield response
        return

    except:
        # Roll back changes to original server state. Note this may do nothing
        # if the rollback has already ocurred or changes already committed.
        rollback.Rollback()
        raise