def test_delete_returning(self):
    """
    L{Delete} with a C{Return} clause returns the values from the deleted
    rows, and an empty result once the rows are gone.
    """
    txn = self.transactionUnderTest()
    cs = schema.CALENDARSERVER
    yield Insert({cs.NAME: "TEST", cs.VALUE: "Value"}).on(txn)
    yield self.commit()

    txn = self.transactionUnderTest()
    value = yield Delete(
        From=cs,
        Where=(cs.NAME == "TEST"),
        Return=cs.VALUE,
    ).on(txn)
    self.assertEqual(list(value), [["Value"]])

    txn = self.transactionUnderTest()
    value = yield Delete(
        From=cs,
        Where=(cs.NAME == "TEST"),
        Return=cs.VALUE,
    ).on(txn)
    self.assertEqual(list(value), [])
def removeNotificationsForUUID(self, uuid):

    # Get NOTIFICATION_HOME.RESOURCE_ID
    nh = schema.NOTIFICATION_HOME
    kwds = {"UUID": uuid}
    rows = (yield Select(
        [nh.RESOURCE_ID, ],
        From=nh,
        Where=(nh.OWNER_UID == Parameter("UUID")),
    ).on(self.txn, **kwds))

    if rows:
        resourceID = rows[0][0]

        # Delete NOTIFICATION rows
        if not self.options["dry-run"]:
            no = schema.NOTIFICATION
            kwds = {"ResourceID": resourceID}
            yield Delete(
                From=no,
                Where=(no.NOTIFICATION_HOME_RESOURCE_ID == Parameter("ResourceID")),
            ).on(self.txn, **kwds)

        # Delete NOTIFICATION_HOME (will cascade to NOTIFICATION_OBJECT_REVISIONS)
        if not self.options["dry-run"]:
            kwds = {"UUID": uuid}
            yield Delete(
                From=nh,
                Where=(nh.OWNER_UID == Parameter("UUID")),
            ).on(self.txn, **kwds)
def deletesome(cls, transaction, where, returnCols=None):
    """
    Delete all rows matching the where expression from the table that
    corresponds to C{cls}.
    """
    if transaction.dbtype.dialect == ORACLE_DIALECT and returnCols is not None:
        # Oracle cannot return multiple rows in the RETURNING clause so
        # we have to split this into a SELECT followed by a DELETE
        if not isinstance(returnCols, (tuple, list)):
            returnCols = [returnCols, ]
        result = yield Select(
            returnCols,
            From=cls.table,
            Where=where,
        ).on(transaction)
        yield Delete(
            From=cls.table,
            Where=where,
        ).on(transaction)
    else:
        result = yield Delete(
            From=cls.table,
            Where=where,
            Return=returnCols,
        ).on(transaction)
    returnValue(result)
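A minimal usage sketch of the Oracle-aware deletesome above. GroupRecord, its groupUID attribute, and purgeGroup are assumed names for illustration, not part of the snippets here; the sketch only relies on the Record column-attribute convention visible in deleteremotes below.

# Hypothetical caller: delete all rows for one group UID and collect the
# deleted UIDs.  On Oracle this runs as SELECT-then-DELETE; elsewhere it is
# a single DELETE ... RETURNING.  Either way "rows" is a list of row tuples.
@inlineCallbacks
def purgeGroup(txn, groupUID):
    rows = yield GroupRecord.deletesome(
        txn,
        GroupRecord.groupUID == groupUID,
        returnCols=GroupRecord.groupUID,
    )
    returnValue([row[0] for row in rows])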
def update(self, txn):
    if self.useDirectoryBasedDelegates:
        # Pull in delegate assignments from the directory and stick them
        # into the delegate db
        recordsWithDirectoryBasedDelegates = yield self.directoryBasedDelegatesSource()
        externalAssignments = {}
        for record in recordsWithDirectoryBasedDelegates:
            try:
                readWriteProxy = record.readWriteProxy
            except AttributeError:
                readWriteProxy = None
            try:
                readOnlyProxy = record.readOnlyProxy
            except AttributeError:
                readOnlyProxy = None
            if readOnlyProxy or readWriteProxy:
                externalAssignments[record.uid] = (readOnlyProxy, readWriteProxy)
        yield self.scheduleExternalAssignments(txn, externalAssignments)

    # Figure out which groups matter
    groupUIDs = yield self.groupsToRefresh(txn)
    # self.log.debug(
    #     "Groups to refresh: {g}", g=groupUIDs
    # )

    gr = schema.GROUPS
    if config.AutomaticPurging.Enabled and groupUIDs:
        # Remove unused groups and groups that have not been seen in a while
        dateLimit = (
            datetime.datetime.utcnow() -
            datetime.timedelta(seconds=float(config.AutomaticPurging.GroupPurgeIntervalSeconds))
        )
        rows = yield Delete(
            From=gr,
            Where=((gr.EXTANT == 0).And(gr.MODIFIED < dateLimit)).Or(
                gr.GROUP_UID.NotIn(Parameter("groupUIDs", len(groupUIDs)))
            ) if groupUIDs else None,
            Return=[gr.GROUP_UID],
        ).on(txn, groupUIDs=groupUIDs)
    else:
        # Remove unused groups
        rows = yield Delete(
            From=gr,
            Where=gr.GROUP_UID.NotIn(
                Parameter("groupUIDs", len(groupUIDs))
            ) if groupUIDs else None,
            Return=[gr.GROUP_UID],
        ).on(txn, groupUIDs=groupUIDs)
    deletedGroupUIDs = [row[0] for row in rows]
    if deletedGroupUIDs:
        self.log.debug("Deleted old or unused groups {d}", d=deletedGroupUIDs)

    # For each of those groups, create a per-group refresh work item
    for groupUID in set(groupUIDs) - set(deletedGroupUIDs):
        self.log.debug("Enqueuing group refresh for {u}", u=groupUID)
        yield GroupRefreshWork.reschedule(txn, 0, groupUid=groupUID)
def removeAttachments(self, resourceID):

    # Get ATTACHMENT paths
    at = schema.ATTACHMENT
    kwds = {"resourceID": resourceID}
    rows = (yield Select(
        [at.PATH, ],
        From=at,
        Where=(at.CALENDAR_HOME_RESOURCE_ID == Parameter("resourceID")),
    ).on(self.txn, **kwds))

    if rows:
        self.attachments.update([row[0] for row in rows])

        # Delete ATTACHMENT rows
        if not self.options["dry-run"]:
            at = schema.ATTACHMENT
            kwds = {"resourceID": resourceID}
            yield Delete(
                From=at,
                Where=(at.CALENDAR_HOME_RESOURCE_ID == Parameter("resourceID")),
            ).on(self.txn, **kwds)

    returnValue(len(rows) if rows else 0)
def doWork(self):

    # Delete any other work items for this UID
    yield Delete(
        From=self.table,
        Where=self.group,
    ).on(self.transaction)

    # If not enabled, punt here
    if not config.AutomaticPurging.Enabled:
        returnValue(None)

    log.debug("Checking for existence of {uid} in directory", uid=self.uid)
    directory = self.transaction.store().directoryService()
    record = yield directory.recordWithUID(self.uid)

    if record is None:
        # Schedule purge of this UID a week from now
        notBefore = (
            datetime.datetime.utcnow() +
            datetime.timedelta(seconds=config.AutomaticPurging.PurgeIntervalSeconds)
        )
        log.warn(
            "Principal {uid} is no longer in the directory; scheduling clean-up at {when}",
            uid=self.uid, when=notBefore
        )
        yield self.transaction.enqueue(
            PrincipalPurgeWork,
            uid=self.uid,
            notBefore=notBefore
        )
    else:
        log.debug("{uid} is still in the directory", uid=self.uid)
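The leading Delete above is the work-deduplication idiom shared by several doWork implementations in this section: the work item's group attribute is a WHERE clause matching every queued row with the same key, so the running item clears its duplicates before doing the real work. A hedged sketch of the idiom; ExampleWork, schema.EXAMPLE_WORK, and the UID column are assumed names for illustration only.

# Sketch of the dedup idiom, under the assumed names noted above.
class ExampleWork(WorkItem, fromTable(schema.EXAMPLE_WORK)):

    # "group" matches all queued rows that share this item's UID
    group = property(lambda self: (self.table.UID == self.uid))

    @inlineCallbacks
    def doWork(self):
        # Clear duplicate work items for this UID, exactly as above
        yield Delete(
            From=self.table,
            Where=self.group,
        ).on(self.transaction)
        # ... real work follows ...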
def doWork(self):

    # Delete any other work items for this UID
    yield Delete(
        From=self.table,
        Where=self.group,
    ).on(self.transaction)

    # If not enabled, punt here
    if not config.AutomaticPurging.Enabled:
        returnValue(None)

    # Check for UID in directory again
    log.debug("One last existence check for {uid}", uid=self.uid)
    directory = self.transaction.store().directoryService()
    record = yield directory.recordWithUID(self.uid)

    if record is None:
        # Time to go
        service = PurgePrincipalService(self.transaction.store())
        log.warn(
            "Cleaning up future events for principal {uid} since they are no longer in directory",
            uid=self.uid
        )
        yield service.purgeUIDs(
            self.transaction.store(), directory,
            [self.uid], proxies=True, when=None
        )
    else:
        log.debug("{uid} has re-appeared in the directory", uid=self.uid)
def deleteremotes(cls, txn, homeid, remotes):
    return Delete(
        From=cls.table,
        Where=(cls.calendarHomeResourceID == homeid).And(
            cls.remoteResourceID.In(Parameter("remotes", len(remotes)))
        ),
    ).on(txn, remotes=remotes)
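Note the DAL convention used here and throughout this section: an In() over a variable-length list requires a sized Parameter(name, len(seq)) when the query is built, with the same name passed as a keyword when it is executed. A hedged sketch of the convention; the rec table and its STATUS column are assumptions for illustration.

# Sketch of the sized-Parameter convention.  Callers are expected to guard
# against an empty list, since Parameter("statuses", 0) would build an
# empty IN () clause.
def deleteByStatuses(txn, rec, statuses):
    return Delete(
        From=rec,
        Where=rec.STATUS.In(Parameter("statuses", len(statuses))),
    ).on(txn, statuses=statuses)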
def removeRevisionsForCalendarResourceID(self, resourceID):
    if not self.options["dry-run"]:
        rev = schema.CALENDAR_OBJECT_REVISIONS
        kwds = {"ResourceID": resourceID}
        yield Delete(
            From=rev,
            Where=(rev.CALENDAR_RESOURCE_ID == Parameter("ResourceID")),
        ).on(self.txn, **kwds)
def removeHomeForResourceID(self, resourceID):
    if not self.options["dry-run"]:
        ch = schema.CALENDAR_HOME
        kwds = {"ResourceID": resourceID}
        yield Delete(
            From=ch,
            Where=(ch.RESOURCE_ID == Parameter("ResourceID")),
        ).on(self.txn, **kwds)
def removeProperty(txn, propelement):
    pname = PropertyName.fromElement(propelement)

    rp = schema.RESOURCE_PROPERTY
    yield Delete(
        From=rp,
        Where=rp.NAME == pname.toString(),
    ).on(txn)
def removePropertiesForResourceID(self, resourceID):
    if not self.options["dry-run"]:
        props = schema.RESOURCE_PROPERTY
        kwds = {"ResourceID": resourceID}
        yield Delete(
            From=props,
            Where=(props.RESOURCE_ID == Parameter("ResourceID")),
        ).on(self.txn, **kwds)
def deletesome(cls, transaction, where):
    """
    Delete all rows matching the where expression from the table that
    corresponds to C{cls}.
    """
    return Delete(
        From=cls.table,
        Where=where,
    ).on(transaction)
def removedHome(cls, txn, homeID):
    """
    A calendar home is being removed so all of its attachments must go too.
    When removing, we don't care about quota adjustment as there will be no
    quota once the home is removed.

    TODO: this needs to be transactional wrt the actual file deletes.
    """
    att = cls._attachmentSchema
    attco = cls._attachmentLinkSchema

    rows = (yield Select(
        [att.ATTACHMENT_ID, att.DROPBOX_ID, ],
        From=att,
        Where=(att.CALENDAR_HOME_RESOURCE_ID == homeID),
    ).on(txn))

    for attachmentID, dropboxID in rows:
        if dropboxID != ".":
            attachment = DropBoxAttachment(txn, attachmentID, None, None)
        else:
            attachment = ManagedAttachment(txn, attachmentID, None, None)
        attachment = (yield attachment.initFromStore())
        if attachment._path.exists():
            attachment.removePaths()

    yield Delete(
        From=attco,
        Where=(attco.ATTACHMENT_ID.In(Select(
            [att.ATTACHMENT_ID, ],
            From=att,
            Where=(att.CALENDAR_HOME_RESOURCE_ID == homeID),
        ))),
    ).on(txn)

    yield Delete(
        From=att,
        Where=(att.CALENDAR_HOME_RESOURCE_ID == homeID),
    ).on(txn)
def remove(self):
    """
    Remove DB rows corresponding to this notification home.
    """
    # Delete NOTIFICATION rows
    no = schema.NOTIFICATION
    kwds = {"ResourceID": self._resourceID}
    yield Delete(
        From=no,
        Where=(no.NOTIFICATION_HOME_RESOURCE_ID == Parameter("ResourceID")),
    ).on(self._txn, **kwds)

    # Delete NOTIFICATION_HOME (will cascade to NOTIFICATION_OBJECT_REVISIONS)
    nh = schema.NOTIFICATION_HOME
    yield Delete(
        From=nh,
        Where=(nh.RESOURCE_ID == Parameter("ResourceID")),
    ).on(self._txn, **kwds)
def _deleteSyncTokenQuery(cls):
    """
    DAL query to remove all child revision information. The revision for
    the collection itself is not touched.
    """
    rev = cls._revisionsSchema
    return Delete(
        From=rev,
        Where=(rev.HOME_RESOURCE_ID == Parameter("homeID")).And(
            rev.RESOURCE_ID == Parameter("resourceID")).And(
            rev.COLLECTION_NAME == None),
    )
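The query above is built once with named Parameter placeholders and executed later. A hedged sketch of how a caller binds them, mirroring the .on(txn, **kwds) convention used throughout this section; the method name and the attribute paths are assumptions for illustration.

# Hypothetical caller: run the prepared sync-token deletion for one
# collection.  The Parameter names in the query ("homeID", "resourceID")
# become keyword arguments at execution time.
@inlineCallbacks
def _deleteSyncToken(self):
    yield self._deleteSyncTokenQuery.on(
        self._txn,
        homeID=self._home._resourceID,
        resourceID=self._resourceID,
    )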
def doWork(self):

    # Delete any other work items for this UID
    yield Delete(
        From=self.table,
        Where=self.group,
    ).on(self.transaction)

    # NB We do not check config.AutomaticPurging.Enabled here because if
    # this work item was enqueued we always need to complete it

    # Check for pending scheduling operations
    sow = schema.SCHEDULE_ORGANIZER_WORK
    sosw = schema.SCHEDULE_ORGANIZER_SEND_WORK
    srw = schema.SCHEDULE_REPLY_WORK
    srcw = schema.SCHEDULE_REPLY_CANCEL_WORK
    rows = yield Select(
        [sow.HOME_RESOURCE_ID],
        From=sow,
        Where=(sow.HOME_RESOURCE_ID == self.homeResourceID),
        SetExpression=Union(
            Select(
                [sosw.HOME_RESOURCE_ID],
                From=sosw,
                Where=(sosw.HOME_RESOURCE_ID == self.homeResourceID),
                SetExpression=Union(
                    Select(
                        [srw.HOME_RESOURCE_ID],
                        From=srw,
                        Where=(srw.HOME_RESOURCE_ID == self.homeResourceID),
                        SetExpression=Union(
                            Select(
                                [srcw.HOME_RESOURCE_ID],
                                From=srcw,
                                Where=(srcw.HOME_RESOURCE_ID == self.homeResourceID),
                            )
                        ),
                    )
                ),
            )
        ),
    ).on(self.transaction)

    if rows and len(rows):
        # Regenerate this job
        notBefore = (
            datetime.datetime.utcnow() +
            datetime.timedelta(seconds=config.AutomaticPurging.HomePurgeDelaySeconds)
        )
        yield self.transaction.enqueue(
            PrincipalPurgeHomeWork,
            homeResourceID=self.homeResourceID,
            notBefore=notBefore
        )
    else:
        # Get the home and remove it - only if properly marked as being purged
        home = yield self.transaction.calendarHomeWithResourceID(self.homeResourceID)
        if home.purging():
            yield home.remove()
def populate(self):
    yield populateCalendarsFrom(self.requirements, self.storeUnderTest())
    self.notifierFactory.reset()

    txn = self._sqlCalendarStore.newTransaction()
    # The Delete must be yielded, otherwise it may not have executed
    # before the commit
    yield Delete(
        From=schema.ATTACHMENT,
        Where=None
    ).on(txn)
    yield txn.commit()
def setUp(self):
    self.patch(config, "EnableManagedAttachments", True)

    yield super(AttachmentMigrationTests, self).setUp()

    self._sqlCalendarStore.enableManagedAttachments = True

    txn = self.transactionUnderTest()
    cs = schema.CALENDARSERVER
    yield Delete(
        From=cs,
        Where=cs.NAME == "MANAGED-ATTACHMENTS"
    ).on(txn)
    yield self.commit()
def doWork(self):

    # Find all work items with the same push ID and find the highest
    # priority.  Delete matching work items.
    results = (yield Select(
        [self.table.WORK_ID, self.table.JOB_ID, self.table.PUSH_PRIORITY],
        From=self.table,
        Where=self.table.PUSH_ID == self.pushID
    ).on(self.transaction))

    maxPriority = self.pushPriority

    # If there are other enqueued work items for this push ID, find the
    # highest priority one and use that value.  Note that L{results} will
    # not contain this work item as job processing behavior will have
    # already deleted it.  So we need to make sure the max priority
    # calculation includes this one.
    if results:
        workIDs, jobIDs, priorities = zip(*results)
        maxPriority = max(priorities + (self.pushPriority, ))

        # Delete the work items and jobs we selected - deleting the job will
        # ensure that there are no "orphaned" jobs left in the job queue
        # which would otherwise get to run at some later point, though not do
        # anything because there is no related work item.
        yield Delete(
            From=self.table,
            Where=self.table.WORK_ID.In(Parameter("workIDs", len(workIDs)))
        ).on(self.transaction, workIDs=workIDs)
        yield Delete(
            From=JobItem.table,  # @UndefinedVariable
            Where=JobItem.jobID.In(Parameter("jobIDs", len(jobIDs)))  # @UndefinedVariable
        ).on(self.transaction, jobIDs=jobIDs)

    pushDistributor = self.transaction._pushDistributor
    if pushDistributor is not None:
        # Convert the integer priority value back into a constant
        priority = PushPriority.lookupByValue(maxPriority)
        yield pushDistributor.enqueue(self.transaction, self.pushID, priority=priority)
def delete(self):
    """
    Delete this row from the database.

    @return: a L{Deferred} which fires with C{None} when the underlying row
        has been deleted, or fails with L{NoSuchRecord} if the underlying
        row was already deleted.
    """
    return Delete(
        From=self.table,
        Where=self._primaryKeyComparison(self._primaryKeyValue())
    ).on(self.transaction, raiseOnZeroRowCount=NoSuchRecord)
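A hedged usage sketch: because raiseOnZeroRowCount is wired to L{NoSuchRecord}, a second delete of the same row surfaces as a trappable failure rather than a silent zero-row delete. The deleteOnce helper and record variable are assumptions for illustration.

# Hypothetical usage: deleting an already-deleted record fails with
# NoSuchRecord rather than silently affecting zero rows.
@inlineCallbacks
def deleteOnce(record):
    try:
        yield record.delete()
    except NoSuchRecord:
        log.debug("row was already gone")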
def deleteBind(self, homeID, resourceID):
    if not self.options["dry-run"]:
        cb = schema.CALENDAR_BIND
        kwds = {
            "HomeID": homeID,
            "ResourceID": resourceID,
        }
        yield Delete(
            From=cb,
            Where=((cb.CALENDAR_HOME_RESOURCE_ID == Parameter("HomeID")).And(
                cb.CALENDAR_RESOURCE_ID == Parameter("ResourceID"))),
        ).on(self.txn, **kwds)
def pop(cls, transaction, *primaryKey):
    """
    Atomically retrieve and remove a row from this L{Record}'s table with a
    primary key value of C{primaryKey}.

    @return: a L{Deferred} that fires with an instance of C{cls}, or fails
        with L{NoSuchRecord} if there were no records in the database.
    @rtype: L{Deferred}
    """
    return cls._rowsFromQuery(
        transaction,
        Delete(
            Where=cls._primaryKeyComparison(primaryKey),
            From=cls.table,
            Return=list(cls.table)
        ),
        lambda: NoSuchRecord()
    ).addCallback(lambda x: x[0])
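A hedged sketch of pop in use: the Delete's Return clause hands back the full row, which _rowsFromQuery turns into a record instance, so retrieve-and-remove is a single round trip. TokenRecord, its single-column primary key, and claimToken are assumed names for illustration.

# Hypothetical usage: atomically claim and remove a row by primary key.
@inlineCallbacks
def claimToken(txn, tokenID):
    try:
        token = yield TokenRecord.pop(txn, tokenID)
    except NoSuchRecord:
        # Another worker already claimed it
        returnValue(None)
    returnValue(token)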
def doWork(self):

    log.debug(
        "ScheduleAutoReplyWork - running for ID: {id}, UID: {uid}",
        id=self.workID, uid=self.icalendarUID)

    # Delete all other work items for this resource
    yield Delete(
        From=self.table,
        Where=self.table.RESOURCE_ID == self.resourceID
    ).on(self.transaction)

    # Do reply
    yield self._sendAttendeeAutoReply()

    self._dequeued()

    log.debug(
        "ScheduleAutoReplyWork - done for ID: {id}, UID: {uid}",
        id=self.workID, uid=self.icalendarUID)
def doWork(self):

    # Delete all other work items for this group
    yield Delete(
        From=self.table,
        Where=self.group,
    ).on(self.transaction)

    groupCacher = getattr(self.transaction, "_groupCacher", None)
    if groupCacher is not None:
        try:
            yield groupCacher.refreshGroup(
                self.transaction, self.groupUid.decode("utf-8")
            )
        except Exception as e:
            log.error(
                "Failed to refresh group {group} {err}",
                group=self.groupUid, err=e)
def _initStore(self, enableManagedAttachments=True):
    """
    Build a store with certain bits cleaned out.
    """
    self.patch(config, "EnableManagedAttachments", enableManagedAttachments)

    store = yield theStoreBuilder.buildStore(
        self, {"push": StubNotifierFactory()}
    )
    store.enableManagedAttachments = enableManagedAttachments

    txn = store.newTransaction()
    cs = schema.CALENDARSERVER
    yield Delete(
        From=cs,
        Where=cs.NAME == "MANAGED-ATTACHMENTS"
    ).on(txn)
    yield txn.commit()

    returnValue(store)
def deleteCalendar(self, resourceID):

    # Need to delete any remaining CALENDAR_OBJECT_REVISIONS entries
    yield self.removeRevisionsForCalendarResourceID(resourceID)

    # Delete the CALENDAR entry (will cascade to CALENDAR_BIND and CALENDAR_OBJECT)
    if not self.options["dry-run"]:
        ca = schema.CALENDAR
        kwds = {
            "ResourceID": resourceID,
        }
        yield Delete(
            From=ca,
            Where=(ca.RESOURCE_ID == Parameter("ResourceID")),
        ).on(self.txn, **kwds)

    # Remove properties
    yield self.removePropertiesForResourceID(resourceID)
def removeFromResource(self, resourceID):

    # Delete the reference
    attco = self._attachmentLinkSchema
    yield Delete(
        From=attco,
        Where=(attco.ATTACHMENT_ID == self._attachmentID).And(
            attco.CALENDAR_OBJECT_RESOURCE_ID == resourceID),
    ).on(self._txn)

    # Check whether any references still exist; if not, remove the actual attachment
    rows = (yield Select(
        [attco.CALENDAR_OBJECT_RESOURCE_ID, ],
        From=attco,
        Where=(attco.ATTACHMENT_ID == self._attachmentID),
    ).on(self._txn))
    if len(rows) == 0:
        yield self.remove()
class PropertyStore(AbstractPropertyStore):
    """
    We are going to use memcache to cache properties per-resource/per-user.
    However, we need to be able to invalidate on a per-resource basis, in
    addition to per-resource/per-user. So we will also track in memcache
    which resource/uid tokens are valid. That way we can remove the tracking
    entry to completely invalidate all the per-resource/per-user pairs.
    """

    _cacher = Memcacher("SQL.props", pickle=True, key_normalization=False)

    def __init__(self, *a, **kw):
        raise NotImplementedError(
            "do not construct directly, call PropertyStore.load()"
        )

    _allWithID = Select(
        [prop.NAME, prop.VIEWER_UID, prop.VALUE],
        From=prop,
        Where=prop.RESOURCE_ID == Parameter("resourceID")
    )

    _allWithIDViewer = Select(
        [prop.NAME, prop.VALUE],
        From=prop,
        Where=(prop.RESOURCE_ID == Parameter("resourceID")).And(
            prop.VIEWER_UID == Parameter("viewerID"))
    )

    def _cacheToken(self, userid):
        return "{0!s}/{1}".format(self._resourceID, userid)

    @inlineCallbacks
    def _refresh(self, txn):
        """
        Load, or re-load, this object with the given transaction; first from
        memcache, then pulling from the database again.
        """
        # Cache existing properties in this object
        # Look for memcache entry first

        @inlineCallbacks
        def _cache_user_props(uid):

            # First check whether uid already has a valid cached entry
            rows = None
            if self._cacher is not None:
                valid_cached_users = yield self._cacher.get(str(self._resourceID))
                if valid_cached_users is None:
                    valid_cached_users = set()

                # Fetch cached user data if valid and present
                if uid in valid_cached_users:
                    rows = yield self._cacher.get(self._cacheToken(uid))

            # If no cached data, fetch from SQL DB and cache
            if rows is None:
                rows = yield self._allWithIDViewer.on(
                    txn,
                    resourceID=self._resourceID,
                    viewerID=uid,
                )
                if self._cacher is not None:
                    yield self._cacher.set(
                        self._cacheToken(uid),
                        rows if rows is not None else ()
                    )

                    # Mark this uid as valid
                    valid_cached_users.add(uid)
                    yield self._cacher.set(str(self._resourceID), valid_cached_users)

            for name, value in rows:
                self._cached[(name, uid)] = value

        # Cache for the owner first, then the sharee if different
        yield _cache_user_props(self._defaultUser)
        if self._perUser != self._defaultUser:
            yield _cache_user_props(self._perUser)
        if self._proxyUser != self._perUser:
            yield _cache_user_props(self._proxyUser)

    @classmethod
    @inlineCallbacks
    def load(cls, defaultuser, shareUser, proxyUser, txn, resourceID,
             created=False, notifyCallback=None):
        """
        @param notifyCallback: a callable used to trigger notifications when
            the property store changes.
        """
        self = cls.__new__(cls)
        super(PropertyStore, self).__init__(defaultuser, shareUser, proxyUser)
        self._txn = txn
        self._resourceID = resourceID
        if not self._txn.store().queryCachingEnabled():
            self._cacher = None
        self._cached = {}
        if not created:
            yield self._refresh(txn)
        self._notifyCallback = notifyCallback
        returnValue(self)

    @classmethod
    @inlineCallbacks
    def forMultipleResources(cls, defaultUser, shareeUser, proxyUser, txn,
                             childColumn, parentColumn, parentID):
        """
        Load all property stores for all objects in a collection.  This is
        used to optimize Depth:1 operations on that collection, by loading
        all relevant properties in a single query.

        @param defaultUser: the UID of the user who owns / is requesting the
            property stores; the ones whose per-user properties will be
            exposed.
        @type defaultUser: C{str}

        @param txn: the transaction within which to fetch the rows.
        @type txn: L{IAsyncTransaction}

        @param childColumn: The resource ID column for the child resources, i.e.
            the resources of the type for which this method will be loading
            the property stores.

        @param parentColumn: The resource ID column for the parent resources.
            e.g. if childColumn is addressbook object's resource ID, then
            this should be addressbook's resource ID.

        @return: a L{Deferred} that fires with a C{dict} mapping resource ID
            (a value taken from C{childColumn}) to a L{PropertyStore} for
            that ID.
        """
        childTable = TableSyntax(childColumn.model.table)
        query = Select(
            [
                childColumn,
                # XXX is that column necessary?  as per the 'on' clause it
                # has to be the same as prop.RESOURCE_ID anyway.
                prop.RESOURCE_ID, prop.NAME, prop.VIEWER_UID, prop.VALUE
            ],
            From=prop.join(childTable, prop.RESOURCE_ID == childColumn, 'right'),
            Where=parentColumn == parentID
        )
        rows = yield query.on(txn)
        stores = cls._createMultipleStores(defaultUser, shareeUser, proxyUser, txn, rows)
        returnValue(stores)

    @classmethod
    @inlineCallbacks
    def forMultipleResourcesWithResourceIDs(cls, defaultUser, shareeUser,
                                            proxyUser, txn, resourceIDs):
        """
        Load all property stores for all specified resources.  This is used
        to optimize Depth:1 operations on that collection, by loading all
        relevant properties in a single query.  Note that the caller of this
        method must make sure that the number of items being queried for is
        within a reasonable batch size.  If the caller is itself batching
        related queries, that will take care of itself.

        @param defaultUser: the UID of the user who owns / is requesting the
            property stores; the ones whose per-user properties will be
            exposed.
        @type defaultUser: C{str}

        @param txn: the transaction within which to fetch the rows.
        @type txn: L{IAsyncTransaction}

        @param resourceIDs: The set of resource ID's to query.

        @return: a L{Deferred} that fires with a C{dict} mapping resource ID
            (a value taken from C{childColumn}) to a L{PropertyStore} for
            that ID.
        """
        query = Select(
            [prop.RESOURCE_ID, prop.NAME, prop.VIEWER_UID, prop.VALUE],
            From=prop,
            Where=prop.RESOURCE_ID.In(Parameter("resourceIDs", len(resourceIDs)))
        )
        rows = yield query.on(txn, resourceIDs=resourceIDs)
        stores = cls._createMultipleStores(defaultUser, shareeUser, proxyUser, txn, rows)

        # Make sure we have a store for each resourceID even if no properties exist
        for resourceID in resourceIDs:
            if resourceID not in stores:
                store = cls.__new__(cls)
                super(PropertyStore, store).__init__(defaultUser, shareeUser, proxyUser)
                store._txn = txn
                store._resourceID = resourceID
                store._cached = {}
                stores[resourceID] = store

        returnValue(stores)

    @classmethod
    def _createMultipleStores(cls, defaultUser, shareeUser, proxyUser, txn, rows):
        """
        Create a set of stores for the set of rows passed in.
""" createdStores = {} for row in rows: if len(row) == 5: object_resource_id, resource_id, name, view_uid, value = row else: object_resource_id = None resource_id, name, view_uid, value = row if resource_id: if resource_id not in createdStores: store = cls.__new__(cls) super(PropertyStore, store).__init__(defaultUser, shareeUser, proxyUser) store._txn = txn store._resourceID = resource_id store._cached = {} createdStores[resource_id] = store createdStores[resource_id]._cached[(name, view_uid)] = value elif object_resource_id: store = cls.__new__(cls) super(PropertyStore, store).__init__(defaultUser, shareeUser, proxyUser) store._txn = txn store._resourceID = object_resource_id store._cached = {} createdStores[object_resource_id] = store return createdStores def _getitem_uid(self, key, uid): validKey(key) try: value = self._cached[(key.toString(), uid)] except KeyError: raise KeyError(key) return WebDAVDocument.fromString(value).root_element _updateQuery = Update( {prop.VALUE: Parameter("value")}, Where=(prop.RESOURCE_ID == Parameter("resourceID")).And( prop.NAME == Parameter("name")).And( prop.VIEWER_UID == Parameter("uid"))) _insertQuery = Insert({ prop.VALUE: Parameter("value"), prop.RESOURCE_ID: Parameter("resourceID"), prop.NAME: Parameter("name"), prop.VIEWER_UID: Parameter("uid") }) def _setitem_uid(self, key, value, uid): validKey(key) key_str = key.toString() value_str = value.toxml() tried = [] wasCached = [(key_str, uid) in self._cached] self._cached[(key_str, uid)] = value_str @inlineCallbacks def trySetItem(txn): if tried: yield self._refresh(txn) wasCached[:] = [(key_str, uid) in self._cached] tried.append(True) if wasCached[0]: yield self._updateQuery.on(txn, resourceID=self._resourceID, value=value_str, name=key_str, uid=uid) else: yield self._insertQuery.on(txn, resourceID=self._resourceID, value=value_str, name=key_str, uid=uid) if self._cacher is not None: self._cacher.delete(self._cacheToken(uid)) # Call the registered notification callback - we need to do this as a preCommit since it involves # a bunch of deferred operations, but this propstore api is not deferred. preCommit will execute # the deferreds properly, and it is fine to wait until everything else is done before sending the # notifications. if hasattr(self, "_notifyCallback") and self._notifyCallback is not None: self._txn.preCommit(self._notifyCallback) def justLogIt(f): f.trap(AllRetriesFailed) self.log.error("setting a property failed; probably nothing.") self._txn.subtransaction(trySetItem).addErrback(justLogIt) _deleteQuery = Delete( prop, Where=(prop.RESOURCE_ID == Parameter("resourceID")).And( prop.NAME == Parameter("name")).And( prop.VIEWER_UID == Parameter("uid"))) def _delitem_uid(self, key, uid): validKey(key) key_str = key.toString() del self._cached[(key_str, uid)] @inlineCallbacks def doIt(txn): yield self._deleteQuery.on(txn, lambda: KeyError(key), resourceID=self._resourceID, name=key_str, uid=uid) if self._cacher is not None: self._cacher.delete(self._cacheToken(uid)) # Call the registered notification callback - we need to do this as a preCommit since it involves # a bunch of deferred operations, but this propstore api is not deferred. preCommit will execute # the deferreds properly, and it is fine to wait until everything else is done before sending the # notifications. 
            if hasattr(self, "_notifyCallback") and self._notifyCallback is not None:
                self._txn.preCommit(self._notifyCallback)

        def justLogIt(f):
            f.trap(AllRetriesFailed)
            self.log.error("deleting a property failed; probably nothing.")
        self._txn.subtransaction(doIt).addErrback(justLogIt)

    def _keys_uid(self, uid):
        for cachedKey, cachedUID in self._cached.keys():
            if cachedUID == uid:
                yield PropertyName.fromString(cachedKey)

    _deleteResourceQuery = Delete(
        prop,
        Where=(prop.RESOURCE_ID == Parameter("resourceID"))
    )

    @inlineCallbacks
    def _removeResource(self):
        self._cached = {}
        yield self._deleteResourceQuery.on(self._txn, resourceID=self._resourceID)

        # Invalidate entire set of cached per-user data for this resource
        if self._cacher is not None:
            self._cacher.delete(str(self._resourceID))

    @inlineCallbacks
    def copyAllProperties(self, other):
        """
        Copy all the properties from another store into this one.  This
        needs to be done independently of the UID.
        """
        rows = yield other._allWithID.on(other._txn, resourceID=other._resourceID)
        for key_str, uid, value_str in rows:
            wasCached = [(key_str, uid) in self._cached]
            if wasCached[0]:
                yield self._updateQuery.on(
                    self._txn, resourceID=self._resourceID, value=value_str,
                    name=key_str, uid=uid)
            else:
                yield self._insertQuery.on(
                    self._txn, resourceID=self._resourceID, value=value_str,
                    name=key_str, uid=uid)

        # Invalidate entire set of cached per-user data for this resource and reload
        self._cached = {}
        if self._cacher is not None:
            self._cacher.delete(str(self._resourceID))
        yield self._refresh(self._txn)
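The two-level invalidation scheme described in the class docstring can be illustrated standalone. This is a sketch only (plain dict standing in for memcache, names invented for illustration), not CalendarServer code: dropping the single per-resource tracking entry is enough to invalidate every per-resource/per-user data entry, because readers check the tracking set before trusting cached data.

# Standalone illustration of the tracking-entry invalidation scheme.
cache = {}

def setUserProps(resourceID, uid, rows):
    cache.setdefault(str(resourceID), set()).add(uid)   # tracking entry
    cache["%s/%s" % (resourceID, uid)] = rows           # per-user entry

def invalidateResource(resourceID):
    # Removing only the tracking entry invalidates all per-user entries
    cache.pop(str(resourceID), None)

def getUserProps(resourceID, uid):
    if uid in cache.get(str(resourceID), set()):
        return cache.get("%s/%s" % (resourceID, uid))
    return None  # treated as a miss; falls back to SQL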
def doWork(self):

    # Look for other work items for this resource and ignore this one if
    # other later ones exist
    srw = schema.SCHEDULE_REFRESH_WORK
    rows = (yield Select(
        (srw.WORK_ID,),
        From=srw,
        Where=(srw.HOME_RESOURCE_ID == self.homeResourceID).And(
            srw.RESOURCE_ID == self.resourceID
        ),
    ).on(self.transaction))
    if rows:
        log.debug("Schedule refresh for resource-id: {rid} - ignored", rid=self.resourceID)
        returnValue(None)

    log.debug(
        "ScheduleRefreshWork - running for ID: {id}, UID: {uid}",
        id=self.workID, uid=self.icalendarUID)

    # Get the unique list of pending attendees and split into batches to process.
    # TODO: do a DELETE ... WHERE rownum <= N ... RETURNING attendee - but have
    # to fix Oracle to handle multi-row returning.  Would be better than entire
    # select + delete of each one, but need to make sure to use UNIQUE as there
    # may be duplicate attendees.
    sra = schema.SCHEDULE_REFRESH_ATTENDEES
    pendingAttendees = (yield Select(
        [sra.ATTENDEE, ],
        From=sra,
        Where=sra.RESOURCE_ID == self.resourceID,
    ).on(self.transaction))
    pendingAttendees = list(set([row[0] for row in pendingAttendees]))

    # Nothing left so done
    if len(pendingAttendees) == 0:
        returnValue(None)

    attendeesToProcess = pendingAttendees[:config.Scheduling.Options.AttendeeRefreshBatch]
    pendingAttendees = pendingAttendees[config.Scheduling.Options.AttendeeRefreshBatch:]

    yield Delete(
        From=sra,
        Where=(sra.RESOURCE_ID == self.resourceID).And(
            sra.ATTENDEE.In(Parameter("attendeesToProcess", len(attendeesToProcess))))
    ).on(self.transaction, attendeesToProcess=attendeesToProcess)

    # Reschedule work item if pending attendees remain.
    if len(pendingAttendees) != 0:
        notBefore = datetime.datetime.utcnow() + datetime.timedelta(
            seconds=config.Scheduling.Options.WorkQueues.AttendeeRefreshBatchIntervalSeconds)
        yield self.transaction.enqueue(
            self.__class__,
            icalendarUID=self.icalendarUID,
            homeResourceID=self.homeResourceID,
            resourceID=self.resourceID,
            attendeeCount=len(pendingAttendees),
            notBefore=notBefore
        )

    self._enqueued()

    # Do refresh
    yield self._doDelayedRefresh(attendeesToProcess)

    self._dequeued()

    log.debug(
        "ScheduleRefreshWork - done for ID: {id}, UID: {uid}",
        id=self.workID, uid=self.icalendarUID)