def test_delete(self):
    """Deleting a key removes it for every store sharing the backend."""
    store = self.make_store()
    other = self.make_store()
    with write_locked(store):
        store['foo'] = 'bar'
    with write_locked(store):
        del store['foo']
    with read_locked(store):
        self.assertThat(lambda: store['foo'], raises(KeyError))
    # The deletion is immediately visible to other store instances.
    with read_locked(other):
        self.assertThat(lambda: other['foo'], raises(KeyError))
def discard(self, instances, force=False):
    """Discard instances.

    :param instances: A list of string ids previously returned from a
        provision() call.
    :param force: When False (the default), as long as the cache is above
        the reserved count, discards will be passed to the discard routine
        immediately. Otherwise they will be held indefinitely. When True,
        instances are never held in reserve.
    """
    instances = list(instances)
    # Every id handed out by this cache carries the cache-name prefix;
    # reject ids that belong to some other cache.
    prefix = self.name + '-'
    for instance in instances:
        assert instance.startswith(prefix), \
            "instance %r not owned by cache %r" % (instance, self.name)
    # Strip the prefix to recover the backend-level instance ids.
    instances = [instance[len(prefix):] for instance in instances]
    # Lock first, to avoid races.
    to_discard = []
    with write_locked(self.store):
        allocated = len(self._get_set('allocated/' + self.name))
        # Number of returned instances we may keep to top the reserve
        # back up; anything past that index is really discarded.
        keep_count = self.reserve - allocated + len(instances)
        for pos, instance in enumerate(instances):
            if force or pos >= keep_count:
                to_discard.append(instance)
        # All returned instances leave the allocated set; the kept ones
        # remain in the pool and so become reserve capacity again.
        self._set_remove('allocated/' + self.name, instances)
        # XXX: Future - avoid long locks by having a gc queue and moving
        # instances in there, and then doing the api call and finally cleanup.
        for instance in to_discard:
            del self.store['resource/' + instance]
        self._set_remove('pool/' + self.name, to_discard)
    if not to_discard:
        return
    # Only now hand the truly-discarded ids to the backing source.
    self.source.discard(to_discard)
def fill_reserve(self):
    """Top the pool up to the reserve level if it has dropped below it."""
    with write_locked(self.store):
        pool = set(self._get_set('pool/' + self.name))
        shortfall = self.reserve - len(pool)
        if shortfall:
            self._get_resources(shortfall)
def test_get(self):
    """A stored value is readable from any store sharing the backend."""
    store = self.make_store()
    other = self.make_store()
    with write_locked(store):
        store['foo'] = 'bar'
    with read_locked(store):
        self.assertEqual('bar', store['foo'])
    # The write is immediately visible to other store instances.
    with read_locked(other):
        self.assertEqual('bar', other['foo'])
def provision_from_cache(self, count):
    """Request up to count instances but only cached ones.

    This differs from provision() in that it will return up to the
    requested amount rather than all-or-nothing, and it never triggers a
    backend-provisioning call.
    """
    with write_locked(self.store):
        pooled = set(self._get_set('pool/' + self.name))
        in_use = set(self._get_set('allocated/' + self.name))
        # Hand out at most `count` of the idle pooled instances.
        available = list(pooled - in_use)[:count]
        self._update_set('allocated/' + self.name, available)
        return self._external_name(available)
def test_lock_read_reentrant(self):
    # Read locks nest: taking lock_read() while already read-locked must
    # succeed, and reads must still work after the inner unlock() — i.e.
    # releasing the inner lock does not drop the outer one.
    s = self.make_store()
    with write_locked(s):
        s['f'] = 't'
    s.lock_read()
    try:
        try:
            s.lock_read()
            # Readable while doubly locked.
            s['f']
        finally:
            s.unlock()
        # Still read-locked here: only the inner lock was released.
        s['f']
    finally:
        s.unlock()
def provision(self, count):
    """Request count instances from the cache.

    Instance ids that are returned are prefixed with the cache name, to
    ensure no collisions between layered sources.

    :return: A list of instance ids.
    """
    with write_locked(self.store):
        # XXX: Future, have a provisionally allocated set and move cached
        # entries there, then do the blocking API calls, then return
        # everything.
        pooled = set(self._get_set('pool/' + self.name))
        if self.maximum and (len(pooled) + count) > self.maximum:
            raise ValueError('Instance limit exceeded.')
        in_use = set(self._get_set('allocated/' + self.name))
        # Satisfy as much of the request as possible from idle pool
        # members, then provision the remainder from the backend.
        reused = list(pooled - in_use)[:count]
        fresh = self._get_resources(count - len(reused))
        granted = fresh + reused
        self._update_set('allocated/' + self.name, granted)
        return {self.name + '-' + instance for instance in granted}
def test_put(self):
    """Storing a value under a write lock succeeds."""
    store = self.make_store()
    with write_locked(store):
        store['foo'] = 'bar'
def test_write_locked(self):
    """write_locked holds the write lock exactly for the context's span."""
    store = memory.Store({})
    self.assertEqual('u', store._lock)
    with write_locked(store):
        self.assertEqual('w', store._lock)
    # Lock is released again on exit.
    self.assertEqual('u', store._lock)