def routineTrylock(key):
    """Try a non-blocking acquire on *key*, record the outcome, hold briefly, release.

    Appends the boolean result of ``trylock()`` to the shared ``result`` list so the
    enclosing test can check which contender won.
    """
    lk = Lock(key, rc.scheduler)
    result.append(lk.trylock())
    # Hold (or not) the lock across a short wait so concurrent attempts overlap.
    for ev in rc.waitWithTimeout(0.5):
        yield ev
    lk.unlock()
async def routineLock(key):
    """Acquire the lock on *key*, then do a deliberately non-atomic increment of obj[0].

    The read of ``obj[0]`` and the write-back are separated by ``do_events()``; the lock
    is what keeps concurrent instances of this routine from losing updates.
    """
    lk = Lock(key, rc.scheduler)
    await lk.lock(rc)
    snapshot = obj[0]
    await rc.do_events()
    obj[0] = snapshot + 1
    lk.unlock()
def routineLock(key):
    """Generator variant: lock *key*, then perform a non-atomic increment of obj[0].

    A 0.5s wait sits between the read and the write-back, so without the lock two
    concurrent runs would race and lose an update.
    """
    lk = Lock(key, rc.scheduler)
    for ev in lk.lock(rc):
        yield ev
    snapshot = obj[0]
    for ev in rc.waitWithTimeout(0.5):
        yield ev
    obj[0] = snapshot + 1
    lk.unlock()
class SessionHandle(object):
    """Thin wrapper around a session object that adds per-session locking.

    Exposes the session's ``id`` and ``vars`` directly, and serializes access to the
    session through a ``Lock`` keyed on the session object itself.
    """

    def __init__(self, sessionobj, container):
        self.sessionobj = sessionobj
        self.container = container
        # Mirror the commonly used session attributes for convenient access.
        self.id = sessionobj.id
        self.vars = sessionobj.vars
        self._lock = Lock(sessionobj, container.scheduler)

    def lock(self):
        """Acquire the session lock (coroutine — iterate/yield to completion)."""
        for ev in self._lock.lock(self.container):
            yield ev

    def unlock(self):
        """Release the session lock."""
        self._lock.unlock()
async def routineLock(key):
    """Two-phase lock exercise: increment obj[0], then conditionally decrement it.

    Starts acquisition with ``beginlock``; a loser pauses briefly before completing the
    acquire with ``lock``. First critical section increments obj[0] non-atomically; the
    second decrements it unless it already reached 2.
    """
    lk = Lock(key, rc.scheduler)
    if not lk.beginlock(rc):
        # Did not get the lock immediately — give the holder some time.
        await rc.wait_with_timeout(0.2)
    await lk.lock(rc)
    snapshot = obj[0]
    await rc.do_events()
    obj[0] = snapshot + 1
    lk.unlock()
    await rc.do_events()
    await lk.lock(rc)
    snapshot = obj[0]
    if snapshot != 2:
        obj[0] = snapshot - 1
    lk.unlock()
async def routineLock(key):
    """Exercise beginlock/trylock: two locked, non-atomic increments of obj[0].

    If ``beginlock`` does not acquire immediately, wait for the holder to finish and
    then ``trylock``; failing that is a test error. Each critical section reads obj[0],
    yields control via ``do_events``, then writes back the incremented value.
    """
    lk = Lock(key, rc.scheduler)
    if not lk.beginlock(rc):
        await rc.wait_with_timeout(0.2)
        # By now the previous holder should have released; trylock must succeed.
        if not lk.trylock():
            raise ValueError('Not locked')
    snapshot = obj[0]
    await rc.do_events()
    obj[0] = snapshot + 1
    lk.unlock()
    await rc.do_events()
    await lk.lock(rc)
    snapshot = obj[0]
    await rc.do_events()
    obj[0] = snapshot + 1
    lk.unlock()
def routineLock(key):
    """Generator two-phase exercise: increment obj[0], then conditionally decrement.

    ``beginlock`` starts acquisition; a loser waits 0.5s before completing the acquire
    with ``lock``. The second critical section decrements obj[0] unless it reached 2.
    """
    lk = Lock(key, rc.scheduler)
    if not lk.beginlock(rc):
        # Lost the immediate acquire — pause before blocking on the full lock.
        for ev in rc.waitWithTimeout(0.5):
            yield ev
    for ev in lk.lock(rc):
        yield ev
    snapshot = obj[0]
    for ev in rc.waitWithTimeout(1.0):
        yield ev
    obj[0] = snapshot + 1
    lk.unlock()
    for ev in rc.doEvents():
        yield ev
    for ev in lk.lock(rc):
        yield ev
    snapshot = obj[0]
    if snapshot != 2:
        obj[0] = snapshot - 1
    lk.unlock()
def routineLock(key):
    """Generator beginlock/trylock exercise: two locked, non-atomic increments.

    A contender that loses ``beginlock`` waits 1.0s, then must win ``trylock`` or the
    test fails. Both critical sections read obj[0], yield across a wait, and write back
    the incremented value under the lock.
    """
    lk = Lock(key, rc.scheduler)
    if not lk.beginlock(rc):
        for ev in rc.waitWithTimeout(1.0):
            yield ev
        # The previous holder should have released by now.
        if not lk.trylock():
            raise ValueError('Not locked')
    snapshot = obj[0]
    for ev in rc.waitWithTimeout(0.5):
        yield ev
    obj[0] = snapshot + 1
    lk.unlock()
    for ev in rc.doEvents():
        yield ev
    for ev in lk.lock(rc):
        yield ev
    snapshot = obj[0]
    for ev in rc.waitWithTimeout(1.0):
        yield ev
    obj[0] = snapshot + 1
    lk.unlock()
def ipam_requestpool(self, env, params):
    """Handle the libnetwork IPAM RequestPool call: reserve an IPv4 CIDR pool.

    Validates the request (global address space only, no IPv6), normalizes the
    requested pool/subpool CIDRs, then reserves the pool in objectdb inside a
    retry loop: if the CIDR is still in use, waits for its reservation marker
    to be deleted and retries. Responds to *env* with the new pool id and CIDR.

    This is a vlcp coroutine: iterate/yield the returned generator to completion.
    """
    if params['AddressSpace'] != _GLOBAL_SPACE:
        raise ValueError(
            'Unsupported address space: must use this IPAM driver together with network driver'
        )
    if params['V6']:
        raise ValueError('IPv6 is not supported')
    # Create the reservation object with a fresh unique id.
    new_pool = IPAMReserve.create_instance(uuid1().hex)
    new_pool.pool = params['Pool']
    if new_pool.pool:
        # Normalize the CIDR into canonical "a.b.c.d/mask" form.
        subnet, mask = parse_ip4_network(new_pool.pool)
        new_pool.pool = ip4_addr.formatter(subnet) + '/' + str(mask)
    new_pool.subpool = params['SubPool']
    if new_pool.subpool:
        subnet, mask = parse_ip4_network(new_pool.subpool)
        new_pool.subpool = ip4_addr.formatter(subnet) + '/' + str(mask)
    new_pool.options = params['Options']
    if new_pool.pool:
        # Serialize concurrent requests for the same explicit pool CIDR.
        l = Lock(('dockerplugin_ipam_request_pool', new_pool.pool), self.scheduler)
        for m in l.lock(self):
            yield m
    else:
        # Auto-allocated pools are serialized by the objectdb transaction instead.
        l = None
    try:
        while True:
            # NOTE(review): `fail` is written but never read in this method —
            # looks like leftover bookkeeping; confirm before removing.
            fail = 0
            # `rets` is mutated by the updater closure to pass the chosen CIDR
            # back out of the transaction.
            rets = []
            def _updater(keys, values, timestamp):
                # Transaction updater run by objectdb: values[0] is the global
                # IPAMPoolReserve record, values[1] the (possibly existing)
                # reservation for our id.
                reservepool = values[0]
                reserve_new_pool = set_new(values[1], new_pool)
                remove_keys = self._remove_staled_pools(
                    reservepool, timestamp)
                used_cidrs = set(
                    cidr
                    for _, (cidr, _) in reservepool.reserved_pools.items())
                if not reserve_new_pool.pool:
                    # pool is not specified
                    # Scan the auto-allocation range (round-robin from
                    # nextalloc) for a /24 not currently reserved.
                    for _ in range(0, self.cidrrange_end):
                        reservepool.nextalloc += 1
                        if reservepool.nextalloc >= self.cidrrange_end:
                            reservepool.nextalloc = 0
                        new_subnet = self.cidrrange_subnet | (
                            reservepool.nextalloc << 8)
                        new_cidr = ip4_addr.formatter(new_subnet) + '/24'
                        if new_cidr not in used_cidrs:
                            break
                    reserve_new_pool.pool = new_cidr
                    reserve_new_pool.subpool = ''
                rets[:] = [reserve_new_pool.pool]
                if reserve_new_pool.pool in used_cidrs:
                    # We must wait until this CIDR is released
                    raise IPAMUsingException
                # Record the reservation with an expiry timestamp
                # (pooltimeout seconds; timestamp presumably in microseconds
                # given the 1e6 factor — confirm against objectdb docs).
                reservepool.reserved_pools[reserve_new_pool.id] = \
                    [reserve_new_pool.pool, timestamp + self.pooltimeout * 1000000]
                marker = IPAMReserveMarker.create_instance(
                    reserve_new_pool.pool)
                if marker.getkey() in remove_keys:
                    # Marker already exists and was scheduled for removal as
                    # stale — keep it alive by dropping it from remove_keys.
                    remove_keys.remove(marker.getkey())
                    return (tuple(keys[0:2]) + tuple(remove_keys),
                            (reservepool, reserve_new_pool) + (None, ) *
                            len(remove_keys))
                else:
                    # Create a fresh marker object for this CIDR.
                    return (tuple(keys[0:2]) + (marker.getkey(), ) +
                            tuple(remove_keys),
                            (reservepool, reserve_new_pool, marker) +
                            (None, ) * len(remove_keys))
            try:
                for m in callAPI(
                        self, 'objectdb', 'transact', {
                            'keys': (IPAMPoolReserve.default_key(),
                                     new_pool.getkey()),
                            'updater': _updater,
                            'withtime': True
                        }):
                    yield m
            except IPAMUsingException:
                # Wait for the CIDR to be released
                self._reqid += 1
                fail += 1
                reqid = ('dockerplugin_ipam', self._reqid)
                marker_key = IPAMReserveMarker.default_key(rets[0])
                for m in callAPI(self, 'objectdb', 'get', {
                        'key': marker_key,
                        'requestid': reqid,
                        'nostale': True
                }):
                    yield m
                retvalue = self.retvalue
                with watch_context([marker_key], [retvalue], reqid, self):
                    if retvalue is not None and not retvalue.isdeleted():
                        # Block (bounded by pooltimeout) until the marker for
                        # this CIDR is deleted, then retry the transaction.
                        for m in self.executeWithTimeout(
                                self.pooltimeout,
                                retvalue.waitif(self,
                                                lambda x: x.isdeleted())):
                            yield m
            else:
                # Reservation committed — report the pool back to libnetwork.
                env.outputjson({
                    'PoolID': new_pool.id,
                    'Pool': rets[0],
                    'Data': {}
                })
                break
    finally:
        if l is not None:
            l.unlock()
async def routineTrylock(key):
    """Async variant: try a non-blocking acquire on *key* and record the outcome.

    Appends the boolean result of ``trylock()`` to the shared ``result`` list, yields
    control once so concurrent attempts interleave, then releases.
    """
    lk = Lock(key, rc.scheduler)
    result.append(lk.trylock())
    await rc.do_events()
    lk.unlock()