async def _dumpkeys(self, keys):
    """
    Fetch multiple objects from objectdb by key and return their dumped
    (plain-dict) representations.

    :param keys: iterable of objectdb keys to retrieve

    :return: list of dumped objects, in the same order as *keys*
    """
    # Each retrieval uses a fresh, unique request id so objectdb can
    # track/release the references for this request independently.
    self._reqid += 1
    request_id = ('testobjectdb', self._reqid)
    with request_context(request_id, self.apiroutine):
        fetched = await call_api(self.apiroutine, 'objectdb', 'mget',
                                 {'keys': keys,
                                  'requestid': request_id})
        # Dump while still inside the request context so the retrieved
        # references are kept alive during serialization.
        return [dump(value) for value in fetched]
async def _dumpkeys(self, keys, filter=None):
    """
    Fetch multiple objects from objectdb by key and return their dumped
    (plain-dict) representations, optionally filtered by attribute values.

    :param keys: iterable of objectdb keys to retrieve

    :param filter: optional dict of attribute-name -> expected-value; when
                   given, only non-None objects whose attributes all match
                   are returned. (Name kept for caller compatibility even
                   though it shadows the builtin.)

    :return: list of dumped objects
    """
    self._reqid += 1
    request_id = ('virtualrouter', self._reqid)
    with request_context(request_id, self.app_routine):
        fetched = await call_api(self.app_routine, 'objectdb', 'mget',
                                 {'keys': keys,
                                  'requestid': request_id})
        if filter is None:
            return [dump(value) for value in fetched]

        def _matches(obj):
            # Missing keys and deleted (None) objects never match.
            return obj is not None and all(
                getattr(obj, name, None) == expected
                for name, expected in filter.items())

        return [dump(value) for value in fetched if _matches(value)]
async def _waitforchange(self, key):
    """
    Watch a set object under *key* and spawn ``self._updateport`` for every
    member that newly appears in the set.

    :param key: objectdb key of the watched set object

    Runs indefinitely; intended to be started as a subroutine.
    """
    # A fixed request id is used for the long-lived watch (not a counter
    # value, since this watch is held for the routine's whole lifetime).
    with request_context('testobjectdb', self.apiroutine):
        setobj = await call_api(self.apiroutine, 'objectdb', 'watch', {
            'key': key,
            'requestid': 'testobjectdb'
        })
        # Wait until the watched object is first retrieved/valid.
        await setobj.wait(self.apiroutine)
        oldset = set()
        while True:
            # Diff against the previously seen membership: only newly
            # added weak references trigger an _updateport subroutine.
            for weakref in setobj.set.dataset().difference(oldset):
                self.apiroutine.subroutine(
                    self._updateport(weakref.getkey()))
            # Snapshot current membership (copy, since dataset() may be
            # mutated by later updates).
            oldset = set(setobj.set.dataset())
            # Block until the set object changes again; the predicate
            # resumes only while the object is not deleted.
            await setobj.waitif(self.apiroutine, lambda x: not x.isdeleted(), True)
async def listvirtualrouters(self, id: (str, None) = None, **kwargs):
    # Raw docstring: "\*" is not a valid Python escape sequence; in a
    # non-raw string it raises SyntaxWarning on Python 3.12+.
    r"""
    Query virtual routers.

    :param id: if specified, only return the virtual router with this ID

    :param \*\*kwargs: customized filters, compared against attributes of
                       each virtual router

    :return: a list of dictionaries, each standing for a matched virtual router
    """
    if id:
        # Direct lookup by key; _dumpone applies the kwargs filter.
        return await self._dumpone(VRouter.default_key(id), kwargs)
    else:
        # No ID: enumerate every router through the VRouterSet index.
        routersetkey = VRouterSet.default_key()
        self._reqid += 1
        reqid = ("virtualrouter", self._reqid)

        def set_walker(key, set, walk, save):
            # Follow each weak reference in the set to the router object
            # and save keys of routers matching every kwargs filter.
            for weakobj in set.dataset():
                routerkey = weakobj.getkey()
                try:
                    router = walk(routerkey)
                except KeyError:
                    # Object not yet retrieved; objectdb will re-run the
                    # walker once it is available.
                    pass
                else:
                    if all(getattr(router, k, None) == v
                           for k, v in kwargs.items()):
                        save(routerkey)

        def walker_func(set_func):
            # Adapt set_walker to the objectdb walker signature, using
            # set_func to extract the weak-reference set from the root object.
            def walker(key, obj, walk, save):
                if obj is None:
                    return
                set_walker(key, set_func(obj), walk, save)
            return walker

        with request_context(reqid, self.app_routine):
            _, values = await call_api(
                self.app_routine, "objectdb", "walk",
                {"keys": [routersetkey],
                 "walkerdict": {routersetkey: walker_func(lambda x: x.set)},
                 "requestid": reqid})
            return [dump(r) for r in values]
async def updateinner():
    """
    Try to take ownership of the port object at *key* (closure variable from
    the enclosing scope), then log every subsequent update until the port is
    deleted.
    """
    self._reqid += 1
    reqid = ('testobjectdb', self._reqid)
    with request_context(reqid, self.apiroutine):
        portobj = await call_api(self.apiroutine, 'objectdb', 'get', {
            'key': key,
            'requestid': reqid
        })
        if portobj is not None:
            @updater
            def write_status(portobj):
                # Transact updater: claim the port only if it still exists
                # and has no owner yet; otherwise abort via ValueError.
                if portobj is None:
                    raise ValueError('Already deleted')
                if not hasattr(portobj, 'owner'):
                    portobj.owner = self._ownerid
                    portobj.status = 'READY'
                    return [portobj]
                else:
                    raise ValueError('Already managed')
            try:
                await call_api(self.apiroutine, 'objectdb', 'transact', {
                    'keys': [portobj.getkey()],
                    'updater': write_status
                })
            except ValueError:
                # Port was deleted or already managed by someone else:
                # deliberately give up without error.
                pass
            else:
                # Wait until our transacted change (or a deletion) becomes
                # visible on the local reference.
                await portobj.waitif(
                    self.apiroutine,
                    lambda x: x.isdeleted() or hasattr(x, 'owner'))
                self._logger.info('Port managed: %r', dump(portobj))
                while True:
                    # Predicate always true + last arg True: resume on the
                    # NEXT change notification, not immediately.
                    await portobj.waitif(self.apiroutine, lambda x: True, True)
                    if portobj.isdeleted():
                        self._logger.info('Port deleted: %r', dump(portobj))
                        break
                    else:
                        self._logger.info('Port updated: %r', dump(portobj))
async def listrouterinterfaces(self, id: str, **kwargs):
    # Raw docstring: "\*" is not a valid Python escape sequence; in a
    # non-raw string it raises SyntaxWarning on Python 3.12+.
    r"""
    Query router ports of a virtual router.

    :param id: virtual router ID

    :param \*\*kwargs: customized filters, compared against attributes of
                       each router interface

    :return: a list of dictionaries, each standing for a matched router interface

    :raises ValueError: if *id* is empty
    """
    if not id:
        raise ValueError("must specify router id")
    routerkey = VRouter.default_key(id)
    self._reqid += 1
    reqid = ("virtualrouter", self._reqid)

    def set_walker(key, interfaces, walk, save):
        # Follow each weak reference in the router's interface set and save
        # the keys of interfaces matching every kwargs filter.
        for weakobj in interfaces.dataset():
            vpkey = weakobj.getkey()
            try:
                virtualport = walk(vpkey)
            except KeyError:
                # Object not yet retrieved; objectdb will re-run the walker
                # once it is available.
                pass
            else:
                if all(getattr(virtualport, k, None) == v
                       for k, v in kwargs.items()):
                    save(vpkey)

    def walk_func(filter_func):
        # Adapt set_walker to the objectdb walker signature; filter_func
        # extracts the interface set from the router object.
        def walker(key, obj, walk, save):
            if obj is None:
                return
            set_walker(key, filter_func(obj), walk, save)
        return walker

    with request_context(reqid, self.app_routine):
        _, values = await call_api(
            self.app_routine, "objectdb", "walk",
            {"keys": [routerkey],
             "walkerdict": {routerkey: walk_func(lambda x: x.interfaces)},
             "requestid": reqid})
        return [dump(r) for r in values]
async def getlogicalnetworks(self, id=None, physicalnetwork=None, **kwargs):
    """
    Query logical networks.

    :param id: if specified, look up exactly this logical network

    :param physicalnetwork: if specified (and *id* is not), enumerate the
                            networks of this physical network; when *id* is
                            also given, used as an extra consistency filter

    :param \\*\\*kwargs: customized attribute filters

    :return: a list of dumped logical networks matching all criteria
    """
    def _network_set_walker(_key, refset, walk, save):
        # Walk every weak reference in a logical-network set and save the
        # keys of networks whose attributes match all kwargs filters.
        if refset is None:
            return
        for ref in refset.dataset():
            netkey = ref.getkey()
            try:
                network = walk(netkey)
            except KeyError:
                # Not yet retrieved; objectdb re-runs the walker later.
                pass
            else:
                if all(getattr(network, name, None) == expected
                       for name, expected in kwargs.items()):
                    save(netkey)

    def _make_walker(extract_set):
        # Adapt _network_set_walker to the objectdb walker signature;
        # extract_set pulls the weak-reference set out of the root object.
        def _walker(key, obj, walk, save):
            if obj is None:
                return
            _network_set_walker(key, extract_set(obj), walk, save)
        return _walker

    # Exactly one branch below runs, so one fresh request id suffices.
    self._reqid += 1
    reqid = ('testobjectdb', self._reqid)
    if id is not None:
        # Direct lookup; apply physicalnetwork and kwargs as post-filters.
        with request_context(reqid, self.apiroutine):
            result = await call_api(self.apiroutine, 'objectdb', 'get', {
                'key': LogicalNetwork.default_key(id),
                'requestid': reqid
            })
            if result is None:
                return []
            if physicalnetwork is not None \
                    and physicalnetwork != result.physicalnetwork.id:
                return []
            if not all(getattr(result, name, None) == expected
                       for name, expected in kwargs.items()):
                return []
            return [dump(result)]
    elif physicalnetwork is not None:
        # Enumerate through the physical network's map of logical networks.
        pm_key = PhysicalNetworkMap.default_key(physicalnetwork)
        with request_context(reqid, self.apiroutine):
            _, found = await call_api(
                self.apiroutine, 'objectdb', 'walk', {
                    'keys': [pm_key],
                    'walkerdict': {pm_key: _make_walker(lambda m: m.networks)},
                    'requestid': reqid
                })
            return [dump(net) for net in found]
    else:
        # No criteria narrowing the root: enumerate the global network set.
        ns_key = LogicalNetworkSet.default_key()
        with request_context(reqid, self.apiroutine):
            _, found = await call_api(
                self.apiroutine, 'objectdb', 'walk', {
                    'keys': [ns_key],
                    'walkerdict': {ns_key: _make_walker(lambda s: s.set)},
                    'requestid': reqid
                })
            return [dump(net) for net in found]
async def ipam_requestpool(self, env, params):
    """
    Docker libnetwork IPAM driver endpoint: reserve an address pool (CIDR).

    :param env: HTTP environment; the JSON response is written with
                ``env.outputjson``

    :param params: request body; uses 'AddressSpace', 'V6', 'Pool',
                   'SubPool', 'Options'

    Replies with ``{'PoolID': ..., 'Pool': <cidr>, 'Data': {}}`` on success.

    :raises ValueError: for an unsupported address space or IPv6 request
    """
    if params['AddressSpace'] != _GLOBAL_SPACE:
        raise ValueError(
            'Unsupported address space: must use this IPAM driver together with network driver'
        )
    if params['V6']:
        raise ValueError('IPv6 is not supported')
    # Build the reservation object; normalize any provided CIDRs to the
    # canonical "a.b.c.d/len" form.
    new_pool = IPAMReserve.create_instance(uuid1().hex)
    new_pool.pool = params['Pool']
    if new_pool.pool:
        subnet, mask = parse_ip4_network(new_pool.pool)
        new_pool.pool = ip4_addr.formatter(subnet) + '/' + str(mask)
    new_pool.subpool = params['SubPool']
    if new_pool.subpool:
        subnet, mask = parse_ip4_network(new_pool.subpool)
        new_pool.subpool = ip4_addr.formatter(subnet) + '/' + str(mask)
    new_pool.options = params['Options']
    if new_pool.pool:
        # An explicit CIDR was requested: serialize concurrent requests for
        # the same CIDR with a scheduler lock.
        l = Lock(('dockerplugin_ipam_request_pool', new_pool.pool), self.scheduler)
        await l.lock(self)
    else:
        l = None
    try:
        while True:
            # NOTE(review): `fail` is incremented on conflict but never
            # read; it appears to be a leftover counter — confirm/remove.
            fail = 0
            # rets is a closure cell: the transact updater stores the
            # chosen CIDR here so the outer code can read it afterwards.
            rets = []
            def _updater(keys, values, timestamp):
                # Transact updater over (IPAMPoolReserve, new_pool).
                reservepool = values[0]
                reserve_new_pool = set_new(values[1], new_pool)
                # Expired reservations are collected for deletion in the
                # same transaction.
                remove_keys = self._remove_staled_pools(
                    reservepool, timestamp)
                used_cidrs = set(
                    cidr
                    for _, (cidr, _) in reservepool.reserved_pools.items())
                if not reserve_new_pool.pool:
                    # pool is not specified: auto-allocate the next free
                    # /24 from the configured range (round-robin cursor).
                    for _ in range(0, self.cidrrange_end):
                        reservepool.nextalloc += 1
                        if reservepool.nextalloc >= self.cidrrange_end:
                            reservepool.nextalloc = 0
                        new_subnet = self.cidrrange_subnet | (
                            reservepool.nextalloc << 8)
                        new_cidr = ip4_addr.formatter(new_subnet) + '/24'
                        if new_cidr not in used_cidrs:
                            break
                    reserve_new_pool.pool = new_cidr
                    reserve_new_pool.subpool = ''
                # Publish the chosen CIDR to the enclosing scope (even on
                # the conflict path below, so the waiter knows which
                # marker key to watch).
                rets[:] = [reserve_new_pool.pool]
                if reserve_new_pool.pool in used_cidrs:
                    # We must wait until this CIDR is released
                    raise IPAMUsingException
                # Reserve with an expiry timestamp (microseconds).
                reservepool.reserved_pools[reserve_new_pool.id] = \
                    [reserve_new_pool.pool, timestamp + self.pooltimeout * 1000000]
                # A marker object per CIDR lets other requesters wait for
                # release; reuse it if it was about to be removed as stale.
                marker = IPAMReserveMarker.create_instance(
                    reserve_new_pool.pool)
                if marker.getkey() in remove_keys:
                    remove_keys.remove(marker.getkey())
                    return (tuple(keys[0:2]) + tuple(remove_keys),
                            (reservepool, reserve_new_pool) + (None, ) * 
len(remove_keys))
                else:
                    return (tuple(keys[0:2]) + (marker.getkey(), ) +
                            tuple(remove_keys),
                            (reservepool, reserve_new_pool, marker) +
                            (None, ) * len(remove_keys))
            try:
                await call_api(
                    self, 'objectdb', 'transact', {
                        'keys': (IPAMPoolReserve.default_key(),
                                 new_pool.getkey()),
                        'updater': _updater,
                        'withtime': True
                    })
            except IPAMUsingException:
                # Wait for the CIDR to be released
                self._reqid += 1
                fail += 1
                reqid = ('dockerplugin_ipam', self._reqid)
                marker_key = IPAMReserveMarker.default_key(rets[0])
                with request_context(reqid, self):
                    retvalue = await call_api(self, 'objectdb', 'get', {
                        'key': marker_key,
                        'requestid': reqid,
                        'nostale': True
                    })
                    if retvalue is not None and not retvalue.isdeleted():
                        # Bounded wait: give up retrying after pooltimeout
                        # and loop again to re-attempt the transaction.
                        await self.execute_with_timeout(
                            self.pooltimeout,
                            retvalue.waitif(self, lambda x: x.isdeleted()))
            else:
                # Reservation committed: reply to docker and stop retrying.
                env.outputjson({
                    'PoolID': new_pool.id,
                    'Pool': rets[0],
                    'Data': {}
                })
                break
    finally:
        if l is not None:
            l.unlock()