Code example #1
 def _getkeys(self,keys):
     self._reqid += 1
     reqid = ('virtualrouter',self._reqid)
     for m in callAPI(self.app_routine,'objectdb','mget',{'keys':keys,'requestid':reqid}):
         yield m
     with watch_context(keys,self.app_routine.retvalue,reqid,self.app_routine):
         pass
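
Example #1 is a thin wrapper around the objectdb `mget` API: it allocates a fresh request id, fetches the objects, and uses `watch_context` to manage the retrieved references. The snippets on this page omit their imports; the sketch below shows the same mget-and-dump pattern as a standalone routine. It is only a sketch under assumptions: the import paths follow the usual VLCP 1.x layout, and `dump_routers` and `_reqid_counter` are illustrative names that do not appear in the original code.

    # Sketch only: import paths are assumed from the VLCP 1.x layout.
    from itertools import count
    from vlcp.server.module import callAPI
    from vlcp.utils.dataobject import watch_context, dump
    from vlcp.utils.networkmodel import VRouter

    _reqid_counter = count(1)

    def dump_routers(container, router_ids):
        # Generator routine: fetch several VRouter objects with 'mget' and
        # return their dumped dictionaries through container.retvalue.
        keys = [VRouter.default_key(rid) for rid in router_ids]
        reqid = ('example', next(_reqid_counter))
        for m in callAPI(container, 'objectdb', 'mget',
                         {'keys': keys, 'requestid': reqid}):
            yield m
        objs = container.retvalue
        # watch_context scopes the retrieved references to this block.
        with watch_context(keys, objs, reqid, container):
            container.retvalue = [dump(o) for o in objs]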
Code example #2
File: testobjectdb.py Project: vtanrun/vlcp
 def updateinner():
     self._reqid += 1
     reqid = ('testobjectdb', self._reqid)
     for m in callAPI(self.apiroutine, 'objectdb', 'get', {'key': key, 'requestid': reqid}):
         yield m
     portobj = self.apiroutine.retvalue
     with watch_context([key], [portobj], reqid, self.apiroutine):
         if portobj is not None:
             @updater
             def write_status(portobj):
                 if portobj is None:
                     raise ValueError('Already deleted')
                 if not hasattr(portobj, 'owner'):
                     portobj.owner = self._ownerid
                     portobj.status = 'READY'
                     return [portobj]
                 else:
                     raise ValueError('Already managed')
             try:
                 for m in callAPI(self.apiroutine, 'objectdb', 'transact', {'keys': [portobj.getkey()], 'updater': write_status}):
                     yield m
             except ValueError:
                 pass
             else:
                 for m in portobj.waitif(self.apiroutine, lambda x: x.isdeleted() or hasattr(x, 'owner')):
                     yield m
                 self._logger.info('Port managed: %r', dump(portobj))
                 while True:
                     for m in portobj.waitif(self.apiroutine, lambda x: True, True):
                         yield m
                     if portobj.isdeleted():
                         self._logger.info('Port deleted: %r', dump(portobj))
                         break
                     else:
                         self._logger.info('Port updated: %r', dump(portobj))
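
Example #2 builds its transaction callback with the `updater` decorator: the decorated function receives the current stored objects as positional arguments, returns the list of objects it modified, and any exception it raises aborts the transaction and propagates to the caller (here the ValueError is caught around the `transact` call). Examples #7 and #8 below write the raw updater signature by hand instead, `(keys, values)` plus a trailing timestamp when `withtime` is True. A minimal sketch of the decorated form with hypothetical names; `set_owner`, `portkey`, and the import path are assumptions, not part of the original:

    # Hypothetical sketch; the import path for 'updater' is assumed.
    from vlcp.utils.dataobject import updater

    @updater
    def set_owner(portobj):
        # Runs inside the transaction with the current stored object.
        # Raising aborts the transaction; returning objects commits them.
        if portobj is None:
            raise ValueError('Already deleted')
        portobj.owner = 'example-owner'
        return [portobj]

    # Passed to the transaction as:
    #   callAPI(routine, 'objectdb', 'transact',
    #           {'keys': [portkey], 'updater': set_owner})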
Code example #3
File: testobjectdb.py Project: vtanrun/vlcp
 def _dumpkeys(self, keys):
     self._reqid += 1
     reqid = ('testobjectdb', self._reqid)
     for m in callAPI(self.apiroutine, 'objectdb', 'mget', {'keys': keys, 'requestid': reqid}):
         yield m
     retobjs = self.apiroutine.retvalue
     with watch_context(keys, retobjs, reqid, self.apiroutine):
         self.apiroutine.retvalue = [dump(v) for v in retobjs]
Code example #4
    def listrouterinterfaces(self, id, **kwargs):
        """
        Query router ports from a virtual router
        
        :param id: virtual router ID
        
        :param \*\*kwargs: customized filters on router interfaces
        
        :return: a list of dictionaries each stands for a matched router interface
        """
        if not id:
            raise ValueError(" must special router id")

        routerkey = VRouter.default_key(id)

        self._reqid += 1
        reqid = ("virtualrouter", self._reqid)

        def set_walker(key, interfaces, walk, save):

            for weakobj in interfaces.dataset():
                vpkey = weakobj.getkey()

                try:
                    virtualport = walk(vpkey)
                except KeyError:
                    pass
                else:
                    if all(
                            getattr(virtualport, k, None) == v
                            for k, v in kwargs.items()):
                        save(vpkey)

        def walk_func(filter_func):
            def walker(key, obj, walk, save):
                if obj is None:
                    return

                set_walker(key, filter_func(obj), walk, save)

            return walker

        for m in callAPI(
                self.app_routine, "objectdb", "walk", {
                    "keys": [routerkey],
                    "walkerdict": {
                        routerkey: walk_func(lambda x: x.interfaces)
                    },
                    "requestid": reqid
                }):
            yield m

        keys, values = self.app_routine.retvalue

        with watch_context(keys, values, reqid, self.app_routine):
            self.app_routine.retvalue = [dump(r) for r in values]
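
listrouterinterfaces walks from the router object to its `interfaces` weak-reference set and keeps only the ports whose attributes match the keyword filters, then returns the dumped dictionaries. Below is a minimal usage sketch from another routine; the 'virtualrouter' target service name and the import path are assumptions inferred from the request id prefix used above, not something stated in this snippet.

    # Usage sketch; the 'virtualrouter' service name and the import path
    # are assumptions.
    from vlcp.server.module import callAPI

    def show_interfaces(routine, router_id):
        # Call the API from another routine and print the matched interfaces.
        for m in callAPI(routine, 'virtualrouter', 'listrouterinterfaces',
                         {'id': router_id}):
            yield m
        for intf in routine.retvalue:   # a list of dumped dictionaries
            print(intf)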
Code example #5
    def listvirtualrouters(self,id=None,**kwargs):
        "list virtual routers info"
        if id:
            routerkey = [VRouter.default_key()]

            for m in self._getkeys([routerkey]):
                yield m

            retobj = self.app_routine.retvalue[0]

            if retobj is not None and all(getattr(retobj,k,None) == v for k,v in kwargs.items()):
                self.app_routine.retvalue = [dump(retobj)]
            else:
                self.app_routine.retvalue = []
        else:
            # get all routers from the VRouterSet index
            routersetkey = VRouterSet.default_key()

            self._reqid += 1
            reqid = ("virtualrouter",self._reqid)

            def set_walker(key,set,walk,save):

                for weakobj in set.dataset():
                    routerkey = weakobj.getkey()

                    try:
                        router = walk(routerkey)
                    except KeyError:
                        pass
                    else:
                        if all(getattr(router,k,None) == v for k,v in kwargs.items()):
                            save(routerkey)

            def walker_func(set_func):
                def walker(key,obj,walk,save):
                    
                    if obj is None:
                        return
                    
                    set_walker(key,set_func(obj),walk,save)

                return walker

            for m in callAPI(self.app_routine,"objectdb","walk",
                             {"keys":[routersetkey],"walkerdict":{routersetkey:walker_func(lambda x:x.set)},
                              "requestid":reqid}):
                yield m

            keys, values = self.app_routine.retvalue

            with watch_context(keys,values,reqid,self.app_routine):
                self.app_routine.retvalue = [dump(r) for r in values]
Code example #6
File: testobjectdb.py Project: vtanrun/vlcp
 def _waitforchange(self, key):
     for m in callAPI(self.apiroutine, 'objectdb', 'watch', {'key': key, 'requestid': 'testobjectdb'}):
         yield m
     setobj = self.apiroutine.retvalue
     with watch_context([key], [setobj], 'testobjectdb', self.apiroutine):
         for m in setobj.wait(self.apiroutine):
             yield m
         oldset = set()
         while True:
             for weakref in setobj.set.dataset().difference(oldset):
                 self.apiroutine.subroutine(self._updateport(weakref.getkey()))
             oldset = set(setobj.set.dataset())
             for m in setobj.waitif(self.apiroutine, lambda x: not x.isdeleted(), True):
                 yield m
Code example #7
    def ipam_requestaddress(self, env, params):
        poolid = params['PoolID']
        address = params['Address']
        if address:
            address = ip4_addr.formatter(ip4_addr(address))
        reserve_key = IPAMReserve.default_key(poolid)
        rets = []

        def _updater(keys, values, timestamp):
            pool = values[0]
            if pool is None:
                raise ValueError('PoolID %r does not exist' % (poolid, ))
            self._remove_staled_ips(pool, timestamp)
            if len(values) > 1:
                subnetmap = values[1]
                if not hasattr(
                        pool,
                        'subnetmap') or pool.subnetmap.getkey() != keys[1]:
                    raise RetryUpdateException
                subnet = values[2]
            else:
                subnetmap = None
                if hasattr(pool, 'subnetmap'):
                    raise RetryUpdateException
            if address:
                # check ip_address in cidr
                if address in pool.reserved_ips:
                    raise ValueError("IP address " + address +
                                     " has been reserved")
                if pool.subpool:
                    cidr = pool.subpool
                else:
                    cidr = pool.pool
                network, mask = parse_ip4_network(cidr)
                addr_num = ip4_addr(address)
                if not ip_in_network(addr_num, network, mask):
                    raise ValueError('IP address ' + address +
                                     " is not in the network CIDR")
                if subnetmap is not None:
                    start = ip4_addr(subnet.allocated_start)
                    end = ip4_addr(subnet.allocated_end)
                    try:
                        assert start <= addr_num <= end
                        if hasattr(subnet, 'gateway'):
                            assert addr_num != ip4_addr(subnet.gateway)
                    except Exception:
                        raise ValueError("specified IP address " + address +
                                         " is not valid")

                    if str(addr_num) in subnetmap.allocated_ips:
                        raise ValueError("IP address " + address +
                                         " has been used")
                new_address = address
            else:
                # allocated ip_address from cidr
                gateway = None
                cidr = pool.pool
                if pool.subpool:
                    cidr = pool.subpool
                network, prefix = parse_ip4_network(cidr)
                start = network_first(network, prefix)
                end = network_last(network, prefix)
                if subnetmap is not None:
                    start = max(start, ip4_addr(subnet.allocated_start))
                    end = min(end, ip4_addr(subnet.allocated_end))
                    if hasattr(subnet, "gateway"):
                        gateway = ip4_addr(subnet.gateway)
                for ip_address in range(start, end):
                    new_address = ip4_addr.formatter(ip_address)
                    if ip_address != gateway and \
                        (subnetmap is None or str(ip_address) not in subnetmap.allocated_ips) and \
                        new_address not in pool.reserved_ips:
                        break
                else:
                    raise ValueError("No available IP address can be used")
            pool.reserved_ips[
                new_address] = timestamp + self.iptimeout * 1000000
            _, mask = parse_ip4_network(pool.pool)
            rets[:] = [new_address + '/' + str(mask)]
            return ((keys[0], ), (pool, ))

        while True:
            # First get the current data to determine whether the pool has
            # already been connected to a network
            self._reqid += 1
            reqid = ('dockerplugin_ipam', self._reqid)
            for m in callAPI(self, 'objectdb', 'get', {
                    'key': reserve_key,
                    'requestid': reqid,
                    'nostale': True
            }):
                yield m
            reserve_pool = self.retvalue
            with watch_context([reserve_key], [reserve_pool], reqid, self):
                if reserve_pool is None or reserve_pool.isdeleted():
                    raise ValueError('PoolID %r does not exist' % (poolid, ))
                if hasattr(reserve_pool, 'subnetmap'):
                    # Retrieve both the reserve pool and the subnet map
                    keys = (reserve_key, reserve_pool.subnetmap.getkey(),
                            SubNet.default_key(reserve_pool.subnetmap.id))
                else:
                    keys = (reserve_key, )
                try:
                    for m in callAPI(self, 'objectdb', 'transact', {
                            'keys': keys,
                            'updater': _updater,
                            'withtime': True
                    }):
                        yield m
                except RetryUpdateException:
                    continue
                else:
                    env.outputjson({'Address': rets[0], 'Data': {}})
                    break
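
`ipam_requestaddress` stores each reservation in `pool.reserved_ips` as an expiry time (`timestamp + self.iptimeout * 1000000`), and calls `self._remove_staled_ips(pool, timestamp)` at the start of every transaction to drop reservations that have expired. That helper is not included on this page; the sketch below is only a guess at its behavior based on how `reserved_ips` is written above, not the project's actual implementation.

    def _remove_staled_ips(self, pool, timestamp):
        # Hypothetical sketch: drop reserved addresses whose expiry time
        # (stored as the value in pool.reserved_ips) has already passed.
        expired = [addr for addr, expire in pool.reserved_ips.items()
                   if expire < timestamp]
        for addr in expired:
            del pool.reserved_ips[addr]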
Code example #8
    def ipam_requestpool(self, env, params):
        if params['AddressSpace'] != _GLOBAL_SPACE:
            raise ValueError(
                'Unsupported address space: must use this IPAM driver together with network driver'
            )
        if params['V6']:
            raise ValueError('IPv6 is not supported')
        new_pool = IPAMReserve.create_instance(uuid1().hex)
        new_pool.pool = params['Pool']
        if new_pool.pool:
            subnet, mask = parse_ip4_network(new_pool.pool)
            new_pool.pool = ip4_addr.formatter(subnet) + '/' + str(mask)
        new_pool.subpool = params['SubPool']
        if new_pool.subpool:
            subnet, mask = parse_ip4_network(new_pool.subpool)
            new_pool.subpool = ip4_addr.formatter(subnet) + '/' + str(mask)
        new_pool.options = params['Options']
        if new_pool.pool:
            l = Lock(('dockerplugin_ipam_request_pool', new_pool.pool),
                     self.scheduler)
            for m in l.lock(self):
                yield m
        else:
            l = None
        try:
            while True:
                fail = 0
                rets = []

                def _updater(keys, values, timestamp):
                    reservepool = values[0]
                    reserve_new_pool = set_new(values[1], new_pool)
                    remove_keys = self._remove_staled_pools(
                        reservepool, timestamp)
                    used_cidrs = set(
                        cidr
                        for _, (cidr, _) in reservepool.reserved_pools.items())
                    if not reserve_new_pool.pool:
                        # pool is not specified
                        for _ in range(0, self.cidrrange_end):
                            reservepool.nextalloc += 1
                            if reservepool.nextalloc >= self.cidrrange_end:
                                reservepool.nextalloc = 0
                            new_subnet = self.cidrrange_subnet | (
                                reservepool.nextalloc << 8)
                            new_cidr = ip4_addr.formatter(new_subnet) + '/24'
                            if new_cidr not in used_cidrs:
                                break
                        reserve_new_pool.pool = new_cidr
                        reserve_new_pool.subpool = ''
                    rets[:] = [reserve_new_pool.pool]
                    if reserve_new_pool.pool in used_cidrs:
                        # We must wait until this CIDR is released
                        raise IPAMUsingException
                    reservepool.reserved_pools[reserve_new_pool.id] = \
                                                       [reserve_new_pool.pool,
                                                       timestamp + self.pooltimeout * 1000000]
                    marker = IPAMReserveMarker.create_instance(
                        reserve_new_pool.pool)
                    if marker.getkey() in remove_keys:
                        remove_keys.remove(marker.getkey())
                        return (tuple(keys[0:2]) + tuple(remove_keys),
                                (reservepool, reserve_new_pool) +
                                (None, ) * len(remove_keys))
                    else:
                        return (tuple(keys[0:2]) + (marker.getkey(), ) +
                                tuple(remove_keys),
                                (reservepool, reserve_new_pool, marker) +
                                (None, ) * len(remove_keys))

                try:
                    for m in callAPI(self, 'objectdb', 'transact',
                                     {'keys': (IPAMPoolReserve.default_key(),
                                               new_pool.getkey()),
                                      'updater': _updater,
                                      'withtime': True}):
                        yield m
                except IPAMUsingException:
                    # Wait for the CIDR to be released
                    self._reqid += 1
                    fail += 1
                    reqid = ('dockerplugin_ipam', self._reqid)
                    marker_key = IPAMReserveMarker.default_key(rets[0])
                    for m in callAPI(self, 'objectdb', 'get', {
                            'key': marker_key,
                            'requestid': reqid,
                            'nostale': True
                    }):
                        yield m
                    retvalue = self.retvalue
                    with watch_context([marker_key], [retvalue], reqid, self):
                        if retvalue is not None and not retvalue.isdeleted():
                            for m in self.executeWithTimeout(
                                    self.pooltimeout,
                                    retvalue.waitif(self,
                                                    lambda x: x.isdeleted())):
                                yield m
                else:
                    env.outputjson({
                        'PoolID': new_pool.id,
                        'Pool': rets[0],
                        'Data': {}
                    })
                    break
        finally:
            if l is not None:
                l.unlock()
Code example #9
File: testobjectdb.py Project: vtanrun/vlcp
 def getlogicalnetworks(self, id = None, physicalnetwork = None, **kwargs):
     def set_walker(key, set, walk, save):
         if set is None:
             return
         for o in set.dataset():
             key = o.getkey()
             try:
                 net = walk(key)
             except KeyError:
                 pass
             else:
                 for k,v in kwargs.items():
                     if getattr(net, k, None) != v:
                         break
                 else:
                     save(key)
     def walker_func(set_func):
         def walker(key, obj, walk, save):
             if obj is None:
                 return
             set_walker(key, set_func(obj), walk, save)
         return walker
     if id is not None:
         self._reqid += 1
         reqid = ('testobjectdb', self._reqid)
         for m in callAPI(self.apiroutine, 'objectdb', 'get', {'key' : LogicalNetwork.default_key(id), 'requestid': reqid}):
             yield m
         result = self.apiroutine.retvalue
         with watch_context([LogicalNetwork.default_key(id)], [result], reqid, self.apiroutine):
             if result is None:
                 self.apiroutine.retvalue = []
                 return
             if physicalnetwork is not None and physicalnetwork != result.physicalnetwork.id:
                 self.apiroutine.retvalue = []
                 return
             for k,v in kwargs.items():
                 if getattr(result, k, None) != v:
                     self.apiroutine.retvalue = []
                     return
             self.apiroutine.retvalue = [dump(result)]
     elif physicalnetwork is not None:
         self._reqid += 1
         reqid = ('testobjectdb', self._reqid)
         pm_key = PhysicalNetworkMap.default_key(physicalnetwork)
         for m in callAPI(self.apiroutine, 'objectdb', 'walk', {'keys': [pm_key],
                                                                'walkerdict': {pm_key: walker_func(lambda x: x.networks)},
                                                                'requestid': reqid}):
             yield m
         keys, result = self.apiroutine.retvalue
         with watch_context(keys, result, reqid, self.apiroutine):
             self.apiroutine.retvalue = [dump(r) for r in result]
     else:
         self._reqid += 1
         reqid = ('testobjectdb', self._reqid)
         ns_key = LogicalNetworkSet.default_key()
         for m in callAPI(self.apiroutine, 'objectdb', 'walk', {'keys': [ns_key],
                                                                'walkerdict': {ns_key: walker_func(lambda x: x.set)},
                                                                'requestid': reqid}):
             yield m
         keys, result = self.apiroutine.retvalue
         with watch_context(keys, result, reqid, self.apiroutine):
             self.apiroutine.retvalue = [dump(r) for r in result]
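
Examples #4, #5 and #9 all use the same objectdb 'walk' protocol: each walker is called as walker(key, obj, walk, save), where obj is the value stored under key (possibly None), walk(other_key) fetches another object from the same snapshot and may raise KeyError when it is not yet available (the examples simply catch and skip it), and save(key) marks a key whose value should be returned and watched. The stripped-down walker below restates that protocol with hypothetical names; the `ports` attribute and the 'READY' status check are illustrative assumptions, not names from the original code.

    # Hypothetical sketch of the walker protocol used above.
    def ports_walker(key, obj, walk, save):
        # 'obj' is the object stored under 'key'; it may be None if deleted.
        if obj is None:
            return
        for weakref in obj.ports.dataset():     # follow a weak-reference set
            portkey = weakref.getkey()
            try:
                port = walk(portkey)            # fetch from the same snapshot
            except KeyError:
                pass                            # not available yet; skip
            else:
                if getattr(port, 'status', None) == 'READY':
                    save(portkey)               # include in the returned keys/values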