Example #1
0
 def resync(self, datapathid, vhost=''):
     '''
     Resync with current ports

     Force a reconnect of the OVSDB connection for the given datapath so the
     monitor session is re-established from a clean state.

     :param datapathid: datapath id of the switch

     :param vhost: vhost name of the connection

     The result (always ``None``) is stored in ``self.apiroutine.retvalue``.
     '''
     # Sometimes when the OVSDB connection is very busy, monitor message may be dropped.
     # We must deal with this and recover from it
     # Save current manged_ports
     if (vhost, datapathid) not in self.managed_ports:
         # Nothing is managed for this datapath, so there is nothing to resync
         self.apiroutine.retvalue = None
         return
     else:
         # Look up the OVSDB connection for this datapath
         for m in callAPI(self.apiroutine, 'ovsdbmanager', 'getconnection',
                          {
                              'datapathid': datapathid,
                              'vhost': vhost
                          }):
             yield m
         c = self.apiroutine.retvalue
         if c is not None:
             # For now, we restart the connection...
             for m in c.reconnect(False):
                 yield m
             # Give the reconnect a short head start before waiting
             for m in self.apiroutine.waitWithTimeout(0.1):
                 yield m
             # Block until the datapath connects again
             for m in callAPI(self.apiroutine, 'ovsdbmanager',
                              'waitconnection', {
                                  'datapathid': datapathid,
                                  'vhost': vhost
                              }):
                 yield m
     self.apiroutine.retvalue = None
Example #2
0
 def updateinner():
     """Fetch the port object for *key* (closure variable), try to claim
     ownership of it, then log every update until the port is deleted."""
     # Allocate a unique request id for this objectdb watch
     self._reqid += 1
     reqid = ('testobjectdb', self._reqid)
     for m in callAPI(self.apiroutine, 'objectdb', 'get', {'key': key, 'requestid': reqid}):
         yield m
     portobj = self.apiroutine.retvalue
     # watch_context unwatches the key automatically when the block exits
     with watch_context([key], [portobj], reqid, self.apiroutine):
         if portobj is not None:
             @updater
             def write_status(portobj):
                 # Transaction updater: claim the port only when nobody owns it
                 if portobj is None:
                     raise ValueError('Already deleted')
                 if not hasattr(portobj, 'owner'):
                     portobj.owner = self._ownerid
                     portobj.status = 'READY'
                     return [portobj]
                 else:
                     raise ValueError('Already managed')
             try:
                 for m in callAPI(self.apiroutine, 'objectdb', 'transact', {'keys': [portobj.getkey()], 'updater': write_status}):
                     yield m
             except ValueError:
                 # The port was deleted or is already managed by someone else;
                 # silently give up
                 pass
             else:
                 # Wait until our own transaction becomes visible on the
                 # watched object
                 for m in portobj.waitif(self.apiroutine, lambda x: x.isdeleted() or hasattr(x, 'owner')):
                     yield m
                 self._logger.info('Port managed: %r', dump(portobj))
                 while True:
                     # NOTE(review): the third argument presumably requests a
                     # wakeup on every update — confirm against waitif's definition
                     for m in portobj.waitif(self.apiroutine, lambda x: True, True):
                         yield m
                     if portobj.isdeleted():
                         self._logger.info('Port deleted: %r', dump(portobj))
                         break
                     else:
                         self._logger.info('Port updated: %r', dump(portobj))
Example #3
0
 def check_viperflow():
     """Query docker endpoints from ViPeRFlow twice (5 seconds apart) and
     return the intersection, keyed by network id with the 'docker-' prefix
     stripped from port ids."""
     # First pass: all docker-created logical ports per network
     first_vp_ports = {}
     for nid in network_ports:
         for m in callAPI(self.apiroutine, 'viperflow', 'listlogicalports',
                          {'logicalnetwork': 'docker-' + nid + '-lognet'}):
             yield m
         first_vp_ports[nid] = {p['id']: p.get('ip_address')
                                for p in self.apiroutine.retvalue
                                if p['id'].startswith('docker-')}
     first_total = sum(len(ports) for ports in first_vp_ports.values())
     print("Find %d endpoints from viperflow database, recheck in 5 seconds..." % (first_total,))
     for m in self.apiroutine.waitWithTimeout(5):
         yield m
     # Second pass: keep only the ports that were also seen the first time
     second_vp_ports = {}
     for nid in network_ports:
         for m in callAPI(self.apiroutine, 'viperflow', 'listlogicalports',
                          {'logicalnetwork': 'docker-' + nid + '-lognet'}):
             yield m
         second_vp_ports[nid] = {p['id']: p.get('ip_address')
                                 for p in self.apiroutine.retvalue
                                 if p['id'] in first_vp_ports[nid]}
     second_total = sum(len(ports) for ports in second_vp_ports.values())
     print("Find %d endpoints from viperflow database from the intersection of two tries" % (second_total,))
     # Strip the 'docker-' prefix from port ids before returning
     second_vp_ports = {nid: {pid[len('docker-'):]: addr
                              for pid, addr in v.items()}
                        for nid, v in second_vp_ports.items()}
     self.apiroutine.retvalue = second_vp_ports
Example #4
0
 def deletenetwork(self, env, params):
     """Remove the ViPeRFlow subnet and logical network created for this
     docker network (identified by params['NetworkID'])."""
     network_id = params['NetworkID']
     # The subnet must go first; the logical network contains it
     for m in callAPI(self, 'viperflow', 'deletesubnet',
                      {'id': 'docker-' + network_id + '-subnet'}):
         yield m
     for m in callAPI(self, 'viperflow', 'deletelogicalnetwork',
                      {'id': 'docker-' + network_id + '-lognet'}):
         yield m
Example #5
0
    def endpoint_join(self, env, params):
        """Docker libnetwork Join handler.

        Returns the interface name, gateway and static routes for the
        endpoint, and records the sandbox key on the stored DockerInfo.

        :param env: HTTP environment used to send the JSON response

        :param params: request body; uses ``EndpointID`` and ``SandboxKey``
        """
        logport_id = 'docker-' + params['EndpointID']
        # Query the logical port and the stored docker info in parallel
        for m in self.executeAll([
                callAPI(self, 'viperflow', 'listlogicalports',
                        {'id': logport_id}),
                callAPI(self, 'dockerplugin', 'getdockerinfo',
                        {'portid': logport_id})
        ]):
            yield m
        ((logport_results, ), (dockerinfo_results, )) = self.retvalue
        if not logport_results:
            raise KeyError(repr(params['EndpointID']) + ' not found')
        logport_result = logport_results[0]
        # Prefer the saved docker info; fall back to the logical port record
        if dockerinfo_results:
            docker_port = dockerinfo_results[0]['docker_port']
        else:
            docker_port = logport_result['docker_port']
        result = {
            'InterfaceName': {
                'SrcName': docker_port,
                'DstPrefix': self._parent.dstprefix
            }
        }
        if 'subnet' in logport_result:
            subnet = logport_result['subnet']
            if 'gateway' in subnet:
                result['Gateway'] = subnet['gateway']
            if 'host_routes' in subnet:
                try:

                    def generate_route(r):
                        # r is a (destination, nexthop) pair; a 0.0.0.0
                        # nexthop marks a directly-connected route
                        r_g = {'Destination': r[0]}
                        if ip4_addr(r[1]) == 0:
                            r_g['RouteType'] = 1
                        else:
                            r_g['RouteType'] = 0
                            r_g['NextHop'] = r[1]
                        # BUG FIX: the route dict was built but never
                        # returned, yielding StaticRoutes full of None
                        return r_g

                    result['StaticRoutes'] = [
                        generate_route(r) for r in subnet['host_routes']
                    ]
                except Exception:
                    # Best effort: a malformed route must not fail the join
                    self._logger.warning('Generate static routes failed',
                                         exc_info=True)
        sandboxkey = params['SandboxKey']

        @updater
        def _updater(dockerinfo):
            # Record the sandbox key if the DockerInfo object still exists
            if dockerinfo is None:
                return ()
            else:
                dockerinfo.docker_sandbox = sandboxkey
                return (dockerinfo, )

        for m in callAPI(self, 'objectdb', 'transact', {
                'keys': [DockerInfo.default_key(logport_id)],
                'updater': _updater
        }):
            yield m
        env.outputjson(result)
Example #6
0
    def delete_endpoint(self, env, params):
        """Docker libnetwork DeleteEndpoint handler: unplug the veth pair and
        remove the logical port.

        :param env: HTTP environment used to send the JSON response

        :param params: request body; ``EndpointID`` identifies the endpoint
        """
        logport_id = 'docker-' + params['EndpointID']
        for m in callAPI(self, 'dockerplugin', 'getdockerinfo',
                         {'portid': logport_id}):
            yield m
        if not self.retvalue:
            raise KeyError(repr(params['EndpointID']) + ' not found')
        dockerinfo_result = self.retvalue[0]
        docker_port = dockerinfo_result['docker_port']

        # Executed in the task pool (a worker thread): the values it needs
        # are bound as default arguments so the closure does not read shared
        # state while running concurrently
        def _unplug_port(ovs_command=self._parent.ovscommand,
                         bridge_name=self._parent.ovsbridge,
                         device_name=docker_port,
                         ip_command=self._parent.ipcommand):
            # Best-effort cleanup: log failures but continue to the next step
            try:
                _unplug_ovs(ovs_command, bridge_name, device_name)
            except Exception:
                self._logger.warning('Remove veth from OpenvSwitch failed',
                                     exc_info=True)
            try:
                _delete_veth(ip_command, device_name)
            except Exception:
                self._logger.warning('Delete veth failed', exc_info=True)

        for m in self._parent.taskpool.runTask(self, _unplug_port):
            yield m
        for m in callAPI(self, 'viperflow', 'deletelogicalport',
                         {'id': logport_id}):
            yield m
        env.outputjson({})
Example #7
0
 def main(self):
     """Main walk loop: keep the watched key set in sync with objectdb and
     notify the flow-updater subroutine whenever watched data changes."""
     try:
         lastkeys = set()
         dataupdate = FlowUpdaterNotification.createMatcher(self, FlowUpdaterNotification.DATAUPDATED)
         startwalk = FlowUpdaterNotification.createMatcher(self, FlowUpdaterNotification.STARTWALK)
         self.subroutine(self._flowupdater(), False, '_flowupdateroutine')
         # Updates arriving while a walk is in progress are preserved here
         presave_update = set()
         while True:
             self._restartwalk = False
             presave_update.clear()
             presave_update.update(self._updatedset)
             self._updatedset.clear()
             _initialkeys = set(self._initialkeys)
             # NOTE: '_requstid' (sic) is the attribute name used throughout
             for m in callAPI(self, 'objectdb', 'walk', {'keys': self._initialkeys, 'walkerdict': self._walkerdict,
                                                         'requestid': self._requstid}):
                 yield m
             if self._updatedset:
                 # An initial key changed during the walk; the result may be
                 # stale, so restart the walk from scratch
                 if any(v.getkey() in _initialkeys for v in self._updatedset):
                     continue
             lastkeys = set(self._savedkeys)
             self._savedkeys, self._savedresult = self.retvalue
             # Keys no longer returned by the walk must be unwatched
             removekeys = tuple(lastkeys.difference(self._savedkeys))
             self.reset_initialkeys(self._savedkeys, self._savedresult)
             _initialkeys = set(self._initialkeys)
             # Restart the update-detection subroutine on the new key set
             if self._dataupdateroutine:
                 self.terminate(self._dataupdateroutine)
             self.subroutine(self._dataobject_update_detect(), False, "_dataupdateroutine")
             # Re-apply the updates saved before the walk started
             self._updatedset.update(v for v in presave_update)
             if removekeys:
                 for m in callAPI(self, 'objectdb', 'munwatch', {'keys': removekeys,
                                                                 'requestid': self._requstid}):
                     yield m
             for m in self.walkcomplete(self._savedkeys, self._savedresult):
                 yield m
             self._updatedset2.update(self._updatedset)
             self._updatedset.clear()
             # Tell the flow updater that new data is ready
             for m in self.waitForSend(FlowUpdaterNotification(self, FlowUpdaterNotification.FLOWUPDATE)):
                 yield m
             # Idle until an initial key changes (restart the walk) or a
             # STARTWALK notification arrives
             while not self._restartwalk:
                 if self._updatedset:
                     if any(v.getkey() in _initialkeys for v in self._updatedset):
                         break
                     else:
                         # Only non-initial keys changed: forward the update
                         # without restarting the walk
                         self._updatedset2.update(self._updatedset)
                         self._updatedset.clear()
                         self.scheduler.emergesend(FlowUpdaterNotification(self, FlowUpdaterNotification.FLOWUPDATE))
                 yield (dataupdate, startwalk)
     finally:
         # Cleanup: unwatch everything and stop the helper subroutines
         self.subroutine(callAPI(self, 'objectdb', 'munwatch', {'keys': self._savedkeys,
                                                                'requestid': self._requstid}))
         if self._flowupdateroutine:
             self.terminate(self._flowupdateroutine)
             self._flowupdateroutine = None
         if self._dataupdateroutine:
             self.terminate(self._dataupdateroutine)
             self._dataupdateroutine = None
Example #8
0
 def createnetwork(self, env, params):
     """Docker libnetwork CreateNetwork handler.

     Creates a logical network and its subnet in ViPeRFlow. Options under
     'com.docker.network.generic' are forwarded: keys prefixed with
     'subnet:' go to the subnet, all others to the network. Values quoted
     with backticks are evaluated as Python literals.

     :param env: HTTP environment used to send the JSON response

     :param params: request body (NetworkID, IPv4Data, Options)
     """
     def _convert_value(v):
         # `...` marks a Python literal; fall back to the raw string when
         # it cannot be parsed (duplicated inline twice in the original)
         if v[:1] == '`' and v[-1:] == '`':
             try:
                 return ast.literal_eval(v[1:-1])
             except Exception:
                 return v
         return v
     lognet_id = 'docker-' + params['NetworkID'] + '-lognet'
     network_params = {'id': lognet_id}
     if 'Options' in params and 'com.docker.network.generic' in params['Options']:
         generic_options = params['Options']['com.docker.network.generic']
     else:
         generic_options = {}
     for k, v in generic_options.items():
         if k.startswith('subnet:'):
             # Subnet options are applied after the network is created
             pass
         elif k in ('mtu', 'vlanid', 'vni'):
             network_params[k] = int(v)
         else:
             network_params[k] = _convert_value(v)
     for m in callAPI(self, 'viperflow', 'createlogicalnetwork',
                      network_params):
         yield m
     # NOTE(review): assumes IPv4Data holds at least one pool — an empty
     # list would raise IndexError here; confirm against libnetwork
     subnet_params = {
         'logicalnetwork': lognet_id,
         'cidr': params['IPv4Data'][0]['Pool'],
         'id': 'docker-' + params['NetworkID'] + '-subnet'
     }
     if params['IPv4Data'] and 'Gateway' in params['IPv4Data'][0]:
         # NOTE(review): rpartition yields '' when no '/' is present —
         # Gateway is assumed to be in CIDR ('ip/prefix') form
         gateway, _, _ = params['IPv4Data'][0]['Gateway'].rpartition('/')
         subnet_params['gateway'] = gateway
     for k, v in generic_options.items():
         if k.startswith('subnet:'):
             subnet_params[k[len('subnet:'):]] = _convert_value(v)
     try:
         for m in callAPI(self, 'viperflow', 'createsubnet', subnet_params):
             yield m
     except Exception as exc:
         # Roll back the logical network when subnet creation fails
         try:
             for m in callAPI(self, 'viperflow', 'deletelogicalnetwork',
                              {'id': lognet_id}):
                 yield m
         except Exception:
             pass
         raise exc
     env.outputjson({})
Example #9
0
 def move_key(key):
     """Copy one key from the source store to the destination store,
     skipping keys whose stored value cannot be deserialized."""
     try:
         for m in callAPI(self.apiroutine, src_service, "mget",
                          {"keys": (key,), "vhost": src_vhost}):
             yield m
     except ValueError:
         # The stored value is not deserializable; report and skip it
         print("Key %r is not valid, it cannot be loaded. Ignore this key." % (key,))
     else:
         value = self.apiroutine.retvalue[0]
         for m in callAPI(self.apiroutine, dst_service, "mset",
                          {"kvpairs": ((key, value),), "vhost": dst_vhost}):
             yield m
Example #10
0
 def apiHandler(self, env, targetname, methodname, **kwargs):
     """Dispatch an HTTP request to a module API and write the JSON result.

     Parameters come from the query/form (**kwargs); when absent and the
     request carries a JSON body, the body is decoded instead. Access is
     filtered through allowtargets/denytargets and an optional auth method.

     :param env: HTTP environment

     :param targetname: target module name

     :param methodname: API method name
     """
     params = kwargs
     parent = self.parent
     if not params:
         # No query parameters: try to parse a JSON request body
         if b'content-type' in env.headerdict and env.inputstream is not None and parent.acceptjson:
             # Renamed from 'm': the original reused the loop variable name
             ctmsg = Message()
             ctmsg['content-type'] = _str(env.headerdict[b'content-type'])
             if ctmsg.get_content_type() == 'application/json':
                 charset = ctmsg.get_content_charset('utf-8')
                 for m in env.inputstream.read(self):
                     yield m
                 # BUG FIX: _str already decodes the body with the declared
                 # charset; passing charset positionally to json.loads was a
                 # Python 2 'encoding' leftover and raises TypeError on Py3
                 params = json.loads(_str(self.data, charset),
                                     object_hook=decode_object)
     elif parent.typeextension:
         # Type extension: `...` values are Python literals
         for k in params.keys():
             v = params[k]
             if v[:1] == '`' and v[-1:] == '`':
                 try:
                     params[k] = ast.literal_eval(v[1:-1])
                 except Exception:
                     # Not a valid literal: keep the raw string
                     pass
     if parent.allowtargets is not None:
         if targetname not in parent.allowtargets:
             for m in env.error(403):
                 yield m
             return
     elif parent.denytargets is not None:
         if targetname in parent.denytargets:
             for m in env.error(403):
                 yield m
             return
     if parent.authmethod:
         # Delegate authentication/authorization to the configured service
         for m in callAPI(
                 self, parent.authtarget, parent.authmethod, {
                     'env': env,
                     'targetname': targetname,
                     'name': methodname,
                     'params': params
                 }):
             yield m
     for m in callAPI(self, targetname, methodname, params):
         yield m
     env.header('Content-Type', 'application/json')
     env.outputdata(
         json.dumps({
             'result': self.retvalue
         },
                    default=self.jsonencoder.jsonencoder).encode('ascii'))
Example #11
0
 def createlogicalports(self, ports):
     """Create multiple logical ports in a single objectdb transaction.

     :param ports: iterable of dicts, each forwarded to _createlogicalport

     The dumped created ports are returned via _dumpkeys.
     """
     new_ports = [self._createlogicalport(**p) for p in ports]
     # Deduplicated logical network keys and the matching map keys
     logical_networks = list(set([p.logicalnetwork.getkey() for p in new_ports]))
     logical_maps = [LogicalNetworkMap.default_key(*LogicalNetwork._getIndices(k)[1]) for k in logical_networks]
     @updater
     def create_ports(portset, *objs):
         # objs layout: old ports, then network maps, then networks
         old_ports = objs[:len(new_ports)]
         logmaps = list(objs[len(new_ports):len(new_ports) + len(logical_networks)])
         lognets = list(objs[len(new_ports) + len(logical_networks):])
         logdict = dict(zip(logical_networks, zip(lognets, logmaps)))
         return_ports = [None] * len(new_ports)
         for i in range(0, len(new_ports)):
             return_ports[i] = set_new(old_ports[i], new_ports[i])
         for p in return_ports:
             lognet, logmap = logdict[p.logicalnetwork.getkey()]
             if lognet is None:
                 _, (logid,) = LogicalNetwork._getIndices(p.logicalnetwork.getkey())
                 raise ValueError('Logical network %r does not exist' % (logid,))
             logmap.ports.dataset().add(p.create_weakreference())
             # BUG FIX: this add was outside the loop, so only the last
             # created port was registered in the global port set
             portset.set.dataset().add(p.create_weakreference())
         return [portset] + return_ports + logmaps
     for m in callAPI(self.apiroutine, 'objectdb', 'transact',
                      {'keys': [LogicalPortSet.default_key()] +
                               [p.getkey() for p in new_ports] +
                               logical_maps +
                               logical_networks,
                       'updater': create_ports}):
         yield m
     for m in self._dumpkeys([p.getkey() for p in new_ports]):
         yield m
Example #12
0
 def waitinner():
     """Resolve the port named *name* on (vhost, datapathid) — closure
     variables — waiting for the datapath and the port to appear if needed."""
     ports = self.managed_ports.get((vhost, datapathid))
     if ports is None:
         # Datapath not seen yet: wait for its openflow connection first
         for m in callAPI(self.apiroutine, 'openflowmanager',
                          'waitconnection', {
                              'datapathid': datapathid,
                              'vhost': vhost,
                              'timeout': timeout
                          }):
             yield m
         c = self.apiroutine.retvalue
         ports = self.managed_ports.get((vhost, datapathid))
         if ports is None:
             # Connected, but ports not synchronized yet: wait for the event
             yield (OpenflowPortSynchronized.createMatcher(c), )
         ports = self.managed_ports.get((vhost, datapathid))
         if ports is None:
             # The connection was lost again before synchronization finished
             raise ConnectionResetException(
                 'Datapath %016x is not connected' % datapathid)
     for p in ports.values():
         if p.name == name:
             self.apiroutine.retvalue = p
             return
     # Port not present yet: wait for a PORT_STATUS message with this name
     yield (OpenflowAsyncMessageEvent.createMatcher(
         of13.OFPT_PORT_STATUS,
         datapathid,
         0,
         _ismatch=lambda x: x.message.desc.name == name), )
     self.apiroutine.retvalue = self.apiroutine.event.message.desc
Example #13
0
 def _getkeys(self, keys):
     """Retrieve *keys* once from objectdb and release the watch right away;
     the mget result is left in self.app_routine.retvalue."""
     self._reqid += 1
     reqid = ('virtualrouter', self._reqid)
     for m in callAPI(self.app_routine, 'objectdb', 'mget',
                      {'keys': keys, 'requestid': reqid}):
         yield m
     fetched = self.app_routine.retvalue
     # Entering and immediately leaving the context unwatches the keys
     with watch_context(keys, fetched, reqid, self.app_routine):
         pass
Example #14
0
    def deletevirtualrouters(self, routers):
        """Delete multiple virtual routers.

        :param routers: iterable of dicts, each carrying a unique 'id'

        :return: {"status": "OK"} in self.app_routine.retvalue
        """
        # Validation: every router must carry a unique id
        idset = set()
        for router in routers:
            if 'id' not in router:
                # Fixed error message typo ("special" -> "specify")
                raise ValueError(" must specify id")
            if router['id'] in idset:
                raise ValueError(" id repeat " + router['id'])
            idset.add(router['id'])

        routersetkey = [VRouterSet.default_key()]
        routerkeys = [VRouter.default_key(v['id']) for v in routers]

        def deletevrouter(keys, values):
            # values[0] is the VRouterSet; values[1:] are the routers
            routerset = values[0]
            for i in range(0, len(routers)):
                # A router with attached interfaces must not be deleted
                if values[i + 1].interfaces.dataset():
                    raise ValueError("there interface in router, delete it first")
                routerset.set.dataset().discard(values[i + 1].create_weakreference())
            # Returning None for a key deletes the object
            return keys, [routerset] + [None] * len(routers)

        # The pointless try/except: raise wrapper was removed; exceptions
        # propagate unchanged either way
        for m in callAPI(self.app_routine, "objectdb", "transact",
                         {"keys": routersetkey + routerkeys, "updater": deletevrouter}):
            yield m
        self.app_routine.retvalue = {"status": "OK"}
Example #15
0
 def publish(self, keys = (), extrainfo = None):
     """Publish update notifications for *keys* through Redis.

     Keys left over from a previously failed publish are merged into this
     batch; on connection errors the keys are saved again for a later retry.

     :param keys: iterable of keys to publish

     :param extrainfo: extra data attached to the published message
     """
     keys = [_bytes(k) for k in keys]
     if self._publish_wait:
         # Merge keys that failed to publish last time
         merged_keys = list(self._publish_wait.union(keys))
         self._publish_wait.clear()
     else:
         merged_keys = list(keys)
     if not merged_keys:
         return
     for m in callAPI(self, 'redisdb', 'getclient', {'vhost':self.vhostbind}):
         yield m
     client, encoder, _ = self.retvalue
     # Unique transaction id: publisher key + monotonically increasing number
     transactid = '%s-%016x' % (self._publishkey, self._publishno)
     self._publishno += 1
     msg = encoder({'id':transactid, 'keys':[_str(k) for k in merged_keys], 'extrainfo': extrainfo})
     try:
         if len(merged_keys) > self._singlecastlimit:
             # Too many keys: broadcast once on the shared prefix channel
             for m in client.execute_command(self, 'PUBLISH', self.prefix, msg):
                 yield m
         else:
             # Publish on each per-key channel inside a MULTI/EXEC block
             for m in client.batch_execute(self, *((('MULTI',),) +
                                                 tuple(('PUBLISH', self.prefix + k, msg) for k in merged_keys) +
                                                 (('EXEC',),))):
                 yield m
     except (IOError, ConnectionResetException):
         # Connection trouble: keep the keys and retry on the next publish
         self._logger.warning('Following keys are not published because exception occurred, delay to next publish: %r', merged_keys, exc_info = True)
         self._publish_wait.update(merged_keys)
Example #16
0
 def get(self, sessionid, refresh=True):
     """Fetch the value stored for this session id from memory storage;
     when *refresh* is True the storage timeout is renewed. The result is
     left in self.apiroutine.retvalue."""
     storage_timeout = self.timeout if refresh else None
     for m in callAPI(self.apiroutine, 'memorystorage', 'get',
                      {'key': __name__ + '.' + sessionid,
                       'timeout': storage_timeout}):
         yield m
Example #17
0
    def ipam_releasepool(self, env, params):
        """Docker IPAM ReleasePool handler: drop the reservation for a pool.

        :param env: HTTP environment used to send the JSON response

        :param params: request body; ``PoolID`` identifies the reserved pool
        """
        poolid = params['PoolID']

        def _updater(keys, values, timestamp):
            # There are two situations for Release Pool:
            # 1. The pool is already used to create a network, in this situation, the pool should be
            #    released with the network removal
            # 2. The pool has not been used for network creation, we should release it from the reservation
            reservepool = values[0]
            # Expired reservations are garbage-collected on every transaction
            removed_keys = self._remove_staled_pools(reservepool, timestamp)
            if poolid in reservepool.reserved_pools:
                # Remove the reservation object and its marker object
                # (keyed by reserved_pools[poolid][0])
                removed_keys.append(IPAMReserve.default_key(poolid))
                removed_keys.append(
                    IPAMReserveMarker.default_key(
                        reservepool.reserved_pools[poolid][0]))
                del reservepool.reserved_pools[poolid]
            # Writing None for a key removes that object from objectdb
            return ((keys[0], ) + tuple(removed_keys),
                    (values[0], ) + (None, ) * len(removed_keys))

        # withtime=True passes the server timestamp into the updater
        for m in callAPI(
                self, 'objectdb', 'transact', {
                    'keys': (IPAMPoolReserve.default_key(), ),
                    'updater': _updater,
                    'withtime': True
                }):
            yield m
        env.outputjson({})
Example #18
0
    def load(self, container):
        """On module load, make sure the three global singleton objects exist
        in objectdb, then continue with the normal module loading."""
        initkeys = [
            VRouterSet.default_key(),
            DVRouterForwardSet.default_key(),
            DVRouterExternalAddressInfo.default_key()
        ]

        def init(keys, values):
            # Create each singleton only when it does not exist yet
            singleton_types = (VRouterSet, DVRouterForwardSet,
                               DVRouterExternalAddressInfo)
            for index, cls in enumerate(singleton_types):
                if values[index] is None:
                    values[index] = cls()
            return keys, values

        for m in callAPI(container, "objectdb", "transact",
                         {"keys": initkeys, "updater": init}):
            yield m

        for m in Module.load(self, container):
            yield m
Example #19
0
 def _acquiretable(self):
     """Acquire openflow tables for this module and keep the table maps
     updated for the module lifetime; releases the tables on exit."""
     try:
         if not self._tablerequest:
             # This module did not request any tables
             return
         def update_table():
             # event.result maps vhost -> (all tables, path tables)
             self._all_tables = dict((v,r[0]) for v,r in self._tableacquire_routine.event.result.items())
             self._path_tables = dict((v, r[1]) for v,r in self._tableacquire_routine.event.result.items())
             self.table_acquired()
         for m in callAPI(self._tableacquire_routine, 'openflowmanager', 'acquiretable', {'modulename': self.getServiceName()}):
             yield m
         table_update = TableAcquireUpdate.createMatcher()
         try:
             while True:
                 yield (table_update,)
                 if hasattr(self._tableacquire_routine.event, 'exception'):
                     raise self._tableacquire_routine.event.exception
                 elif not self._tableacquire_routine.event.result:
                     # Empty result: acquisition still in progress, keep waiting
                     continue
                 else:
                     update_table()
                     break
         except Exception as exc:
             # The first acquirement failed: mark the module load as failed
             for m in self.changestate(ModuleLoadStateChanged.FAILED, self._tableacquire_routine):
                 yield m
             raise exc
         else:
             for m in self.changestate(ModuleLoadStateChanged.SUCCEEDED, self._tableacquire_routine):
                 yield m
         # Track further table updates for the rest of the module lifetime
         while True:
             yield (table_update,)
             if self._tableacquire_routine.matcher is table_update:
                 if hasattr(self._tableacquire_routine.event, 'exception'):
                     # Ignore a failed table acquire
                     continue
                 elif not self._tableacquire_routine.event.result:
                     # Tables were reset: request them again
                     for m in callAPI(self._tableacquire_routine, 'openflowmanager', 'acquiretable', {'modulename': self.getServiceName()}):
                         yield m
                 else:
                     update_table()
     finally:
         def unacquire():
             # Best-effort release; QuitException means we are shutting down
             try:
                 for m in callAPI(self._tableacquire_routine, 'openflowmanager', 'unacquiretable', {'modulename': self.getServiceName()}):
                     yield m
             except QuitException:
                 pass
         self._tableacquire_routine.subroutine(unacquire(), False)
Example #20
0
 def _dumpkeys(self, keys):
     """Fetch *keys* from objectdb and leave their dumped (plain) forms in
     self.apiroutine.retvalue; the watch is released before returning."""
     self._reqid += 1
     reqid = ('testobjectdb', self._reqid)
     for m in callAPI(self.apiroutine, 'objectdb', 'mget',
                      {'keys': keys, 'requestid': reqid}):
         yield m
     fetched = self.apiroutine.retvalue
     with watch_context(keys, fetched, reqid, self.apiroutine):
         self.apiroutine.retvalue = [dump(obj) for obj in fetched]
Example #21
0
 def get(self, sessionid, refresh=True):
     """Retrieve the stored session value, renewing the storage timeout when
     *refresh* is set; the result is left in self.apiroutine.retvalue."""
     storage_key = __name__ + "." + sessionid
     storage_timeout = self.timeout if refresh else None
     for m in callAPI(self.apiroutine, "memorystorage", "get",
                      {"key": storage_key, "timeout": storage_timeout}):
         yield m
Example #22
0
 def unacquire():
     """Best-effort release of the acquired openflow tables; a QuitException
     during shutdown is silently ignored."""
     try:
         api_call = callAPI(self._tableacquire_routine, 'openflowmanager',
                            'unacquiretable',
                            {'modulename': self.getServiceName()})
         for m in api_call:
             yield m
     except QuitException:
         pass
Example #23
0
 def load(self, container):
     """Module startup: route data-object update events into a dedicated
     sub-queue, create the update notifier and run it with the module."""
     self.scheduler.queue.addSubQueue(
         self.objectupdatepriority,
         dataobj.DataObjectUpdateEvent.createMatcher(),
         'dataobjectupdate')
     for m in callAPI(container, 'updatenotifier', 'createnotifier'):
         yield m
     notifier = container.retvalue
     self._notifier = notifier
     for m in Module.load(self, container):
         yield m
     self.routines.append(notifier)
Example #24
0
 def _get_existing_ports(self):
     """Collect the port lists of every main openflow connection, then mark
     this service as synchronized and announce it."""
     for m in callAPI(self.apiroutine, 'openflowmanager', 'getallconnections',
                      {'vhost': None}):
         yield m
     # Only main connections (openflow_auxiliaryid == 0) carry port lists
     port_queries = [self._get_ports(conn, conn.protocol, False, False)
                     for conn in self.apiroutine.retvalue
                     if conn.openflow_auxiliaryid == 0]
     with closing(self.apiroutine.executeAll(port_queries)) as g:
         for m in g:
             yield m
     self._synchronized = True
     for m in self.apiroutine.waitForSend(
             ModuleNotification(self.getServiceName(), 'synchronized')):
         yield m
Example #25
0
 def clearup():
     """Stop watching the keys of this request; a QuitException simply means
     shutdown is already in progress."""
     try:
         for m in callAPI(container, module, 'munwatch',
                          {'keys': keys, 'requestid': reqid}):
             yield m
     except QuitException:
         pass
Example #26
0
    def updatevirtualrouters(self, routers):
        """Update multiple virtual routers.

        :param routers: iterable of dicts; each must carry a unique 'id',
                        and may carry 'routes' as (ip_prefix, nexthop) pairs

        The updated routers are dumped via _dumpkeys.
        """
        # Validation: unique ids and well-formed static routes
        idset = set()
        for router in routers:
            if 'id' not in router:
                raise ValueError(" must specify id")
            if router['id'] in idset:
                raise ValueError(" id repeat " + router['id'])
            idset.add(router['id'])

            # if routers updating ,, check format
            if 'routes' in router:
                for r in router['routes']:
                    ip_prefix = r[0]
                    nexthop = r[1]
                    if ip_prefix and nexthop:
                        # These raise on malformed prefix / address
                        ip_prefix = parse_ip4_network(ip_prefix)
                        nexthop = check_ip_address(nexthop)
                    else:
                        # BUG FIX: '+ r' raised TypeError (r is a tuple/list),
                        # masking the intended ValueError; use %r instead
                        raise ValueError("routes format error %r" % (r,))

        routerkeys = [VRouter.default_key(r['id']) for r in routers]

        def updaterouter(keys, values):
            for i in range(0, len(routers)):
                if not values[i]:
                    raise ValueError("router object not exists " +
                                     routers[i]['id'])
                for k, v in routers[i].items():
                    if k == 'routes':
                        # Replace the whole static route list
                        values[i].routes = list(v)
                    else:
                        setattr(values[i], k, v)
            return keys, values

        # The pointless try/except: raise wrapper was removed; exceptions
        # propagate unchanged either way
        for m in callAPI(self.app_routine, 'objectdb', 'transact',
                         {'keys': routerkeys, 'updater': updaterouter}):
            yield m
        for m in self._dumpkeys(routerkeys):
            yield m
Example #27
0
 def load(self, container):
     """Register the data-object update sub-queue, build the update notifier
     and attach it to the module's routine list."""
     update_matcher = dataobj.DataObjectUpdateEvent.createMatcher()
     self.scheduler.queue.addSubQueue(self.objectupdatepriority,
                                      update_matcher,
                                      'dataobjectupdate')
     for m in callAPI(container, 'updatenotifier', 'createnotifier'):
         yield m
     self._notifier = container.retvalue
     for m in Module.load(self, container):
         yield m
     self.routines.append(self._notifier)
Example #28
0
 def _create_lognet():
     """Create the logical network; report failure through self.retvalue
     (the exception object) instead of raising, None on success."""
     outcome = None
     try:
         for m in callAPI(self, 'viperflow', 'createlogicalnetwork',
                          network_params):
             yield m
     except Exception as exc:
         outcome = exc
     self.retvalue = outcome
Example #29
0
 def sessionstart(self):
     "Start session. Must start service.utils.session.Session to use this method"
     # Already have a live session: nothing to do
     if getattr(self, 'session', None):
         return
     for m in callAPI(self.container, 'session', 'start',
                      {'cookies': self.rawcookie}):
         yield m
     self.session, setcookies = self.container.retvalue
     # Replace any previously queued cookie that shares a key with a new one
     for nc in setcookies:
         self.sent_cookies = [c for c in self.sent_cookies if c.key != nc.key]
         self.sent_cookies.append(nc)
Example #30
0
    def listrouterinterfaces(self, id, **kwargs):
        """
        Query router ports from a virtual router
        
        :param id: virtual router ID
        
        :param \*\*kwargs: customized filters on router interfaces
        
        :return: a list of dictionaries each stands for a matched router interface
        """
        if not id:
            # Fixed garbled message (was " must special router id")
            raise ValueError("must specify router id")

        routerkey = VRouter.default_key(id)

        # Bug fix: this was `self._reqid = +1`, which set the counter to
        # the constant 1 on every call instead of incrementing it, so
        # concurrent requests could share the same request id
        self._reqid += 1
        reqid = ("virtualrouter", self._reqid)

        def set_walker(key, interfaces, walk, save):
            # Walk every interface referenced by the router; save those
            # matching all the customized filters in kwargs
            for weakobj in interfaces.dataset():
                vpkey = weakobj.getkey()

                try:
                    virtualport = walk(vpkey)
                except KeyError:
                    # Not retrieved yet; objectdb retries the walk later
                    pass
                else:
                    if all(
                            getattr(virtualport, k, None) == v
                            for k, v in kwargs.items()):
                        save(vpkey)

        def walk_func(filter_func):
            def walker(key, obj, walk, save):
                if obj is None:
                    return

                set_walker(key, filter_func(obj), walk, save)

            return walker

        for m in callAPI(
                self.app_routine, "objectdb", "walk", {
                    "keys": [routerkey],
                    "walkerdict": {
                        routerkey: walk_func(lambda x: x.interfaces)
                    },
                    "requestid": reqid
                }):
            yield m

        keys, values = self.app_routine.retvalue

        # Keep the retrieved objects watched while dumping them
        with watch_context(keys, values, reqid, self.app_routine):
            self.app_routine.retvalue = [dump(r) for r in values]
Example #31
0
 def sessiondestroy(self):
     """
     Destroy the current session (if any) and queue the expiring cookies
     returned by the session service.
     """
     if hasattr(self, 'session') and self.session:
         for m in callAPI(self.container, 'session', 'destroy', {'sessionid':self.session.id}):
             yield m
         # Release the session handle before dropping the reference
         self.session.unlock()
         del self.session
         setcookies = self.container.retvalue
         for nc in setcookies:
             # Replace any previously queued cookie with the same key
             self.sent_cookies = [c for c in self.sent_cookies if c.key != nc.key]
             self.sent_cookies.append(nc)
Example #32
0
 def create(self):
     """
     Create a new session object with a random id, store it in memory
     storage with the configured timeout, and return a SessionHandle
     through self.apiroutine.retvalue.
     """
     session_id = uuid4().hex
     session_obj = self.SessionObject(session_id)
     params = {'key': __name__ + '.' + session_id,
               'value': session_obj,
               'timeout': self.timeout}
     for ev in callAPI(self.apiroutine, 'memorystorage', 'set', params):
         yield ev
     self.apiroutine.retvalue = self.SessionHandle(session_obj, self.apiroutine)
Example #33
0
 def destroy(self, sessionid):
     """
     Delete the stored session object and return (through retvalue) a
     "deleted" cookie that immediately expires the session cookie on the
     client side.
     
     :param sessionid: id of the session to destroy
     """
     for ev in callAPI(self.apiroutine, "memorystorage", "delete",
                       {"key": __name__ + "." + sessionid}):
         yield ev
     expired = Morsel()
     expired.key = self.cookiename
     expired.value = "deleted"
     expired.coded_value = "deleted"
     # max-age 0 makes the browser drop the cookie immediately
     expired.update({"path": "/", "httponly": True, "max-age": 0})
     self.apiroutine.retvalue = [expired]
Example #34
0
 def _manage_existing(self):
     """
     Adopt Openflow connections that were established before this module
     started, then announce that the module is synchronized.
     """
     for m in callAPI(self.apiroutine, "openflowserver", "getconnections", {}):
         yield m
     vb = self.vhostbind
     for c in self.apiroutine.retvalue:
         # vhostbind None means: bind to all vhosts
         if vb is None or c.protocol.vhost in vb:
             self._add_connection(c)
     self._synchronized = True
     for m in self.apiroutine.waitForSend(ModuleNotification(self.getServiceName(), 'synchronized')):
         yield m
Example #35
0
 def destroy(self, sessionid):
     """
     Delete the stored session object and return (through retvalue) a
     "deleted" cookie that expires the session cookie on the client.
     
     :param sessionid: id of the session to destroy
     """
     for m in callAPI(self.apiroutine, 'memorystorage', 'delete',
                      {'key': __name__ + '.' + sessionid}):
         yield m
     m = Morsel()
     m.key = self.cookiename
     m.value = 'deleted'
     m.coded_value = 'deleted'
     # max-age 0 makes the browser drop the cookie immediately
     opts = {'path': '/', 'httponly': True, 'max-age': 0}
     m.update(opts)
     self.apiroutine.retvalue = [m]
Example #36
0
 def move_key(key):
     """Copy a single key from the source storage to the destination storage."""
     try:
         for m in callAPI(self.apiroutine, src_service, 'mget',
                          {
                              'keys': (key, ),
                              'vhost': src_vhost
                          }):
             yield m
     except ValueError:
         # The stored value cannot be decoded; skip it (best-effort copy)
         print(
             'Key %r is not valid, it cannot be loaded. Ignore this key.'
             % (key, ))
     else:
         # retvalue[0] is the value for the single requested key
         for m in callAPI(
                 self.apiroutine, dst_service, 'mset', {
                     'kvpairs':
                     ((key, self.apiroutine.retvalue[0]), ),
                     'vhost': dst_vhost
                 }):
             yield m
Example #37
0
 def create(self):
     """
     Create a new session object with a random id, store it with the
     configured timeout, and return a SessionHandle through retvalue.
     """
     sid = uuid4().hex
     sobj = self.SessionObject(sid)
     for m in callAPI(
         self.apiroutine,
         "memorystorage",
         "set",
         {"key": __name__ + "." + sid, "value": sobj, "timeout": self.timeout},
     ):
         yield m
     self.apiroutine.retvalue = self.SessionHandle(sobj, self.apiroutine)
Example #38
0
 def _get_existing_ports(self):
     """
     Start a port-monitoring routine for every current OVSDB connection,
     wait until all of them report their ports, then announce that the
     module is synchronized.
     """
     for m in callAPI(self.apiroutine, 'ovsdbmanager', 'getallconnections', {'vhost':None}):
         yield m
     matchers = []
     for c in self.apiroutine.retvalue:
         self.monitor_routines.add(self.apiroutine.subroutine(self._get_ports(c, c.protocol)))
         matchers.append(OVSDBConnectionPortsSynchronized.createMatcher(c))
     # Wait for every connection to finish its initial port query
     for m in self.apiroutine.waitForAll(*matchers):
         yield m
     self._synchronized = True
     for m in self.apiroutine.waitForSend(ModuleNotification(self.getServiceName(), 'synchronized')):
         yield m
Example #39
0
        def _cleanup_ipam(docker_ipam_poolid):
            """Remove the IPAM reservation object for the given pool id."""
            @updater
            def _remove_reserve(pool):
                # Returning None for a key deletes the object
                return (None, )

            pool_key = IPAMReserve.default_key(docker_ipam_poolid)
            for ev in callAPI(self, 'objectdb', 'transact',
                              {'keys': (pool_key, ),
                               'updater': _remove_reserve}):
                yield ev
Example #40
0
 def _manage_existing(self):
     """
     Adopt Openflow connections established before this module started,
     then announce that the module is synchronized.
     """
     for m in callAPI(self.apiroutine, "openflowserver", "getconnections",
                      {}):
         yield m
     vb = self.vhostbind
     for c in self.apiroutine.retvalue:
         # vhostbind None means: bind to all vhosts
         if vb is None or c.protocol.vhost in vb:
             self._add_connection(c)
     self._synchronized = True
     for m in self.apiroutine.waitForSend(
             ModuleNotification(self.getServiceName(), 'synchronized')):
         yield m
Example #41
0
 def get(self, sessionid, refresh = True):
     """
     Get the session object of the session id
     
     :param sessionid: a session ID
     
     :param refresh: if True, refresh the expiration time of this session
     
     :return: Session object or None if not exists
     """
     # memorystorage.get leaves the result in self.apiroutine.retvalue;
     # passing timeout=None leaves the expiration unchanged
     for m in callAPI(self.apiroutine, 'memorystorage', 'get', {'key': __name__ + '.' + sessionid, 'timeout': self.timeout if refresh else None}):
         yield m
Example #42
0
 def apiHandler(self, env, targetname, methodname, **kwargs):
     """
     Handle a web API request: decode parameters (query string or JSON
     body), apply target allow/deny lists, optionally authenticate, call
     the target module API and output the result as JSON.
     """
     params = kwargs
     parent = self.parent
     if not params:
         # No query/form parameters: try to read a JSON request body
         if b'content-type' in env.headerdict and env.inputstream is not None and parent.acceptjson:
             m = Message()
             m['content-type'] = _str(env.headerdict[b'content-type'])
             if m.get_content_type() == 'application/json':
                 charset = m.get_content_charset('utf-8')
                 # NOTE(review): `m` is reused as the loop variable here,
                 # discarding the Message object above
                 for m in env.inputstream.read(self):
                     yield m
                 # NOTE(review): passing `charset` positionally relies on the
                 # legacy json.loads `encoding` parameter (removed in 3.9) —
                 # confirm the supported Python versions
                 params = json.loads(_str(self.data, charset), charset, object_hook=decode_object)
     elif parent.typeextension:
         # Values wrapped in backquotes are evaluated as Python literals
         for k in params.keys():
             v = params[k]
             if v[:1] == '`' and v[-1:] == '`':
                 try:
                     params[k] = ast.literal_eval(v[1:-1])
                 except:
                     pass
     # Allow-list takes precedence over deny-list
     if parent.allowtargets is not None:
         if targetname not in parent.allowtargets:
             for m in env.error(403):
                 yield m
             return
     elif parent.denytargets is not None:
         if targetname in parent.denytargets:
             for m in env.error(403):
                 yield m
             return
     if parent.authmethod:
         # Call the configured authentication hook before the real API
         for m in callAPI(self, parent.authtarget, parent.authmethod,
                          {'env':env, 'targetname':targetname, 'name':methodname, 'params': params}):
             yield m
     for m in callAPI(self, targetname, methodname, params):
         yield m
     env.header('Content-Type', 'application/json')
     env.outputdata(json.dumps({'result':self.retvalue}, default=self.jsonencoder.jsonencoder).encode('ascii'))
Example #43
0
 def _waitforchange(self, key):
     """
     Watch the set object stored at *key* and spawn an update routine for
     every port key newly added to the set.
     """
     for m in callAPI(self.apiroutine, 'objectdb', 'watch', {'key': key, 'requestid': 'testobjectdb'}):
         yield m
     setobj = self.apiroutine.retvalue
     with watch_context([key], [setobj], 'testobjectdb', self.apiroutine):
         # Wait until the watched object is first retrieved
         for m in setobj.wait(self.apiroutine):
             yield m
         oldset = set()
         while True:
             # Process only keys added since the last wakeup
             for weakref in setobj.set.dataset().difference(oldset):
                 self.apiroutine.subroutine(self._updateport(weakref.getkey()))
             oldset = set(setobj.set.dataset())
             # Block until the set object changes again
             for m in setobj.waitif(self.apiroutine, lambda x: not x.isdeleted(), True):
                 yield m
Example #44
0
 def resync(self, datapathid, vhost = ''):
     '''
     Resync with current ports
     
     :param datapathid: datapath id of the bridge to resync
     
     :param vhost: vhost the OVSDB connection is bound to
     '''
     # Sometimes when the OVSDB connection is very busy, monitor message may be dropped.
     # We must deal with this and recover from it
     # Save current managed_ports
     if (vhost, datapathid) not in self.managed_ports:
         # Nothing managed for this datapath; nothing to resync
         self.apiroutine.retvalue = None
         return
     else:
         for m in callAPI(self.apiroutine, 'ovsdbmanager', 'getconnection', {'datapathid': datapathid, 'vhost':vhost}):
             yield m
         c = self.apiroutine.retvalue
         if c is not None:
             # For now, we restart the connection...
             for m in c.reconnect(False):
                 yield m
             for m in self.apiroutine.waitWithTimeout(0.1):
                 yield m
             # Block until the datapath is connected again
             for m in callAPI(self.apiroutine, 'ovsdbmanager', 'waitconnection', {'datapathid': datapathid,
                                                                                  'vhost': vhost}):
                 yield m
     self.apiroutine.retvalue = None
Example #45
0
 def _manage_existing(self):
     """
     Start bridge discovery on JSON-RPC (OVSDB) connections that existed
     before this module started, wait until their setup finishes, then
     announce that the module is synchronized.
     """
     for m in callAPI(self.apiroutine, "jsonrpcserver", "getconnections", {}):
         yield m
     vb = self.vhostbind
     conns = self.apiroutine.retvalue
     for c in conns:
         if vb is None or c.protocol.vhost in vb:
             # Mark the connection so the bridge query is started only once
             if not hasattr(c, '_ovsdb_manager_get_bridges'):
                 c._ovsdb_manager_get_bridges = self.apiroutine.subroutine(self._get_bridges(c, c.protocol))
     matchers = [OVSDBConnectionSetup.createMatcher(None, c, c.connmark) for c in conns]
     for m in self.apiroutine.waitForAll(*matchers):
         yield m
     self._synchronized = True
     for m in self.apiroutine.waitForSend(ModuleNotification(self.getServiceName(), 'synchronized')):
         yield m
Example #46
0
 def createphysicalnetwork(self, type = 'vlan', id = None, **kwargs):
     """
     Create a single physical network in the object database.
     
     :param type: network type (e.g. 'vlan')
     
     :param id: physical network id; generated when None
     
     :param \*\*kwargs: extra attributes stored on the network object
     
     :return: (via retvalue) a dictionary dump of the created network
     """
     new_network, new_map = self._createphysicalnetwork(type, id, **kwargs)
     @updater
     def create_phy(physet, phynet, phymap):
         # Store the freshly created network and its map, and register a
         # weak reference in the global physical network set
         phynet = set_new(phynet, new_network)
         phymap = set_new(phymap, new_map)
         physet.set.dataset().add(phynet.create_weakreference())
         return [physet, phynet, phymap]
     for m in callAPI(self.apiroutine, 'objectdb', 'transact', {'keys':[PhysicalNetworkSet.default_key(),
                                                                        new_network.getkey(),
                                                                        new_map.getkey()],'updater':create_phy}):
         yield m
     for m in self._dumpkeys([new_network.getkey()]):
         yield m
     # _dumpkeys returns a list; unwrap the single element
     self.apiroutine.retvalue = self.apiroutine.retvalue[0]
Example #47
0
 def createphysicalnetworks(self, networks):
     """
     Create multiple physical networks in a single transaction.
     
     :param networks: a list of dictionaries, each used as keyword
                      arguments for _createphysicalnetwork
     
     :return: (via retvalue) dictionary dumps of the created networks
     """
     new_networks = [self._createphysicalnetwork(**n) for n in networks]
     @updater
     def create_phys(physet, *phynets):
         # phynets interleaves (network, map) pairs, matching `keys` below
         return_nets = [None, None] * len(new_networks)
         for i in range(0, len(new_networks)):
             return_nets[i * 2] = set_new(phynets[i * 2], new_networks[i][0])
             return_nets[i * 2 + 1] = set_new(phynets[i * 2 + 1], new_networks[i][1])
             physet.set.dataset().add(new_networks[i][0].create_weakreference())
         return [physet] + return_nets
     keys = [sn.getkey() for n in new_networks for sn in n]
     for m in callAPI(self.apiroutine, 'objectdb', 'transact', {'keys':[PhysicalNetworkSet.default_key()] + keys,'updater':create_phys}):
         yield m
     for m in self._dumpkeys([n[0].getkey() for n in new_networks]):
         yield m
Example #48
0
 def waitinner():
     """
     Resolve the requested port on the datapath, waiting for connection,
     port synchronization or a PORT_STATUS event as needed; the port
     description is stored in self.apiroutine.retvalue.
     """
     ports = self.managed_ports.get((vhost, datapathid))
     if ports is None:
         # Datapath not connected yet: wait for the connection first
         for m in callAPI(self.apiroutine, 'openflowmanager', 'waitconnection', {'datapathid': datapathid, 'vhost':vhost, 'timeout': timeout}):
             yield m
         c = self.apiroutine.retvalue
         ports = self.managed_ports.get((vhost, datapathid))
         if ports is None:
             # Connection exists but its ports are not synchronized yet
             yield (OpenflowPortSynchronized.createMatcher(c),)
         ports = self.managed_ports.get((vhost, datapathid))
         if ports is None:
             raise ConnectionResetException('Datapath %016x is not connected' % datapathid)
     if portno not in ports:
         # Wait for a PORT_STATUS event carrying the wanted port number
         yield (OpenflowAsyncMessageEvent.createMatcher(of13.OFPT_PORT_STATUS, datapathid, 0, _ismatch = lambda x: x.message.desc.port_no == portno),)
         self.apiroutine.retvalue = self.apiroutine.event.message.desc
     else:
         self.apiroutine.retvalue = ports[portno]
Example #49
0
 def load(self, container):
     """
     Module load hook: make sure the four top-level object sets exist in
     the object database before delegating to ``Module.load``.
     """
     @updater
     def initialize(phynetset, lognetset, logportset, phyportset):
         # Create any set object that does not exist yet; sets that are
         # already present are returned unchanged
         if phynetset is None:
             phynetset = PhysicalNetworkSet()
             phynetset.set = DataObjectSet()
         if lognetset is None:
             lognetset = LogicalNetworkSet()
             lognetset.set = DataObjectSet()
         if logportset is None:
             logportset = LogicalPortSet()
             logportset.set = DataObjectSet()
         if phyportset is None:
             phyportset = PhysicalPortSet()
             phyportset.set = DataObjectSet()
         return [phynetset, lognetset, logportset, phyportset]
     for m in callAPI(container, 'objectdb', 'transact', {'keys':[PhysicalNetworkSet.default_key(),
                                                                LogicalNetworkSet.default_key(),
                                                                LogicalPortSet.default_key(),
                                                                PhysicalPortSet.default_key()],
                                                          'updater': initialize}):
         yield m
     for m in Module.load(self, container):
         yield m
Example #50
0
    def run(self):
        """
        Repair tool: walk all physical network maps, find references to
        logical networks whose objects are missing, and remove the stale
        references from the maps in a single transaction.
        """
        saved_lgnet_keys = []
        saved_phymap_keys = []
        saved_physicalnetworkmap_to_logicalnetwork = {}

        def walk_phynetset(key,value,walk,save):
            # Collect every physical network map and the keys of the
            # logical networks it references whose objects are missing
            # (walk returns a falsy value)
            for weak_phynet in value.set.dataset():
                try:
                    phynetobj = walk(weak_phynet.getkey())
                except KeyError:
                    pass
                else:
                    phynetmapkey = PhysicalNetworkMap.default_key(phynetobj.id)
                    try:
                        phynetmapobj = walk(phynetmapkey)
                    except KeyError:
                        pass
                    else:
                        save(phynetmapobj.getkey())
                        saved_phymap_keys.append(phynetmapobj.getkey())
                        saved_physicalnetworkmap_to_logicalnetwork[phynetmapobj.getkey()] = []
                        for lgnet in phynetmapobj.logicnetworks.dataset():
                            try:
                                lgnetobj = walk(lgnet.getkey())
                            except KeyError:
                                pass
                            else:
                                if not lgnetobj:
                                    save(lgnet.getkey())
                                    saved_lgnet_keys.append(lgnet.getkey())
                                    saved_physicalnetworkmap_to_logicalnetwork[phynetmapobj.getkey()]\
                                        .append(lgnet.getkey())

        for m in callAPI(self.apiroutine,"objectdb","walk",
                    {"keys":[PhysicalNetworkSet.default_key()],
                    "walkerdict":{PhysicalNetworkSet.default_key():walk_phynetset},
                    "requestid":1}):
            yield m

        # The walker may be retried by objectdb; deduplicate the results
        saved_lgnet_keys = list(set(saved_lgnet_keys))
        saved_phymap_keys = list(set(saved_phymap_keys))

        print("we have find all physicalnetworkmap %s" % saved_phymap_keys)

        for k in saved_phymap_keys:
            print("# # begin to repair physicalnetwork %s" % k)
            print("# # # # remove invaild lgnet %s" % saved_physicalnetworkmap_to_logicalnetwork[k])

        def updater(keys,values):
            # keys/values layout (built as transact_keys below): for each
            # map key k, [k, lgnet keys of k...]; `start` indexes the
            # current map key
            start = 0
            for i in range(len(saved_phymap_keys)):

                lgnet_keys = keys[1+start:start+1+len(saved_physicalnetworkmap_to_logicalnetwork[keys[start]])]
                lgnet = values[1+start:start+1+len(saved_physicalnetworkmap_to_logicalnetwork[keys[start]])]

                lgnet_dict = dict(zip(lgnet_keys,lgnet))

                for n in list(values[start].logicnetworks.dataset()):
                    if n.getkey() in lgnet_dict and lgnet_dict[n.getkey()] is None:
                        print("remove %s from %s" % (n.getkey(),keys[start]))
                        values[start].logicnetworks.dataset().discard(n)

                # Bug fix: this previously used `start = len(...) + 1`,
                # assigning an absolute value instead of accumulating, so
                # with three or more maps the third and later iterations
                # indexed the wrong keys. Advance cumulatively instead.
                start += len(saved_physicalnetworkmap_to_logicalnetwork[keys[start]]) + 1

            return keys,values

        transact_keys = []

        for k in saved_phymap_keys:
            transact_keys.append(k)
            for key in saved_physicalnetworkmap_to_logicalnetwork[k]:
                transact_keys.append(key)

        if transact_keys:
            for m in callAPI(self.apiroutine,"objectdb","transact",{"keys":transact_keys,
                                                                "updater":updater}):
                yield m

        print("# # # # # # repair success !")
Example #51
0
 def _acquire_tables(self):
     """
     Collect table requests from all registered modules, build a global
     flow-table ordering per vhost with a topological sort, and broadcast
     the result (or the failure) with a TableAcquireUpdate event.
     """
     # Bug fix: initialize before the loop. If _acquire_updated is
     # already False the while body never runs, and the code after the
     # try/finally previously raised NameError on `exception`/`result`.
     result = None
     exception = None
     try:
         while self._acquire_updated:
             result = None
             exception = None
             # Delay the update so we are not updating table acquires for every module
             for m in self.apiroutine.waitForSend(TableAcquireDelayEvent()):
                 yield m
             yield (TableAcquireDelayEvent.createMatcher(),)
             module_list = list(self.table_modules)
             self._acquire_updated = False
             try:
                 for m in self.apiroutine.executeAll((callAPI(self.apiroutine, module, 'gettablerequest', {}) for module in module_list)):
                     yield m
             except QuitException:
                 raise
             except Exception as exc:
                 self._logger.exception('Acquiring table failed')
                 exception = exc
             else:
                 requests = [r[0] for r in self.apiroutine.retvalue]
                 vhosts = set(vh for _, vhs in requests if vhs is not None for vh in vhs)
                 vhost_result = {}
                 # Requests should be list of (name, (ancester, ancester, ...), pathname)
                 for vh in vhosts:
                     graph = {}
                     table_path = {}
                     try:
                         for r in requests:
                             if r[1] is None or vh in r[1]:
                                 for name, ancesters, pathname in r[0]:
                                     if name in table_path:
                                         if table_path[name] != pathname:
                                             raise ValueError("table conflict detected: %r can not be in two path: %r and %r" % (name, table_path[name], pathname))
                                     else:
                                         table_path[name] = pathname
                                     if name not in graph:
                                         graph[name] = (set(ancesters), set())
                                     else:
                                         graph[name][0].update(ancesters)
                                     for anc in ancesters:
                                         graph.setdefault(anc, (set(), set()))[1].add(name)
                     except ValueError as exc:
                         self._logger.error(str(exc))
                         exception = exc
                         break
                     else:
                         sequences = []
                         def dfs_sort(current):
                             # Emit `current`, then recurse into each
                             # dependent once all its ancestors are emitted
                             sequences.append(current)
                             for d in graph[current][1]:
                                 anc = graph[d][0]
                                 anc.remove(current)
                                 if not anc:
                                     dfs_sort(d)
                         # Bug fix: the sort key previously referenced the
                         # stale loop variable `name` instead of `x`, so
                         # every element had an identical key and the root
                         # ordering was effectively arbitrary
                         nopre_tables = sorted([k for k,v in graph.items() if not v[0]], key = lambda x: (table_path.get(x, ''), x))
                         for t in nopre_tables:
                             dfs_sort(t)
                         if len(sequences) < len(graph):
                             rest_tables = set(graph.keys()).difference(sequences)
                             self._logger.error("Circle detected in table acquiring, following tables are related: %r, vhost = %r", sorted(rest_tables), vh)
                             self._logger.error("Circle dependencies are: %s", ", ".join(repr(tuple(graph[t][0])) + "=>" + t for t in rest_tables))
                             exception = ValueError("Circle detected in table acquiring, following tables are related: %r, vhost = %r" % (sorted(rest_tables),vh))
                             break
                         elif len(sequences) > 255:
                             self._logger.error("Table limit exceeded: %d tables (only 255 allowed), vhost = %r", len(sequences), vh)
                             exception = ValueError("Table limit exceeded: %d tables (only 255 allowed), vhost = %r" % (len(sequences),vh))
                             break
                         else:
                             full_indices = list(zip(sequences, itertools.count()))
                             tables = dict((k,tuple(g)) for k,g in itertools.groupby(sorted(full_indices, key = lambda x: table_path.get(x[0], '')),
                                                        lambda x: table_path.get(x[0], '')))
                             vhost_result[vh] = (full_indices, tables)
                 else:
                     # Every vhost processed without error: publish this
                     # iteration's result (previously assigned after the
                     # loop from `vhost_result`, which could be unbound)
                     result = vhost_result
     finally:
         self._acquiring = False
     if exception:
         for m in self.apiroutine.waitForSend(TableAcquireUpdate(exception = exception)):
             yield m
     else:
         if result != self._lastacquire:
             self._lastacquire = result
             self._reinitall()
         for m in self.apiroutine.waitForSend(TableAcquireUpdate(result = result)):
             yield m
Example #52
0
 def resync(self, datapathid, vhost = ''):
     '''
     Resync with current ports
     
     :param datapathid: datapath id of the Openflow switch to resync
     
     :param vhost: vhost the Openflow connection is bound to
     '''
     # Sometimes when the OpenFlow connection is very busy, PORT_STATUS message may be dropped.
     # We must deal with this and recover from it
     # Save current managed_ports
     if (vhost, datapathid) not in self.managed_ports:
         self.apiroutine.retvalue = None
         return
     else:
         last_ports = set(self.managed_ports[(vhost, datapathid)].keys())
     add = set()
     remove = set()
     ports = {}
     # Retry up to 10 times until the queried port set stabilizes
     for _ in range(0, 10):
         for m in callAPI(self.apiroutine, 'openflowmanager', 'getconnection', {'datapathid': datapathid, 'vhost':vhost}):
             yield m
         c = self.apiroutine.retvalue
         if c is None:
             # Disconnected, will automatically resync when reconnected
             self.apiroutine.retvalue = None
             return
         ofdef = c.openflowdef
         protocol = c.protocol
         try:
             if hasattr(ofdef, 'ofp_multipart_request'):
                 # Openflow 1.3, use ofp_multipart_request to get ports
                 for m in protocol.querymultipart(ofdef.ofp_multipart_request(type=ofdef.OFPMP_PORT_DESC), c, self.apiroutine):
                     yield m
                 for msg in self.apiroutine.openflow_reply:
                     for p in msg.ports:
                         ports[p.port_no] = p
             else:
                 # Openflow 1.0, use features_request
                 request = ofdef.ofp_msg()
                 request.header.type = ofdef.OFPT_FEATURES_REQUEST
                 for m in protocol.querywithreply(request):
                     yield m
                 reply = self.apiroutine.retvalue
                 for p in reply.ports:
                     ports[p.port_no] = p
         except ConnectionResetException:
             break
         except OpenflowProtocolException:
             break
         else:
             # Recheck: the datapath may have gone away while querying
             if (vhost, datapathid) not in self.managed_ports:
                 self.apiroutine.retvalue = None
                 return
             current_ports = set(self.managed_ports[(vhost, datapathid)])
             # If a port is already removed
             remove.intersection_update(current_ports)
             # If a port is already added
             add.difference_update(current_ports)
             # If a port is not acquired, we do not add it
             acquired_keys = set(ports.keys())
             add.difference_update(acquired_keys)
             # Add and remove previous added/removed ports
             current_ports.difference_update(remove)
             current_ports.update(add)
             # If there are changed ports, the changed ports may or may not appear in the acquired port list
             # We only deal with following situations:
             # 1. If both lack ports, we add them
             # 2. If both have additional ports, we remote them
             to_add = acquired_keys.difference(current_ports.union(last_ports))
             to_remove = current_ports.intersection(last_ports).difference(acquired_keys)
             if not to_add and not to_remove and current_ports == last_ports:
                 break
             else:
                 add.update(to_add)
                 remove.update(to_remove)
                 current_ports.update(to_add)
                 current_ports.difference_update(to_remove)
                 last_ports = current_ports
     # Actual add and remove
     mports = self.managed_ports[(vhost, datapathid)]
     add_ports = []
     remove_ports = []
     for k in add:
         if k not in mports:
             add_ports.append(ports[k])
         mports[k] = ports[k]
     for k in remove:
         try:
             oldport = mports.pop(k)
         except KeyError:
             pass
         else:
             remove_ports.append(oldport)
     # Notify listeners about the reconciled port changes
     for m in self.apiroutine.waitForSend(ModuleNotification(self.getServiceName(), 'update',
                                                              datapathid = datapathid,
                                                              connection = c,
                                                              vhost = vhost,
                                                              add = add_ports, remove = remove_ports,
                                                              reason = 'resync')):
         yield m
     self.apiroutine.retvalue = None
Example #53
0
 def main(self):
     try:
         timestamp = '%012x' % (int(time() * 1000),) + '-'
         transactno = 1
         for m in callAPI(self, 'redisdb', 'getclient', {'vhost':self.vhostbind}):
             yield m
         client, encoder, decoder = self.retvalue
         try:
             for m in client.subscribe(self, self.prefix):
                 yield m
         except Exception:
             _no_connection_start = True
         else:
             _no_connection_start = False
         self._matchers[b''] = self.retvalue[0]
         if self._deflate:
             oldencoder = encoder
             olddecoder = decoder
             def encoder(x):
                 return compress(oldencoder(x))
             def decoder(x):
                 try:
                     return olddecoder(decompress(x))
                 except zlib_error:
                     return olddecoder(x)
         if not _no_connection_start:
             self.subroutine(self._modifier(client), True, "modifierroutine")
         listen_modify = ModifyListen.createMatcher(self, ModifyListen.LISTEN)
         connection_down = client.subscribe_state_matcher(self, False)
         connection_up = client.subscribe_state_matcher(self, True)
         module_loaded = ModuleLoadStateChanged.createMatcher(state = ModuleLoadStateChanged.LOADED,
                                              _ismatch = lambda x: x._instance.getServiceName() == 'redisdb')
         matchers = tuple(self._matchers.values()) + (listen_modify, connection_down)
         last_transact = None
         while True:
             if not _no_connection_start:
                 yield matchers
             if not _no_connection_start and self.matcher is listen_modify:
                 matchers = tuple(self._matchers.values()) + (listen_modify, connection_down)
             elif _no_connection_start or self.matcher is connection_down:
                 # Connection is down, wait for restore
                 # The module may be reloaded
                 if _no_connection_start:
                     recreate_matchers = True
                 else:
                     recreate_matchers = False
                 last_transact = None
                 while True:
                     yield (connection_up, module_loaded)
                     if self.matcher is module_loaded:
                         self.terminate(self.modifierroutine)
                         for m in callAPI(self, 'redisdb', 'getclient', {'vhost':self.vhostbind}):
                             yield m
                         client, encoder, decoder = self.retvalue
                         if self._deflate:
                             oldencoder = encoder
                             olddecoder = decoder
                             def encoder(x):
                                 return compress(oldencoder(x))
                             def decoder(x):
                                 try:
                                     return olddecoder(decompress(x))
                                 except zlib_error:
                                     return olddecoder(x)
                         # Recreate listeners
                         connection_down = client.subscribe_state_matcher(self, False)
                         connection_up = client.subscribe_state_matcher(self, True)
                         try:
                             for m in client.subscribe(self, *tuple(self._matchers.keys())):
                                 yield m
                         except Exception:
                             recreate_matchers = True
                             continue
                         else:
                             self._matchers = dict(zip(self._matchers.keys(), self.retvalue))
                             self.subroutine(self._modifier(client), True, "modifierroutine")
                             matchers = tuple(self._matchers.values()) + (listen_modify, connection_down)
                             break
                     else:
                         if recreate_matchers:
                             try:
                                 for m in client.subscribe(self, *[self.prefix + k for k in self._matchers.keys()]):
                                     yield m
                             except Exception:
                                 recreate_matchers = True
                                 continue
                             else:
                                 self._matchers = dict(zip(self._matchers.keys(), self.retvalue))
                                 self.subroutine(self._modifier(client), True, "modifierroutine")
                                 matchers = tuple(self._matchers.values()) + (listen_modify, connection_down)
                                 break
                         else:
                             matchers = tuple(self._matchers.values()) + (listen_modify, connection_down)
                             break
                 if self._publish_wait:
                     self.subroutine(self.publish())
                 transactid = '%s%016x' % (timestamp, transactno)
                 transactno += 1
                 def send_restore_notify(transactid):
                     if self._matchadd_wait or self._matchremove_wait:
                         # Wait for next subscribe success
                         for m in self.waitWithTimeout(1, ModifyListen.createMatcher(self, ModifyListen.LISTEN)):
                             yield m
                     for m in self.waitForSend(
                             UpdateNotification(self, transactid, tuple(self._matchers.keys()), UpdateNotification.RESTORED, False, extrainfo = None)):
                         yield m
                 self.subroutine(send_restore_notify(transactid), False)
             else:
                 transact = decoder(self.event.message)
                 if transact['id'] == last_transact:
                     # Ignore duplicated updates
                     continue
                 last_transact = transact['id']
                 pubkey, sep, pubno = last_transact.partition('-')
                 fromself = (sep and pubkey == self._publishkey)
                 transactid = '%s%016x' % (timestamp, transactno)
                 transactno += 1
                 self.subroutine(self.waitForSend(
                             UpdateNotification(self, transactid, tuple(_bytes(k) for k in transact['keys']), UpdateNotification.UPDATED, fromself, extrainfo = transact.get('extrainfo'))
                                                                        ), False)
     finally:
         if hasattr(self ,'modifierroutine') and self.modifierroutine:
             self.terminate(self.modifierroutine)
Example #54
0
    def updateflow(self, connection, addvalues, removevalues, updatedvalues):
        """
        Flow-updater callback: keep the ICMP echo-responder flows and proxy-ARP
        entries for router gateway addresses in sync with the current subnets.

        :param connection: OpenFlow connection to the switch being updated
        :param addvalues: objects newly added to the watched result set
        :param removevalues: objects removed from the watched result set
        :param updatedvalues: objects whose content changed

        Any exception is logged and suppressed so the updater keeps running.
        """
        try:
            # Objects still alive in the saved result set
            allobjects = set(o for o in self._savedresult if o is not None and not o.isdeleted())

            lastsubnetsinfo = self._lastsubnetsinfo
            # logical network object -> network id, restricted to live objects
            currentlognetsinfo = dict((n, nid) for n, nid in self._lastlognets if n in allobjects)
            # subnet object -> its RouterPort
            currentrouterportsinfo = dict((o.subnet, o) for o in allobjects
                                            if o.isinstance(RouterPort))
            # subnet -> (gateway ip, router mac, network key, network id) for every
            # non-external subnet that has a router attached and a usable gateway
            # address (router port ip_address overrides the subnet gateway)
            currentsubnetsinfo = dict((o, (getattr(currentrouterportsinfo[o], "ip_address", getattr(o, "gateway", None)),
                                          self.parent.inroutermac, o.network.id, currentlognetsinfo[o.network]))
                                        for o in allobjects if o.isinstance(SubNet)
                                            and hasattr(o, "router") and o in currentrouterportsinfo
                                            and o.network in currentlognetsinfo
                                            and (hasattr(currentrouterportsinfo[o], "ip_address")
                                                or hasattr(o, "gateway"))
                                            and (not hasattr(o, "isexternal") or o.isexternal == False))
            self._lastsubnetsinfo = currentsubnetsinfo

            ofdef = connection.openflowdef
            vhost = connection.protocol.vhost
            l3input = self.parent._gettableindex("l3input", vhost)

            cmds = []

            if connection.protocol.disablenxext:
                # Without Nicira extensions the network id is carried in the upper
                # 16 bits of METADATA instead of register 4
                def match_network(nid):
                    return ofdef.create_oxm(ofdef.OXM_OF_METADATA_W, (nid & 0xffff) << 32,
                                            b'\x00\x00\xff\xff\x00\x00\x00\x00')
            else:
                def match_network(nid):
                    return ofdef.create_oxm(ofdef.NXM_NX_REG4, nid)

            def _icmp_match(subnetinfo):
                # Shared match for all ICMP flows: an ICMP echo request
                # (type 8, code 0) addressed to the router MAC/IP on this
                # logical network. Used for both ADD and DELETE so the two
                # always agree.
                ipaddress, macaddress, _, networkid = subnetinfo
                return ofdef.ofp_match_oxm(
                    oxm_fields = [
                        match_network(networkid),
                        ofdef.create_oxm(ofdef.OXM_OF_ETH_DST, mac_addr_bytes(macaddress)),
                        ofdef.create_oxm(ofdef.OXM_OF_ETH_TYPE, ofdef.ETHERTYPE_IP),
                        ofdef.create_oxm(ofdef.OXM_OF_IPV4_DST, ip4_addr_bytes(ipaddress)),
                        ofdef.create_oxm(ofdef.OXM_OF_IP_PROTO, ofdef.IPPROTO_ICMP),
                        ofdef.create_oxm(ofdef.OXM_OF_ICMPV4_TYPE, 8),
                        ofdef.create_oxm(ofdef.OXM_OF_ICMPV4_CODE, 0)
                    ]
                )

            # prepush or not, the delete side is the same
            def _deleteicmpflows(subnetinfo):
                # BUGFIX: previously the delete match was built with
                # NXM_NX_REG4 unconditionally, so on connections with
                # disablenxext it could never match the flows installed by
                # _createicmpflows (which use a METADATA match). Build the
                # delete match with the same _icmp_match() helper instead.
                return [
                    ofdef.ofp_flow_mod(
                        cookie = 0x2,
                        cookie_mask = 0xffffffffffffffff,
                        table_id = l3input,
                        command = ofdef.OFPFC_DELETE,
                        priority = ofdef.OFP_DEFAULT_PRIORITY + 1,
                        buffer_id = ofdef.OFP_NO_BUFFER,
                        out_port = ofdef.OFPP_ANY,
                        out_group = ofdef.OFPG_ANY,
                        match = _icmp_match(subnetinfo)
                    )
                ]

            if not self.parent.prepush:
                def _createicmpflows(subnetinfo):
                    # Not pre-pushed: send echo requests to the controller
                    return [
                        ofdef.ofp_flow_mod(
                            cookie = 0x2,
                            cookie_mask = 0xffffffffffffffff,
                            table_id = l3input,
                            command = ofdef.OFPFC_ADD,
                            # icmp-to-router matches the same packets as ip
                            # forward-to-router, so priority + 1 wins
                            priority = ofdef.OFP_DEFAULT_PRIORITY + 1,
                            buffer_id = ofdef.OFP_NO_BUFFER,
                            out_port = ofdef.OFPP_ANY,
                            out_group = ofdef.OFPG_ANY,
                            match = _icmp_match(subnetinfo),
                            instructions = [
                                ofdef.ofp_instruction_actions(
                                    actions = [
                                        ofdef.ofp_action_output(
                                            port = ofdef.OFPP_CONTROLLER,
                                            max_len = ofdef.OFPCML_NO_BUFFER
                                        )
                                    ]
                                )
                            ]
                        )
                    ]
            else:
                def _createicmpflows(subnetinfo):
                    # Pre-pushed: rewrite the echo request into an echo reply
                    # in the fast path and bounce it back out the in-port
                    ipaddress, macaddress, _, networkid = subnetinfo
                    return [
                        ofdef.ofp_flow_mod(
                            cookie = 0x2,
                            cookie_mask = 0xffffffffffffffff,
                            table_id = l3input,
                            command = ofdef.OFPFC_ADD,
                            # icmp-to-router matches the same packets as ip
                            # forward-to-router, so priority + 1 wins
                            priority = ofdef.OFP_DEFAULT_PRIORITY + 1,
                            buffer_id = ofdef.OFP_NO_BUFFER,
                            out_port = ofdef.OFPP_ANY,
                            out_group = ofdef.OFPG_ANY,
                            match = _icmp_match(subnetinfo),
                            instructions = [
                                ofdef.ofp_instruction_actions(
                                    actions = [
                                        # swap MAC addresses: dst <- src, src <- router mac
                                        ofdef.nx_action_reg_move(
                                            n_bits = 48,
                                            src = ofdef.OXM_OF_ETH_SRC,
                                            dst = ofdef.OXM_OF_ETH_DST
                                        ),
                                        ofdef.ofp_action_set_field(
                                            field = ofdef.create_oxm(
                                                ofdef.OXM_OF_ETH_SRC,
                                                ofdef.mac_addr(macaddress)
                                            )
                                        ),
                                        # swap IP addresses: dst <- src, src <- gateway ip
                                        ofdef.nx_action_reg_move(
                                            n_bits = 32,
                                            src = ofdef.OXM_OF_IPV4_SRC,
                                            dst = ofdef.OXM_OF_IPV4_DST
                                        ),
                                        ofdef.ofp_action_set_field(
                                            field = ofdef.create_oxm(
                                                ofdef.OXM_OF_IPV4_SRC,
                                                ofdef.ip4_addr(ipaddress)
                                            )
                                        ),
                                        # echo request -> echo reply
                                        ofdef.ofp_action_set_field(
                                            field = ofdef.create_oxm(
                                                ofdef.OXM_OF_ICMPV4_TYPE,
                                                ICMP_ECHOREPLY
                                            )
                                        ),
                                        ofdef.ofp_action_nw_ttl(
                                            nw_ttl = 128
                                        ),
                                        ofdef.ofp_action_output(
                                            port = ofdef.OFPP_IN_PORT
                                        )
                                    ]
                                )
                            ]
                        )
                    ]

            # Phase 1: remove proxy-ARP entries and ICMP flows for subnets that
            # disappeared or whose info changed (removal uses the OLD info)
            for obj in removevalues:
                if obj in lastsubnetsinfo:
                    remove_arp = set([(lastsubnetsinfo[obj][0], lastsubnetsinfo[obj][1], lastsubnetsinfo[obj][2], True)])
                    for m in callAPI(self, 'arpresponder', 'removeproxyarp', {'connection': connection,
                                                                          'arpentries': remove_arp}):
                        yield m
                    cmds.extend(_deleteicmpflows(lastsubnetsinfo[obj]))

            for obj in updatedvalues:
                if obj in lastsubnetsinfo and (obj not in currentsubnetsinfo or
                                                lastsubnetsinfo[obj] != currentsubnetsinfo[obj]):
                    remove_arp = set([(lastsubnetsinfo[obj][0], lastsubnetsinfo[obj][1], lastsubnetsinfo[obj][2], True)])
                    for m in callAPI(self, 'arpresponder', 'removeproxyarp', {'connection': connection,
                                                                          'arpentries': remove_arp}):
                        yield m
                    cmds.extend(_deleteicmpflows(lastsubnetsinfo[obj]))

            for m in self.execute_commands(connection, cmds):
                yield m

            # Phase 2: install proxy-ARP entries and ICMP flows for new or
            # changed subnets (installation uses the NEW info)
            del cmds[:]
            for obj in addvalues:
                if obj in currentsubnetsinfo and obj not in lastsubnetsinfo:
                    add_arp = set([(currentsubnetsinfo[obj][0], currentsubnetsinfo[obj][1], currentsubnetsinfo[obj][2], True)])
                    for m in callAPI(self, 'arpresponder', 'createproxyarp', {'connection': connection,
                                                                          'arpentries': add_arp}):
                        yield m
                    cmds.extend(_createicmpflows(currentsubnetsinfo[obj]))

            for obj in updatedvalues:
                if obj in currentsubnetsinfo and (obj not in lastsubnetsinfo or
                                                  currentsubnetsinfo[obj] != lastsubnetsinfo[obj]):
                    add_arp = set([(currentsubnetsinfo[obj][0], currentsubnetsinfo[obj][1], currentsubnetsinfo[obj][2], True)])
                    for m in callAPI(self, 'arpresponder', 'createproxyarp', {'connection': connection,
                                                                          'arpentries': add_arp}):
                        yield m
                    cmds.extend(_createicmpflows(currentsubnetsinfo[obj]))

            for m in self.execute_commands(connection, cmds):
                yield m
        except Exception:
            self._logger.warning("Unexpected exception in icmp_flow_updater, ignore it! Continue", exc_info=True)
Example #55
0
 def proc2_4():
     """Delegate a call to testmodule1.method3 through the API bus."""
     call = callAPI(r, "testmodule1", "method3", {'a': 1, 'b': 2})
     for event in call:
         yield event
Example #56
0
 def testproc():
     """
     Exercise module loading/reloading: call testmodule1 APIs, rewrite the
     testmodule1/testmodule2 sources on disk, reload them through the module
     loader, and append every observed result to apiResults for later checks.
     """
     # Wait until the modules have finished loading
     yield (ModuleLoadStateChanged.createMatcher(),)
     for m in callAPI(r, "testmodule1", "method1", {}):
         yield m
     apiResults.append(r.retvalue)
     for m in callAPI(r, "testmodule1", "method2", {'a' : 1, 'b' : 2}):
         yield m
     apiResults.append(r.retvalue)
     try:
         for m in callAPI(r, "testmodule1", "method4", {}):
             yield m
         apiResults.append(None)
     except ValueError as exc:
         apiResults.append(exc.args[0])
     from .gensrc.testmodule2 import ModuleTestEvent2
     def call_method3_and_record():
         # method3 fires a ModuleTestEvent2 as a side effect. Run it with a
         # callback that captures the event, fall back to a short wait if the
         # callback missed it, then record (result, version) — or False when
         # no event was observed. This replaces four copy-pasted occurrences
         # of the same pattern.
         matcher = ModuleTestEvent2.createMatcher()
         self.event = False
         def proc2():
             for m in callAPI(r, "testmodule1", "method3", {'a' : 1, 'b' : 2}):
                 yield m
         def callback(event, matcher):
             self.event = event
             if False:
                 yield
         for m in r.withCallback(proc2(), callback, matcher):
             yield m
         if not self.event:
             for m in r.waitWithTimeout(0.1, matcher):
                 yield m
             if not r.timeout:
                 self.event = r.event
         if self.event:
             apiResults.append((self.event.result, self.event.version))
         else:
             apiResults.append(False)
     for m in call_method3_and_record():
         yield m
     for m in callAPI(r, "testmodule1", "discover", {}):
         yield m
     apiResults.append(r.retvalue)
     # Reload testmodule1 from a rewritten source, then re-check its APIs
     with open(os.path.join(basedir, 'testmodule1.py'), 'wb') as f:
         f.write(module1v2)
     for m in s.moduleloader.delegate(s.moduleloader.reloadModules(['tests.gensrc.testmodule1.TestModule1'])):
         yield m
     for m in callAPI(r, "testmodule1", "method1", {}):
         yield m
     apiResults.append(r.retvalue)
     for m in call_method3_and_record():
         yield m
     # Reload testmodule2 (the event provider) from a rewritten source
     with open(os.path.join(basedir, 'testmodule2.py'), 'wb') as f:
         f.write(module2v2)
     for m in s.moduleloader.delegate(s.moduleloader.reloadModules(['tests.gensrc.testmodule2.TestModule2'])):
         yield m
     for m in call_method3_and_record():
         yield m
     # Reload both modules at once from a third source revision
     with open(os.path.join(basedir, 'testmodule1.py'), 'wb') as f:
         f.write(module1v3)
     with open(os.path.join(basedir, 'testmodule2.py'), 'wb') as f:
         f.write(module2v3)
     for m in s.moduleloader.delegate(s.moduleloader.reloadModules(['tests.gensrc.testmodule1.TestModule1','tests.gensrc.testmodule2.TestModule2'])):
         yield m
     for m in callAPI(r, "testmodule1", "method1", {}):
         yield m
     apiResults.append(r.retvalue)
     for m in call_method3_and_record():
         yield m
     try:
         # Calling a nonexistent API should raise ValueError promptly; the
         # timeout only guards against a hang
         for m in r.executeWithTimeout(1.0, callAPI(r, "testmodule1", "notexists", {})):
             yield m
     except ValueError:
         apiResults.append(True)
     except:
         apiResults.append(False)
     else:
         apiResults.append(False)
     for m in s.moduleloader.delegate(s.moduleloader.unloadByPath("tests.gensrc.testmodule1.TestModule1")):
         yield m
Example #57
0
 def callapi(modulename, functionname, **kwargs):
     """Call *functionname* of module *modulename* through this module's API
     routine; keyword arguments become the parameters of the call."""
     routine = self.apiroutine
     return callAPI(routine, modulename, functionname, kwargs)