def setUp(self):
    '''
    Build a test scheduler with one sub-queue per event category and start
    a resolver; also prepares the two test protocol instances.
    '''
    # Default console logging so test failures are visible
    logging.basicConfig()
    self.scheduler = Scheduler(DefaultPolling())
    # Switch to the repository root so relative paths (e.g. 'testcerts/')
    # resolve correctly regardless of where the tests are launched from
    import tests
    import os.path
    rootpath, _ = os.path.split(tests.__path__[0])
    if rootpath:
        os.chdir(rootpath)
    queue = self.scheduler.queue
    # Poll read/write events are fair-shared per file descriptor
    queue.addSubQueue(3, PollEvent.createMatcher(category=PollEvent.WRITE_READY),
                      'write', None, None,
                      CBQueue.AutoClassQueue.initHelper('fileno'))
    queue.addSubQueue(1, PollEvent.createMatcher(category=PollEvent.READ_READY),
                      'read', None, None,
                      CBQueue.AutoClassQueue.initHelper('fileno'))
    queue.addSubQueue(5, PollEvent.createMatcher(category=PollEvent.ERROR), 'error')
    queue.addSubQueue(2, ConnectionControlEvent.createMatcher(), 'control')
    # Connection writes are bounded (40/40) and fair-shared per connection
    queue.addSubQueue(4, ConnectionWriteEvent.createMatcher(), 'connectionwrite',
                      40, 40, CBQueue.AutoClassQueue.initHelper('connection'))
    queue.addSubQueue(10, RoutineControlEvent.createMatcher(), 'routine')
    queue.addSubQueue(9, TimerEvent.createMatcher(), 'timer')
    queue.addSubQueue(8, ResolveResponseEvent.createMatcher(), 'resolve')
    queue.addSubQueue(8, ResolveRequestEvent.createMatcher(), 'resolvereq')
    queue.addSubQueue(20, SystemControlEvent.createMatcher(), 'sysctl')
    queue.addSubQueue(0, SystemControlLowPriorityEvent.createMatcher(), 'sysctllow')
    self.protocolServer = TestProtocol(True)
    self.protocolClient = TestProtocol(False)
    self.resolver = Resolver(scheduler=self.scheduler)
    self.resolver.start()
def __init__(self):
    '''
    Constructor

    Configures logging (dict-style ``logging`` option or fileConfig-style
    ``loggingconfig`` option), creates the scheduler, and registers one
    sub-queue per event category with its configured priority and, where
    needed, per-class flow-control limits.
    '''
    # Logging may come from either a dict/config object or a config file
    if hasattr(self, 'logging'):
        if isinstance(self.logging, dict):
            logging_config = dict(self.logging)
        else:
            logging_config = self.logging.todict()
        # Keep loggers created before configuration alive by default
        logging_config.setdefault('disable_existing_loggers', False)
        logging.config.dictConfig(logging_config)
    elif hasattr(self, 'loggingconfig'):
        logging.config.fileConfig(self.loggingconfig, disable_existing_loggers=False)
    self.scheduler = Scheduler(DefaultPolling(),
                               getattr(self, 'processevents', None),
                               getattr(self, 'queuedefaultsize', None),
                               getattr(self, 'queuemaxsize', None),
                               defaultQueueClass=CBQueue.AutoClassQueue.initHelper('_classname0'),
                               defaultQueuePriority=400)
    if self.debugging:
        self.scheduler.debugging = True
        self.scheduler.logger.setLevel(logging.DEBUG)
    queue = self.scheduler.queue
    # Poll events: fair-share by file descriptor
    queue.addSubQueue(self.pollwritepriority,
                      PollEvent.createMatcher(category=PollEvent.WRITE_READY),
                      'write', None, None,
                      CBQueue.AutoClassQueue.initHelper('fileno'))
    queue.addSubQueue(self.pollreadpriority,
                      PollEvent.createMatcher(category=PollEvent.READ_READY),
                      'read', None, None,
                      CBQueue.AutoClassQueue.initHelper('fileno'))
    queue.addSubQueue(self.pollerrorpriority,
                      PollEvent.createMatcher(category=PollEvent.ERROR),
                      'error')
    queue.addSubQueue(self.connectioncontrolpriority,
                      ConnectionControlEvent.createMatcher(),
                      'control')
    # Connection writes: bounded overall, per-connection sub-limit, and a
    # reserve for connections that have not been seen before
    queue.addSubQueue(self.connectionwritepriority,
                      ConnectionWriteEvent.createMatcher(),
                      'connectionwrite',
                      self.totalwritelimit, self.totalwritelimit,
                      CBQueue.AutoClassQueue.initHelper('connection',
                                                        self.preservefornew,
                                                        subqueuelimit=self.writelimitperconnection))
    # Stream data: same flow-control scheme, keyed by stream
    queue.addSubQueue(self.streamdatapriority,
                      StreamDataEvent.createMatcher(),
                      'streamdata',
                      self.streamdatalimit, self.streamdatalimit,
                      CBQueue.AutoClassQueue.initHelper('stream',
                                                        self.preservefornew,
                                                        subqueuelimit=self.datalimitperstream))
    queue.addSubQueue(self.routinecontrolpriority,
                      RoutineControlEvent.createMatcher(),
                      'routine')
    queue.addSubQueue(self.timerpriority, TimerEvent.createMatcher(), 'timer')
    queue.addSubQueue(self.resolverresppriority,
                      ResolveResponseEvent.createMatcher(), 'resolve')
    queue.addSubQueue(self.resolverreqpriority,
                      ResolveRequestEvent.createMatcher(), 'resolvereq', 16)
    queue.addSubQueue(self.sysctlpriority,
                      SystemControlEvent.createMatcher(), 'sysctl')
    queue.addSubQueue(self.sysctllowpriority,
                      SystemControlLowPriorityEvent.createMatcher(), 'sysctllow')
    # Module API calls: fair-share by target module, small per-target limit
    queue.addSubQueue(self.moduleapicallpriority,
                      ModuleAPICall.createMatcher(),
                      'moduleapi', None, None,
                      CBQueue.AutoClassQueue.initHelper('target', 2, subqueuelimit=5))
    queue.addSubQueue(self.moduleapireplypriority,
                      ModuleAPIReply.createMatcher(), 'moduleapireply')
    queue.addSubQueue(self.modulenotifypriority,
                      ModuleNotification.createMatcher(),
                      'modulenotify', None, None,
                      CBQueue.AutoClassQueue.initHelper('target', subqueuelimit=5))
    queue.addSubQueue(self.moduleloadeventpriority,
                      ModuleLoadStateChanged.createMatcher(), 'moduleload')
    # Lock events: at most one queued event per lock key
    queue.addSubQueue(self.lockpriority,
                      LockEvent.createMatcher(),
                      'lock', None, None,
                      CBQueue.AutoClassQueue.initHelper('key', subqueuelimit=1))
    self.resolver = Resolver(self.scheduler, self.resolverpoolsize)
    self.moduleloader = ModuleLoader(self)
def __init__(self):
    '''
    Constructor

    Sets up logging from either the ``logging`` (dict-style) or
    ``loggingconfig`` (fileConfig-style) option, builds the scheduler, then
    registers every event sub-queue with its configured priority and flow
    limits, followed by the resolver and the module loader.
    '''
    # Prefer the dict-style logging configuration when both are present
    if hasattr(self, 'logging'):
        if isinstance(self.logging, dict):
            logging_config = dict(self.logging)
        else:
            logging_config = self.logging.todict()
        # Do not silently disable loggers created before this point
        logging_config.setdefault('disable_existing_loggers', False)
        logging.config.dictConfig(logging_config)
    elif hasattr(self, 'loggingconfig'):
        logging.config.fileConfig(self.loggingconfig, disable_existing_loggers=False)
    self.scheduler = Scheduler(DefaultPolling(),
                               getattr(self, 'processevents', None),
                               getattr(self, 'queuedefaultsize', None),
                               getattr(self, 'queuemaxsize', None),
                               defaultQueueClass=CBQueue.AutoClassQueue.initHelper('_classname0'),
                               defaultQueuePriority=400)
    if self.debugging:
        self.scheduler.debugging = True
        self.scheduler.logger.setLevel(logging.DEBUG)
    add_queue = self.scheduler.queue.addSubQueue
    # I/O readiness events are fair-shared by file descriptor
    add_queue(self.pollwritepriority,
              PollEvent.createMatcher(category=PollEvent.WRITE_READY),
              'write', None, None,
              CBQueue.AutoClassQueue.initHelper('fileno'))
    add_queue(self.pollreadpriority,
              PollEvent.createMatcher(category=PollEvent.READ_READY),
              'read', None, None,
              CBQueue.AutoClassQueue.initHelper('fileno'))
    add_queue(self.pollerrorpriority,
              PollEvent.createMatcher(category=PollEvent.ERROR),
              'error')
    add_queue(self.connectioncontrolpriority,
              ConnectionControlEvent.createMatcher(),
              'control')
    # Writes are bounded overall with a per-connection sub-limit and a
    # reserve kept for newly appearing connections
    add_queue(self.connectionwritepriority,
              ConnectionWriteEvent.createMatcher(),
              'connectionwrite',
              self.totalwritelimit, self.totalwritelimit,
              CBQueue.AutoClassQueue.initHelper('connection',
                                                self.preservefornew,
                                                subqueuelimit=self.writelimitperconnection))
    add_queue(self.streamdatapriority,
              StreamDataEvent.createMatcher(),
              'streamdata',
              self.streamdatalimit, self.streamdatalimit,
              CBQueue.AutoClassQueue.initHelper('stream',
                                                self.preservefornew,
                                                subqueuelimit=self.datalimitperstream))
    add_queue(self.routinecontrolpriority,
              RoutineControlEvent.createMatcher(),
              'routine')
    add_queue(self.timerpriority, TimerEvent.createMatcher(), 'timer')
    add_queue(self.resolverresppriority,
              ResolveResponseEvent.createMatcher(), 'resolve')
    add_queue(self.resolverreqpriority,
              ResolveRequestEvent.createMatcher(), 'resolvereq', 16)
    add_queue(self.sysctlpriority,
              SystemControlEvent.createMatcher(), 'sysctl')
    add_queue(self.sysctllowpriority,
              SystemControlLowPriorityEvent.createMatcher(), 'sysctllow')
    # Module API calls are fair-shared by target module
    add_queue(self.moduleapicallpriority,
              ModuleAPICall.createMatcher(),
              'moduleapi', None, None,
              CBQueue.AutoClassQueue.initHelper('target', 2, subqueuelimit=5))
    add_queue(self.moduleapireplypriority,
              ModuleAPIReply.createMatcher(), 'moduleapireply')
    add_queue(self.modulenotifypriority,
              ModuleNotification.createMatcher(),
              'modulenotify', None, None,
              CBQueue.AutoClassQueue.initHelper('target', subqueuelimit=5))
    add_queue(self.moduleloadeventpriority,
              ModuleLoadStateChanged.createMatcher(), 'moduleload')
    # At most one pending lock event per key
    add_queue(self.lockpriority,
              LockEvent.createMatcher(),
              'lock', None, None,
              CBQueue.AutoClassQueue.initHelper('key', subqueuelimit=1))
    add_queue(self.futurepriority, FutureEvent.createMatcher(), 'future')
    self.resolver = Resolver(self.scheduler, self.resolverpoolsize)
    self.moduleloader = ModuleLoader(self)
class Server(Configurable):
    '''
    Create a server with all necessary parts
    '''
    # Sub-queue priorities: higher numbers are processed first
    _default_pollwritepriority = 700
    _default_connectionwritepriority = 600
    _default_pollreadpriority = 500
    _default_pollerrorpriority = 800
    _default_resolverresppriority = 490
    _default_resolverreqpriority = 650
    _default_connectioncontrolpriority = 450
    _default_routinecontrolpriority = 1000
    _default_streamdatapriority = 640
    _default_timerpriority = 900
    _default_lockpriority = 990
    _default_futurepriority = 989
    _default_moduleloadeventpriority = 890
    _default_sysctlpriority = 2000
    _default_sysctllowpriority = 10
    _default_moduleapicallpriority = 630
    _default_moduleapireplypriority = 420
    _default_modulenotifypriority = 410
    # Flow-control limits for connection-write and stream-data queues
    _default_totalwritelimit = 100000
    _default_writelimitperconnection = 5
    _default_preservefornew = 100
    _default_streamdatalimit = 100000
    _default_datalimitperstream = 5
    _default_resolverpoolsize = 64
    # Desired RLIMIT_NOFILE soft limit; None skips the adjustment
    _default_ulimitn = 32768
    # Module paths loaded automatically on serve()
    _default_startup = ()
    _default_debugging = False
    def __init__(self):
        '''
        Constructor

        Configures logging (dict-style ``logging`` option or
        fileConfig-style ``loggingconfig`` option), creates the scheduler,
        and registers one sub-queue per event category with its configured
        priority and flow limits; finally creates the resolver and the
        module loader.
        '''
        if hasattr(self, 'logging'):
            if isinstance(self.logging, dict):
                logging_config = dict(self.logging)
            else:
                logging_config = self.logging.todict()
            # Keep loggers created before configuration alive by default
            logging_config.setdefault('disable_existing_loggers', False)
            logging.config.dictConfig(logging_config)
        elif hasattr(self, 'loggingconfig'):
            logging.config.fileConfig(self.loggingconfig, disable_existing_loggers=False)
        self.scheduler = Scheduler(DefaultPolling(),
                                   getattr(self, 'processevents', None),
                                   getattr(self, 'queuedefaultsize', None),
                                   getattr(self, 'queuemaxsize', None),
                                   defaultQueueClass=CBQueue.AutoClassQueue.initHelper('_classname0'),
                                   defaultQueuePriority=400)
        if self.debugging:
            self.scheduler.debugging = True
            self.scheduler.logger.setLevel(logging.DEBUG)
        queue = self.scheduler.queue
        # Poll events: fair-share by file descriptor
        queue.addSubQueue(self.pollwritepriority,
                          PollEvent.createMatcher(category=PollEvent.WRITE_READY),
                          'write', None, None,
                          CBQueue.AutoClassQueue.initHelper('fileno'))
        queue.addSubQueue(self.pollreadpriority,
                          PollEvent.createMatcher(category=PollEvent.READ_READY),
                          'read', None, None,
                          CBQueue.AutoClassQueue.initHelper('fileno'))
        queue.addSubQueue(self.pollerrorpriority,
                          PollEvent.createMatcher(category=PollEvent.ERROR),
                          'error')
        queue.addSubQueue(self.connectioncontrolpriority,
                          ConnectionControlEvent.createMatcher(),
                          'control')
        # Connection writes: bounded overall, per-connection sub-limit, and
        # a reserve for connections not seen before
        queue.addSubQueue(self.connectionwritepriority,
                          ConnectionWriteEvent.createMatcher(),
                          'connectionwrite',
                          self.totalwritelimit, self.totalwritelimit,
                          CBQueue.AutoClassQueue.initHelper('connection',
                                                            self.preservefornew,
                                                            subqueuelimit=self.writelimitperconnection))
        queue.addSubQueue(self.streamdatapriority,
                          StreamDataEvent.createMatcher(),
                          'streamdata',
                          self.streamdatalimit, self.streamdatalimit,
                          CBQueue.AutoClassQueue.initHelper('stream',
                                                            self.preservefornew,
                                                            subqueuelimit=self.datalimitperstream))
        queue.addSubQueue(self.routinecontrolpriority,
                          RoutineControlEvent.createMatcher(),
                          'routine')
        queue.addSubQueue(self.timerpriority, TimerEvent.createMatcher(), 'timer')
        queue.addSubQueue(self.resolverresppriority,
                          ResolveResponseEvent.createMatcher(), 'resolve')
        queue.addSubQueue(self.resolverreqpriority,
                          ResolveRequestEvent.createMatcher(), 'resolvereq', 16)
        queue.addSubQueue(self.sysctlpriority,
                          SystemControlEvent.createMatcher(), 'sysctl')
        queue.addSubQueue(self.sysctllowpriority,
                          SystemControlLowPriorityEvent.createMatcher(), 'sysctllow')
        # Module API calls: fair-share by target module
        queue.addSubQueue(self.moduleapicallpriority,
                          ModuleAPICall.createMatcher(),
                          'moduleapi', None, None,
                          CBQueue.AutoClassQueue.initHelper('target', 2, subqueuelimit=5))
        queue.addSubQueue(self.moduleapireplypriority,
                          ModuleAPIReply.createMatcher(), 'moduleapireply')
        queue.addSubQueue(self.modulenotifypriority,
                          ModuleNotification.createMatcher(),
                          'modulenotify', None, None,
                          CBQueue.AutoClassQueue.initHelper('target', subqueuelimit=5))
        queue.addSubQueue(self.moduleloadeventpriority,
                          ModuleLoadStateChanged.createMatcher(), 'moduleload')
        # At most one pending lock event per key
        queue.addSubQueue(self.lockpriority,
                          LockEvent.createMatcher(),
                          'lock', None, None,
                          CBQueue.AutoClassQueue.initHelper('key', subqueuelimit=1))
        queue.addSubQueue(self.futurepriority, FutureEvent.createMatcher(), 'future')
        self.resolver = Resolver(self.scheduler, self.resolverpoolsize)
        self.moduleloader = ModuleLoader(self)
    def serve(self):
        '''
        Raise the open-file limit to ``ulimitn`` when possible (best
        effort), start the resolver, the module loader and the configured
        startup modules, then run the scheduler main loop until it exits.
        '''
        if self.ulimitn is not None:
            try:
                import resource
                curr_ulimit = resource.getrlimit(resource.RLIMIT_NOFILE)
                if curr_ulimit[0] >= self.ulimitn:
                    # We do not decrease ulimit
                    pass
                elif curr_ulimit[1] >= self.ulimitn:
                    # Only increase soft limit, keep the hard limit unchanged
                    resource.setrlimit(resource.RLIMIT_NOFILE,
                                       (self.ulimitn, curr_ulimit[1]))
                else:
                    try:
                        resource.setrlimit(resource.RLIMIT_NOFILE,
                                           (self.ulimitn, self.ulimitn))
                    # Fixed: was a bare 'except:' which also caught
                    # SystemExit/KeyboardInterrupt
                    except Exception:
                        # Maybe we do not have permission to change the hard
                        # limit; instead increase the soft limit to the hard
                        # limit
                        resource.setrlimit(resource.RLIMIT_NOFILE,
                                           (curr_ulimit[1], curr_ulimit[1]))
            except Exception:
                # Best effort: the resource module may be unavailable on
                # this platform, or the limits cannot be changed at all
                pass
        # If logging is not configured, configure it to the default (console)
        logging.basicConfig()
        self.resolver.start()
        self.moduleloader.start()
        for path in self.startup:
            self.moduleloader.subroutine(self.moduleloader.loadByPath(path))
        self.scheduler.main()
class TestConnection(unittest.TestCase):
    '''
    End-to-end connection tests over TCP, SSL, UDP and UNIX sockets.

    Each test wires a client/server pair with a test protocol that emits
    TestDataEvent; the routine records 'A'/'B' per received event and the
    final byte sequence is asserted.
    '''
    def setUp(self):
        # Default console logging so failures are visible
        logging.basicConfig()
        self.scheduler = Scheduler(DefaultPolling())
        # Change to the repository root so relative paths such as
        # 'testcerts/' resolve correctly
        import tests
        import os.path
        rootpath, _ = os.path.split(tests.__path__[0])
        if rootpath:
            os.chdir(rootpath)
        queue = self.scheduler.queue
        queue.addSubQueue(3, PollEvent.createMatcher(category=PollEvent.WRITE_READY),
                          'write', None, None,
                          CBQueue.AutoClassQueue.initHelper('fileno'))
        queue.addSubQueue(1, PollEvent.createMatcher(category=PollEvent.READ_READY),
                          'read', None, None,
                          CBQueue.AutoClassQueue.initHelper('fileno'))
        queue.addSubQueue(5, PollEvent.createMatcher(category=PollEvent.ERROR), 'error')
        queue.addSubQueue(2, ConnectionControlEvent.createMatcher(), 'control')
        queue.addSubQueue(4, ConnectionWriteEvent.createMatcher(), 'connectionwrite',
                          40, 40, CBQueue.AutoClassQueue.initHelper('connection'))
        queue.addSubQueue(10, RoutineControlEvent.createMatcher(), 'routine')
        queue.addSubQueue(9, TimerEvent.createMatcher(), 'timer')
        queue.addSubQueue(8, ResolveResponseEvent.createMatcher(), 'resolve')
        queue.addSubQueue(8, ResolveRequestEvent.createMatcher(), 'resolvereq')
        queue.addSubQueue(20, SystemControlEvent.createMatcher(), 'sysctl')
        queue.addSubQueue(0, SystemControlLowPriorityEvent.createMatcher(), 'sysctllow')
        self.protocolServer = TestProtocol(True)
        self.protocolClient = TestProtocol(False)
        self.resolver = Resolver(scheduler=self.scheduler)
        self.resolver.start()

    def tearDown(self):
        pass

    def testSelfConnection(self):
        # Two clients connected to each other ('ptcp' accepts one connection)
        c1 = Client('tcp://localhost:199', self.protocolClient, self.scheduler)
        c2 = Client('ptcp://localhost:199', self.protocolServer, self.scheduler)
        r = RoutineContainer(self.scheduler, True)
        ret = bytearray()
        def mainA():
            m = TestDataEvent.createMatcher()
            while True:
                yield (m,)
                if r.event.connection is c2:
                    ret.extend(b'A')
                else:
                    ret.extend(b'B')
        r.main = mainA
        r.start()
        def waitAndStart():
            # Start c1 slightly later so c2's listener is already up
            for m in r.waitWithTimeout(0.5):
                yield m
            c1.start()
        r.subroutine(waitAndStart())
        c2.start()
        self.scheduler.main()
        self.assertEqual(ret, b'ABABABABABABABABABAB')

    def testServerClient(self):
        c1 = Client('tcp://localhost:199', self.protocolClient, self.scheduler)
        s1 = TcpServer('ltcp://localhost:199', self.protocolServer, self.scheduler)
        r = RoutineContainer(self.scheduler, True)
        ret = bytearray()
        def mainA():
            m = TestDataEvent.createMatcher()
            stopped = False
            while True:
                yield (m,)
                if r.event.connection is c1:
                    ret.extend(b'B')
                else:
                    ret.extend(b'A')
                # Shut the server down once traffic is flowing, so the
                # scheduler can exit when all connections close
                if not stopped:
                    for m in s1.shutdown():
                        yield m
                    stopped = True
        r.main = mainA
        r.start()
        s1.start()
        def waitAndStart():
            for m in r.waitWithTimeout(0.5):
                yield m
            c1.start()
        r.subroutine(waitAndStart())
        self.scheduler.main()
        self.assertEqual(ret, b'ABABABABABABABABABAB')

    def testMultipleClients(self):
        c1 = Client('tcp://localhost:199', self.protocolClient, self.scheduler)
        c2 = Client('tcp://localhost:199', self.protocolClient, self.scheduler)
        s1 = TcpServer('ltcp://localhost:199', self.protocolServer, self.scheduler)
        r = RoutineContainer(self.scheduler, True)
        counter = {c1: 0, c2: 0}
        ret = bytearray()
        def mainA():
            m = TestDataEvent.createMatcher()
            c1c = False
            c2c = False
            shutdown = False
            while True:
                yield (m,)
                counter[r.event.connection] = counter.get(r.event.connection, 0) + 1
                if r.event.connection is c1:
                    ret.extend(b'A')
                    c1c = True
                elif r.event.connection is c2:
                    ret.extend(b'B')
                    c2c = True
                # Stop accepting once both clients have produced data
                if c1c and c2c and not shutdown:
                    for m in s1.shutdown():
                        yield m
                    shutdown = True
        r.main = mainA
        r.start()
        s1.start()
        def waitAndStart(c):
            for m in r.waitWithTimeout(0.5):
                yield m
            c.start()
        r.subroutine(waitAndStart(c1))
        r.subroutine(waitAndStart(c2))
        self.scheduler.main()
        print(ret)
        self.assertEqual(counter[c1], 10)
        self.assertEqual(counter[c2], 10)

    def testSelfConnectionSsl(self):
        c1 = Client('ssl://localhost:199', self.protocolClient, self.scheduler,
                    'testcerts/client.key', 'testcerts/client.crt',
                    'testcerts/root.crt')
        c2 = Client('pssl://localhost:199', self.protocolServer, self.scheduler,
                    'testcerts/server.key', 'testcerts/server.crt',
                    'testcerts/root.crt')
        r = RoutineContainer(self.scheduler, True)
        ret = bytearray()
        def mainA():
            m = TestDataEvent.createMatcher()
            while True:
                yield (m,)
                if r.event.connection is c2:
                    ret.extend(b'A')
                else:
                    ret.extend(b'B')
        r.main = mainA
        r.start()
        def waitAndStart(c):
            for m in r.waitWithTimeout(0.5):
                yield m
            c.start()
        r.subroutine(waitAndStart(c1))
        c2.start()
        self.scheduler.main()
        self.assertEqual(ret, b'ABABABABABABABABABAB')

    def testServerClientSsl(self):
        c1 = Client('ssl://localhost:199', self.protocolClient, self.scheduler,
                    'testcerts/client.key', 'testcerts/client.crt',
                    'testcerts/root.crt')
        s1 = TcpServer('lssl://localhost:199', self.protocolServer, self.scheduler,
                       'testcerts/server.key', 'testcerts/server.crt',
                       'testcerts/root.crt')
        r = RoutineContainer(self.scheduler, True)
        ret = bytearray()
        def mainA():
            m = TestDataEvent.createMatcher()
            stopped = False
            while True:
                yield (m,)
                if r.event.connection is c1:
                    ret.extend(b'B')
                else:
                    ret.extend(b'A')
                if not stopped:
                    for m in s1.shutdown():
                        yield m
                    stopped = True
        r.main = mainA
        r.start()
        s1.start()
        def waitAndStart(c):
            for m in r.waitWithTimeout(0.5):
                yield m
            c.start()
        r.subroutine(waitAndStart(c1))
        self.scheduler.main()
        self.assertEqual(ret, b'ABABABABABABABABABAB')

    def testCAVerify(self):
        # Client presents a self-signed certificate the server's CA does not
        # know: the connection must fail and no data may flow
        c1 = Client('ssl://localhost:199', self.protocolClient, self.scheduler,
                    'testcerts/selfsigned.key', 'testcerts/selfsigned.crt',
                    'testcerts/selfsigned.crt')
        c2 = Client('pssl://localhost:199', self.protocolServer, self.scheduler,
                    'testcerts/server.key', 'testcerts/server.crt',
                    'testcerts/root.crt')
        r = RoutineContainer(self.scheduler, True)
        ret = bytearray()
        def mainA():
            m = TestDataEvent.createMatcher()
            while True:
                yield (m,)
                if r.event.connection is c2:
                    ret.extend(b'A')
                else:
                    ret.extend(b'B')
        self.notconnected = False
        def notConnected(connection):
            if connection is c1:
                self.notconnected = True
            # Keep this callback a generator without ever yielding
            if False:
                yield
        self.protocolClient.notconnected = notConnected
        r.main = mainA
        r.start()
        def waitAndStart(c):
            for m in r.waitWithTimeout(0.5):
                yield m
            c.start()
        r.subroutine(waitAndStart(c1))
        c2.start()
        self.scheduler.main()
        self.assertTrue(self.notconnected)
        self.assertEqual(ret, b'')

    def testCAVerify2(self):
        # Client trusts no CA at all: verification must fail
        c1 = Client('ssl://localhost:199', self.protocolClient, self.scheduler,
                    'testcerts/selfsigned.key', 'testcerts/selfsigned.crt', None)
        c2 = Client('pssl://localhost:199', self.protocolServer, self.scheduler,
                    'testcerts/server.key', 'testcerts/server.crt',
                    'testcerts/root.crt')
        r = RoutineContainer(self.scheduler, True)
        ret = bytearray()
        def mainA():
            m = TestDataEvent.createMatcher()
            while True:
                yield (m,)
                if r.event.connection is c2:
                    ret.extend(b'A')
                else:
                    ret.extend(b'B')
        self.notconnected = False
        def notConnected(connection):
            if connection is c1:
                self.notconnected = True
            if False:
                yield
        self.protocolClient.notconnected = notConnected
        r.main = mainA
        r.start()
        def waitAndStart(c):
            for m in r.waitWithTimeout(0.5):
                yield m
            c.start()
        r.subroutine(waitAndStart(c1))
        c2.start()
        self.scheduler.main()
        self.assertTrue(self.notconnected)
        self.assertEqual(ret, b'')

    def testCAVerify3(self):
        # Client sends no certificate but the server requires one
        c1 = Client('ssl://localhost:199', self.protocolClient, self.scheduler,
                    None, None, 'testcerts/root.crt')
        c2 = Client('pssl://localhost:199', self.protocolServer, self.scheduler,
                    'testcerts/server.key', 'testcerts/server.crt',
                    'testcerts/root.crt')
        r = RoutineContainer(self.scheduler, True)
        ret = bytearray()
        def mainA():
            m = TestDataEvent.createMatcher()
            while True:
                yield (m,)
                if r.event.connection is c2:
                    ret.extend(b'A')
                else:
                    ret.extend(b'B')
        self.notconnected = False
        def notConnected(connection):
            if connection is c1:
                self.notconnected = True
            if False:
                yield
        self.protocolClient.notconnected = notConnected
        r.main = mainA
        r.start()
        def waitAndStart(c):
            for m in r.waitWithTimeout(0.5):
                yield m
            c.start()
        r.subroutine(waitAndStart(c1))
        c2.start()
        self.scheduler.main()
        self.assertTrue(self.notconnected)
        self.assertEqual(ret, b'')

    def testSelfConnectionSslWithoutClientCertificate(self):
        # Server does not require a client certificate, so this succeeds
        c1 = Client('ssl://localhost:199', self.protocolClient, self.scheduler,
                    None, None, 'testcerts/root.crt')
        c2 = Client('pssl://localhost:199', self.protocolServer, self.scheduler,
                    'testcerts/server.key', 'testcerts/server.crt', None)
        r = RoutineContainer(self.scheduler, True)
        ret = bytearray()
        def mainA():
            m = TestDataEvent.createMatcher()
            while True:
                yield (m,)
                if r.event.connection is c2:
                    ret.extend(b'A')
                else:
                    ret.extend(b'B')
        r.main = mainA
        r.start()
        def waitAndStart(c):
            for m in r.waitWithTimeout(0.5):
                yield m
            c.start()
        r.subroutine(waitAndStart(c1))
        c2.start()
        self.scheduler.main()
        self.assertEqual(ret, b'ABABABABABABABABABAB')

    def testSelfConnectionUdp(self):
        c1 = Client('udp://localhost:199', self.protocolClient, self.scheduler)
        c2 = Client('pudp://localhost:199', self.protocolServer, self.scheduler)
        r = RoutineContainer(self.scheduler, True)
        ret = bytearray()
        def mainA():
            m = TestDataEvent.createMatcher()
            while True:
                yield (m,)
                if r.event.connection is c2:
                    ret.extend(b'A')
                else:
                    ret.extend(b'B')
        r.main = mainA
        r.start()
        def waitAndStart(c):
            for m in r.waitWithTimeout(0.5):
                yield m
            c.start()
        r.subroutine(waitAndStart(c1))
        c2.start()
        self.scheduler.main()
        self.assertEqual(ret, b'ABABABABABABABABABAB')

    def testSelfConnectionUnix(self):
        if not hasattr(socket, 'AF_UNIX'):
            print('Skip UNIX socket test because not supported')
            return
        # Remove a stale socket file left over from a previous run
        try:
            os.remove('/var/run/unixsocktest.sock')
        except Exception:
            pass
        c1 = Client('unix:/var/run/unixsocktest.sock', self.protocolClient,
                    self.scheduler)
        c2 = Client('punix:/var/run/unixsocktest.sock', self.protocolServer,
                    self.scheduler)
        r = RoutineContainer(self.scheduler, True)
        ret = bytearray()
        def mainA():
            m = TestDataEvent.createMatcher()
            while True:
                yield (m,)
                if r.event.connection is c2:
                    ret.extend(b'A')
                else:
                    ret.extend(b'B')
        r.main = mainA
        r.start()
        def waitAndStart(c):
            for m in r.waitWithTimeout(0.5):
                yield m
            c.start()
        r.subroutine(waitAndStart(c1))
        c2.start()
        self.scheduler.main()
        self.assertEqual(ret, b'ABABABABABABABABABAB')

    def testSelfConnectionUnixDgram(self):
        if not hasattr(socket, 'AF_UNIX'):
            print('Skip UNIX socket test because not supported')
            return
        # Remove stale socket files left over from previous runs
        try:
            os.remove('/var/run/unixsocktestudp1.sock')
        except Exception:
            pass
        try:
            os.remove('/var/run/unixsocktestudp2.sock')
        except Exception:
            pass
        # Datagram sockets must be bound on both sides to exchange data
        c1 = Client('dunix:/var/run/unixsocktestudp2.sock', self.protocolClient,
                    self.scheduler,
                    bindaddress=((socket.AF_UNIX, '/var/run/unixsocktestudp1.sock'),))
        c2 = Client('pdunix:/var/run/unixsocktestudp2.sock', self.protocolServer,
                    self.scheduler)
        r = RoutineContainer(self.scheduler, True)
        ret = bytearray()
        def mainA():
            m = TestDataEvent.createMatcher()
            while True:
                yield (m,)
                if r.event.connection is c2:
                    ret.extend(b'A')
                else:
                    ret.extend(b'B')
        r.main = mainA
        r.start()
        def waitAndStart(c):
            for m in r.waitWithTimeout(0.5):
                yield m
            c.start()
        r.subroutine(waitAndStart(c1))
        c2.start()
        self.scheduler.main()
        self.assertEqual(ret, b'ABABABABABABABABABAB')
class TestConnection(unittest.TestCase):
    '''
    End-to-end connection tests over TCP, SSL, UDP and UNIX sockets.

    Each test wires a client/server pair with a test protocol that emits
    TestDataEvent; the routine records 'A'/'B' per received event and the
    final byte sequence is asserted.

    Fixed: the UNIX-socket tests used bare ``except:`` clauses around
    ``os.remove`` which also swallowed SystemExit/KeyboardInterrupt; they
    now catch ``Exception`` only, matching the rest of the file.
    '''
    def setUp(self):
        # Default console logging so failures are visible
        logging.basicConfig()
        self.scheduler = Scheduler(DefaultPolling())
        # Change to the repository root so relative paths such as
        # 'testcerts/' resolve correctly
        import tests
        import os.path
        rootpath, _ = os.path.split(tests.__path__[0])
        if rootpath:
            os.chdir(rootpath)
        queue = self.scheduler.queue
        queue.addSubQueue(3, PollEvent.createMatcher(category=PollEvent.WRITE_READY),
                          'write', None, None,
                          CBQueue.AutoClassQueue.initHelper('fileno'))
        queue.addSubQueue(1, PollEvent.createMatcher(category=PollEvent.READ_READY),
                          'read', None, None,
                          CBQueue.AutoClassQueue.initHelper('fileno'))
        queue.addSubQueue(5, PollEvent.createMatcher(category=PollEvent.ERROR), 'error')
        queue.addSubQueue(2, ConnectionControlEvent.createMatcher(), 'control')
        queue.addSubQueue(4, ConnectionWriteEvent.createMatcher(), 'connectionwrite',
                          40, 40, CBQueue.AutoClassQueue.initHelper('connection'))
        queue.addSubQueue(10, RoutineControlEvent.createMatcher(), 'routine')
        queue.addSubQueue(9, TimerEvent.createMatcher(), 'timer')
        queue.addSubQueue(8, ResolveResponseEvent.createMatcher(), 'resolve')
        queue.addSubQueue(8, ResolveRequestEvent.createMatcher(), 'resolvereq')
        queue.addSubQueue(20, SystemControlEvent.createMatcher(), 'sysctl')
        queue.addSubQueue(0, SystemControlLowPriorityEvent.createMatcher(), 'sysctllow')
        self.protocolServer = TestProtocol(True)
        self.protocolClient = TestProtocol(False)
        self.resolver = Resolver(scheduler=self.scheduler)
        self.resolver.start()

    def tearDown(self):
        pass

    def testSelfConnection(self):
        # Two clients connected to each other ('ptcp' accepts one connection)
        c1 = Client('tcp://localhost:199', self.protocolClient, self.scheduler)
        c2 = Client('ptcp://localhost:199', self.protocolServer, self.scheduler)
        r = RoutineContainer(self.scheduler, True)
        ret = bytearray()
        def mainA():
            m = TestDataEvent.createMatcher()
            while True:
                yield (m,)
                if r.event.connection is c2:
                    ret.extend(b'A')
                else:
                    ret.extend(b'B')
        r.main = mainA
        r.start()
        def waitAndStart():
            # Start c1 slightly later so c2's listener is already up
            for m in r.waitWithTimeout(0.5):
                yield m
            c1.start()
        r.subroutine(waitAndStart())
        c2.start()
        self.scheduler.main()
        self.assertEqual(ret, b'ABABABABABABABABABAB')

    def testServerClient(self):
        c1 = Client('tcp://localhost:199', self.protocolClient, self.scheduler)
        s1 = TcpServer('ltcp://localhost:199', self.protocolServer, self.scheduler)
        r = RoutineContainer(self.scheduler, True)
        ret = bytearray()
        def mainA():
            m = TestDataEvent.createMatcher()
            stopped = False
            while True:
                yield (m,)
                if r.event.connection is c1:
                    ret.extend(b'B')
                else:
                    ret.extend(b'A')
                # Shut the server down once traffic is flowing, so the
                # scheduler can exit when all connections close
                if not stopped:
                    for m in s1.shutdown():
                        yield m
                    stopped = True
        r.main = mainA
        r.start()
        s1.start()
        def waitAndStart():
            for m in r.waitWithTimeout(0.5):
                yield m
            c1.start()
        r.subroutine(waitAndStart())
        self.scheduler.main()
        self.assertEqual(ret, b'ABABABABABABABABABAB')

    def testMultipleClients(self):
        c1 = Client('tcp://localhost:199', self.protocolClient, self.scheduler)
        c2 = Client('tcp://localhost:199', self.protocolClient, self.scheduler)
        s1 = TcpServer('ltcp://localhost:199', self.protocolServer, self.scheduler)
        r = RoutineContainer(self.scheduler, True)
        counter = {c1: 0, c2: 0}
        ret = bytearray()
        def mainA():
            m = TestDataEvent.createMatcher()
            c1c = False
            c2c = False
            shutdown = False
            while True:
                yield (m,)
                counter[r.event.connection] = counter.get(r.event.connection, 0) + 1
                if r.event.connection is c1:
                    ret.extend(b'A')
                    c1c = True
                elif r.event.connection is c2:
                    ret.extend(b'B')
                    c2c = True
                # Stop accepting once both clients have produced data
                if c1c and c2c and not shutdown:
                    for m in s1.shutdown():
                        yield m
                    shutdown = True
        r.main = mainA
        r.start()
        s1.start()
        def waitAndStart(c):
            for m in r.waitWithTimeout(0.5):
                yield m
            c.start()
        r.subroutine(waitAndStart(c1))
        r.subroutine(waitAndStart(c2))
        self.scheduler.main()
        print(ret)
        self.assertEqual(counter[c1], 10)
        self.assertEqual(counter[c2], 10)

    def testSelfConnectionSsl(self):
        c1 = Client('ssl://localhost:199', self.protocolClient, self.scheduler,
                    'testcerts/client.key', 'testcerts/client.crt',
                    'testcerts/root.crt')
        c2 = Client('pssl://localhost:199', self.protocolServer, self.scheduler,
                    'testcerts/server.key', 'testcerts/server.crt',
                    'testcerts/root.crt')
        r = RoutineContainer(self.scheduler, True)
        ret = bytearray()
        def mainA():
            m = TestDataEvent.createMatcher()
            while True:
                yield (m,)
                if r.event.connection is c2:
                    ret.extend(b'A')
                else:
                    ret.extend(b'B')
        r.main = mainA
        r.start()
        def waitAndStart(c):
            for m in r.waitWithTimeout(0.5):
                yield m
            c.start()
        r.subroutine(waitAndStart(c1))
        c2.start()
        self.scheduler.main()
        self.assertEqual(ret, b'ABABABABABABABABABAB')

    def testServerClientSsl(self):
        c1 = Client('ssl://localhost:199', self.protocolClient, self.scheduler,
                    'testcerts/client.key', 'testcerts/client.crt',
                    'testcerts/root.crt')
        s1 = TcpServer('lssl://localhost:199', self.protocolServer, self.scheduler,
                       'testcerts/server.key', 'testcerts/server.crt',
                       'testcerts/root.crt')
        r = RoutineContainer(self.scheduler, True)
        ret = bytearray()
        def mainA():
            m = TestDataEvent.createMatcher()
            stopped = False
            while True:
                yield (m,)
                if r.event.connection is c1:
                    ret.extend(b'B')
                else:
                    ret.extend(b'A')
                if not stopped:
                    for m in s1.shutdown():
                        yield m
                    stopped = True
        r.main = mainA
        r.start()
        s1.start()
        def waitAndStart(c):
            for m in r.waitWithTimeout(0.5):
                yield m
            c.start()
        r.subroutine(waitAndStart(c1))
        self.scheduler.main()
        self.assertEqual(ret, b'ABABABABABABABABABAB')

    def testCAVerify(self):
        # Client presents a self-signed certificate the server's CA does not
        # know: the connection must fail and no data may flow
        c1 = Client('ssl://localhost:199', self.protocolClient, self.scheduler,
                    'testcerts/selfsigned.key', 'testcerts/selfsigned.crt',
                    'testcerts/selfsigned.crt')
        c2 = Client('pssl://localhost:199', self.protocolServer, self.scheduler,
                    'testcerts/server.key', 'testcerts/server.crt',
                    'testcerts/root.crt')
        r = RoutineContainer(self.scheduler, True)
        ret = bytearray()
        def mainA():
            m = TestDataEvent.createMatcher()
            while True:
                yield (m,)
                if r.event.connection is c2:
                    ret.extend(b'A')
                else:
                    ret.extend(b'B')
        self.notconnected = False
        def notConnected(connection):
            if connection is c1:
                self.notconnected = True
            # Keep this callback a generator without ever yielding
            if False:
                yield
        self.protocolClient.notconnected = notConnected
        r.main = mainA
        r.start()
        def waitAndStart(c):
            for m in r.waitWithTimeout(0.5):
                yield m
            c.start()
        r.subroutine(waitAndStart(c1))
        c2.start()
        self.scheduler.main()
        self.assertTrue(self.notconnected)
        self.assertEqual(ret, b'')

    def testCAVerify2(self):
        # Client trusts no CA at all: verification must fail
        c1 = Client('ssl://localhost:199', self.protocolClient, self.scheduler,
                    'testcerts/selfsigned.key', 'testcerts/selfsigned.crt', None)
        c2 = Client('pssl://localhost:199', self.protocolServer, self.scheduler,
                    'testcerts/server.key', 'testcerts/server.crt',
                    'testcerts/root.crt')
        r = RoutineContainer(self.scheduler, True)
        ret = bytearray()
        def mainA():
            m = TestDataEvent.createMatcher()
            while True:
                yield (m,)
                if r.event.connection is c2:
                    ret.extend(b'A')
                else:
                    ret.extend(b'B')
        self.notconnected = False
        def notConnected(connection):
            if connection is c1:
                self.notconnected = True
            if False:
                yield
        self.protocolClient.notconnected = notConnected
        r.main = mainA
        r.start()
        def waitAndStart(c):
            for m in r.waitWithTimeout(0.5):
                yield m
            c.start()
        r.subroutine(waitAndStart(c1))
        c2.start()
        self.scheduler.main()
        self.assertTrue(self.notconnected)
        self.assertEqual(ret, b'')

    def testCAVerify3(self):
        # Client sends no certificate but the server requires one
        c1 = Client('ssl://localhost:199', self.protocolClient, self.scheduler,
                    None, None, 'testcerts/root.crt')
        c2 = Client('pssl://localhost:199', self.protocolServer, self.scheduler,
                    'testcerts/server.key', 'testcerts/server.crt',
                    'testcerts/root.crt')
        r = RoutineContainer(self.scheduler, True)
        ret = bytearray()
        def mainA():
            m = TestDataEvent.createMatcher()
            while True:
                yield (m,)
                if r.event.connection is c2:
                    ret.extend(b'A')
                else:
                    ret.extend(b'B')
        self.notconnected = False
        def notConnected(connection):
            if connection is c1:
                self.notconnected = True
            if False:
                yield
        self.protocolClient.notconnected = notConnected
        r.main = mainA
        r.start()
        def waitAndStart(c):
            for m in r.waitWithTimeout(0.5):
                yield m
            c.start()
        r.subroutine(waitAndStart(c1))
        c2.start()
        self.scheduler.main()
        self.assertTrue(self.notconnected)
        self.assertEqual(ret, b'')

    def testSelfConnectionSslWithoutClientCertificate(self):
        # Server does not require a client certificate, so this succeeds
        c1 = Client('ssl://localhost:199', self.protocolClient, self.scheduler,
                    None, None, 'testcerts/root.crt')
        c2 = Client('pssl://localhost:199', self.protocolServer, self.scheduler,
                    'testcerts/server.key', 'testcerts/server.crt', None)
        r = RoutineContainer(self.scheduler, True)
        ret = bytearray()
        def mainA():
            m = TestDataEvent.createMatcher()
            while True:
                yield (m,)
                if r.event.connection is c2:
                    ret.extend(b'A')
                else:
                    ret.extend(b'B')
        r.main = mainA
        r.start()
        def waitAndStart(c):
            for m in r.waitWithTimeout(0.5):
                yield m
            c.start()
        r.subroutine(waitAndStart(c1))
        c2.start()
        self.scheduler.main()
        self.assertEqual(ret, b'ABABABABABABABABABAB')

    def testSelfConnectionUdp(self):
        c1 = Client('udp://localhost:199', self.protocolClient, self.scheduler)
        c2 = Client('pudp://localhost:199', self.protocolServer, self.scheduler)
        r = RoutineContainer(self.scheduler, True)
        ret = bytearray()
        def mainA():
            m = TestDataEvent.createMatcher()
            while True:
                yield (m,)
                if r.event.connection is c2:
                    ret.extend(b'A')
                else:
                    ret.extend(b'B')
        r.main = mainA
        r.start()
        def waitAndStart(c):
            for m in r.waitWithTimeout(0.5):
                yield m
            c.start()
        r.subroutine(waitAndStart(c1))
        c2.start()
        self.scheduler.main()
        self.assertEqual(ret, b'ABABABABABABABABABAB')

    def testSelfConnectionUnix(self):
        if not hasattr(socket, 'AF_UNIX'):
            print('Skip UNIX socket test because not supported')
            return
        # Remove a stale socket file left over from a previous run.
        # Fixed: was a bare 'except:' clause
        try:
            os.remove('/var/run/unixsocktest.sock')
        except Exception:
            pass
        c1 = Client('unix:/var/run/unixsocktest.sock', self.protocolClient,
                    self.scheduler)
        c2 = Client('punix:/var/run/unixsocktest.sock', self.protocolServer,
                    self.scheduler)
        r = RoutineContainer(self.scheduler, True)
        ret = bytearray()
        def mainA():
            m = TestDataEvent.createMatcher()
            while True:
                yield (m,)
                if r.event.connection is c2:
                    ret.extend(b'A')
                else:
                    ret.extend(b'B')
        r.main = mainA
        r.start()
        def waitAndStart(c):
            for m in r.waitWithTimeout(0.5):
                yield m
            c.start()
        r.subroutine(waitAndStart(c1))
        c2.start()
        self.scheduler.main()
        self.assertEqual(ret, b'ABABABABABABABABABAB')

    def testSelfConnectionUnixDgram(self):
        if not hasattr(socket, 'AF_UNIX'):
            print('Skip UNIX socket test because not supported')
            return
        # Remove stale socket files left over from previous runs.
        # Fixed: were bare 'except:' clauses
        try:
            os.remove('/var/run/unixsocktestudp1.sock')
        except Exception:
            pass
        try:
            os.remove('/var/run/unixsocktestudp2.sock')
        except Exception:
            pass
        # Datagram sockets must be bound on both sides to exchange data
        c1 = Client('dunix:/var/run/unixsocktestudp2.sock', self.protocolClient,
                    self.scheduler,
                    bindaddress=((socket.AF_UNIX, '/var/run/unixsocktestudp1.sock'),))
        c2 = Client('pdunix:/var/run/unixsocktestudp2.sock', self.protocolServer,
                    self.scheduler)
        r = RoutineContainer(self.scheduler, True)
        ret = bytearray()
        def mainA():
            m = TestDataEvent.createMatcher()
            while True:
                yield (m,)
                if r.event.connection is c2:
                    ret.extend(b'A')
                else:
                    ret.extend(b'B')
        r.main = mainA
        r.start()
        def waitAndStart(c):
            for m in r.waitWithTimeout(0.5):
                yield m
            c.start()
        r.subroutine(waitAndStart(c1))
        c2.start()
        self.scheduler.main()
        self.assertEqual(ret, b'ABABABABABABABABABAB')
class Server(Configurable):
    '''
    Create a server with all necessary parts
    '''
    # NOTE(review): this file contains more than one definition of ``Server``;
    # in Python the last definition executed wins at import time — confirm
    # which copy is intended to survive.
    # Startup modules list. Should be a tuple of "package.classname" like::
    #
    #    ('vlcp.service.sdn.viperflow.ViperFlow',
    #     'vlcp.service.sdn.vrouterapi.VRouterApi')
    #
    # Startup modules automatically load their dependencies, so it is not necessary
    # (though not an error) to write them explicitly.
    #
    # If server.startup is null or empty, server tries to load modules
    # in __main__.
    _default_startup = ()
    # enable debugging log for scheduler
    _default_debugging = False
    # File-can-write event priority, usually means socket send() can be used
    _default_pollwritepriority = 700
    # ConnectionWrite events priority
    _default_connectionwritepriority = 600
    # File-can-read event priority, usually means data received from socket
    _default_pollreadpriority = 500
    # error event priority, usually means the socket is in an error status
    _default_pollerrorpriority = 800
    # responses from resolver
    _default_resolverresppriority = 490
    # requests to resolver
    _default_resolverreqpriority = 650
    # shutdown/reset/restart commands for connections
    _default_connectioncontrolpriority = 450
    # asynchronously starts a routine
    _default_routinecontrolpriority = 1000
    # streams (vlcp.event.stream.Stream) data
    _default_streamdatapriority = 640
    # timers
    _default_timerpriority = 900
    # a lock can be acquired
    _default_lockpriority = 990
    # future objects been set
    _default_futurepriority = 989
    # a module is loaded/unloaded
    _default_moduleloadeventpriority = 890
    # system high-priority events
    _default_sysctlpriority = 2000
    # system low-priority events
    _default_sysctllowpriority = 10
    # module API call
    _default_moduleapicallpriority = 630
    # module API response
    _default_moduleapireplypriority = 420
    # module notify
    _default_modulenotifypriority = 410
    # default connection write queue limit for all connections
    _default_totalwritelimit = 100000
    # connection write events limit per connection
    _default_writelimitperconnection = 5
    # preserve space for newly created connections in default connection write queue
    _default_preservefornew = 100
    # stream data limit for all streams
    _default_streamdatalimit = 100000
    # stream data limit per stream
    _default_datalimitperstream = 5
    # the default multi-threading resolver pool size
    _default_resolverpoolsize = 64
    # try to set open files limit (ulimit -n) to this number if it is smaller
    _default_ulimitn = 32768
    # Use logging.config.dictConfig with this dictionary to configure the logging system.
    # It is supported in Python 2.7+
    _default_logging = None
    # Use logging.config.fileConfig with this file to configure the logging system.
    _default_loggingconfig = None
    # Force polling the sockets even if there are still unfinished events after processing
    # this number of events. May be helpful for very high stress. Should be set together
    # with server.queuemaxsize or/and server.queuedefaultsize to prevent memory overflow.
    _default_processevents = None
    # Limit the default queue size, so more events will be blocked until some events are
    # processed. This further limit the processing speed for producers to keep the system
    # stable on very high stress, but may slightly reduce the performance.
    _default_queuedefaultsize = None
    # Limit the total size of the event queue, so more events will be blocked until some events are
    # processed. This further limit the processing speed for producers to keep the system
    # stable on very high stress, but may slightly reduce the performance.
    _default_queuemaxsize = None

    def __init__(self):
        '''
        Constructor
        '''
        # Configure logging before anything else logs: an inline dict-style
        # config (``logging``) takes precedence over a config file
        # (``loggingconfig``).
        if hasattr(self, 'logging') and self.logging is not None:
            if isinstance(self.logging, dict):
                logging_config = dict(self.logging)
            else:
                # configuration object from the config manager; convert to a plain dict
                logging_config = self.logging.todict()
            # keep loggers created before this point enabled unless told otherwise
            logging_config.setdefault('disable_existing_loggers', False)
            logging.config.dictConfig(logging_config)
        elif hasattr(self, 'loggingconfig') and self.loggingconfig is not None:
            logging.config.fileConfig(self.loggingconfig, disable_existing_loggers=False)
        # Events without an explicit sub-queue are classified by '_classname0'
        # into a default queue with priority 400.
        self.scheduler = Scheduler(
                DefaultPolling(),
                getattr(self, 'processevents', None),
                getattr(self, 'queuedefaultsize', None),
                getattr(self, 'queuemaxsize', None),
                defaultQueueClass=CBQueue.AutoClassQueue.initHelper('_classname0'),
                defaultQueuePriority=400)
        if self.debugging:
            self.scheduler.debugging = True
            self.scheduler.logger.setLevel(logging.DEBUG)
        # Register one sub-queue per event family; priorities come from the
        # _default_* configurables above. AutoClassQueue sub-queues fan out per
        # key (fileno / connection / stream / target / key) so a single busy
        # source cannot starve the others.
        self.scheduler.queue.addSubQueue(
                self.pollwritepriority,
                PollEvent.createMatcher(category=PollEvent.WRITE_READY),
                'write', None, None, CBQueue.AutoClassQueue.initHelper('fileno'))
        self.scheduler.queue.addSubQueue(
                self.pollreadpriority,
                PollEvent.createMatcher(category=PollEvent.READ_READY),
                'read', None, None, CBQueue.AutoClassQueue.initHelper('fileno'))
        self.scheduler.queue.addSubQueue(
                self.pollerrorpriority,
                PollEvent.createMatcher(category=PollEvent.ERROR), 'error')
        self.scheduler.queue.addSubQueue(
                self.connectioncontrolpriority,
                ConnectionControlEvent.createMatcher(), 'control')
        # connection writes are bounded globally (totalwritelimit) and per
        # connection (writelimitperconnection), with space preserved for
        # newly-created connections
        self.scheduler.queue.addSubQueue(
                self.connectionwritepriority,
                ConnectionWriteEvent.createMatcher(), 'connectionwrite',
                self.totalwritelimit, self.totalwritelimit,
                CBQueue.AutoClassQueue.initHelper(
                        'connection', self.preservefornew,
                        subqueuelimit=self.writelimitperconnection))
        self.scheduler.queue.addSubQueue(
                self.streamdatapriority,
                StreamDataEvent.createMatcher(), 'streamdata',
                self.streamdatalimit, self.streamdatalimit,
                CBQueue.AutoClassQueue.initHelper(
                        'stream', self.preservefornew,
                        subqueuelimit=self.datalimitperstream))
        self.scheduler.queue.addSubQueue(self.routinecontrolpriority, RoutineControlEvent.createMatcher(), 'routine')
        self.scheduler.queue.addSubQueue(self.timerpriority, TimerEvent.createMatcher(), 'timer')
        self.scheduler.queue.addSubQueue(self.resolverresppriority, ResolveResponseEvent.createMatcher(), 'resolve')
        # at most 16 outstanding resolve requests
        self.scheduler.queue.addSubQueue(self.resolverreqpriority, ResolveRequestEvent.createMatcher(), 'resolvereq', 16)
        self.scheduler.queue.addSubQueue(self.sysctlpriority, SystemControlEvent.createMatcher(), 'sysctl')
        self.scheduler.queue.addSubQueue(
                self.sysctllowpriority,
                SystemControlLowPriorityEvent.createMatcher(), 'sysctllow')
        self.scheduler.queue.addSubQueue(
                self.moduleapicallpriority,
                ModuleAPICall.createMatcher(), 'moduleapi',
                None, None,
                CBQueue.AutoClassQueue.initHelper('target', 2, subqueuelimit=5))
        self.scheduler.queue.addSubQueue(self.moduleapireplypriority, ModuleAPIReply.createMatcher(), 'moduleapireply')
        self.scheduler.queue.addSubQueue(
                self.modulenotifypriority,
                ModuleNotification.createMatcher(), 'modulenotify',
                None, None,
                CBQueue.AutoClassQueue.initHelper('target', subqueuelimit=5))
        self.scheduler.queue.addSubQueue(
                self.moduleloadeventpriority,
                ModuleLoadStateChanged.createMatcher(), 'moduleload')
        # only one pending LockEvent per key
        self.scheduler.queue.addSubQueue(
                self.lockpriority,
                LockEvent.createMatcher(), 'lock',
                None, None,
                CBQueue.AutoClassQueue.initHelper('key', subqueuelimit=1))
        self.scheduler.queue.addSubQueue(self.futurepriority, FutureEvent.createMatcher(), 'future')
        self.resolver = Resolver(self.scheduler, self.resolverpoolsize)
        self.moduleloader = ModuleLoader(self)

    def serve(self):
        """
        Start the server
        """
        # Best-effort raise of the open-files soft limit (ulimit -n) up to
        # self.ulimitn; any failure is deliberately ignored.
        if self.ulimitn is not None:
            try:
                import resource
                curr_ulimit = resource.getrlimit(resource.RLIMIT_NOFILE)
                if curr_ulimit[0] >= self.ulimitn:
                    # We do not decrease ulimit
                    pass
                elif curr_ulimit[1] >= self.ulimitn:
                    # Only increase soft limit, keep the hard limit unchanged
                    resource.setrlimit(resource.RLIMIT_NOFILE, (self.ulimitn, curr_ulimit[1]))
                else:
                    try:
                        resource.setrlimit(resource.RLIMIT_NOFILE, (self.ulimitn, self.ulimitn))
                    except:
                        # Maybe we do not have permission to change hard limit,
                        # instead we increase soft limit to the hard limit
                        resource.setrlimit(resource.RLIMIT_NOFILE, (curr_ulimit[1], curr_ulimit[1]))
            except:
                # resource module missing or limits not adjustable: continue anyway
                pass
        # If logging is not configured, configure it to the default (console)
        logging.basicConfig()
        self.resolver.start()
        self.moduleloader.start()
        for path in self.startup:
            self.moduleloader.subroutine(self.moduleloader.loadByPath(path))
        # blocks until the scheduler terminates
        self.scheduler.main()
class Server(Configurable):
    '''
    Create a server with all necessary parts
    '''
    # NOTE(review): this is a duplicate definition of ``Server`` within the
    # same file; the last definition executed wins at import time — confirm
    # which copy is intended to survive.
    # Startup modules list. Should be a tuple of "package.classname" like::
    #
    #    ('vlcp.service.sdn.viperflow.ViperFlow',
    #     'vlcp.service.sdn.vrouterapi.VRouterApi')
    #
    # Startup modules automatically load their dependencies, so it is not necessary
    # (though not an error) to write them explicitly.
    #
    # If server.startup is null or empty, server tries to load modules
    # in __main__.
    _default_startup = ()
    # enable debugging log for scheduler
    _default_debugging = False
    # File-can-write event priority, usually means socket send() can be used
    _default_pollwritepriority = 700
    # ConnectionWrite events priority
    _default_connectionwritepriority = 600
    # File-can-read event priority, usually means data received from socket
    _default_pollreadpriority = 500
    # error event priority, usually means the socket is in an error status
    _default_pollerrorpriority = 800
    # responses from resolver
    _default_resolverresppriority = 490
    # requests to resolver
    _default_resolverreqpriority = 650
    # shutdown/reset/restart commands for connections
    _default_connectioncontrolpriority = 450
    # asynchronously starts a routine
    _default_routinecontrolpriority = 1000
    # streams (vlcp.event.stream.Stream) data
    _default_streamdatapriority = 640
    # timers
    _default_timerpriority = 900
    # a lock can be acquired
    _default_lockpriority = 990
    # future objects been set
    _default_futurepriority = 989
    # a module is loaded/unloaded
    _default_moduleloadeventpriority = 890
    # system high-priority events
    _default_sysctlpriority = 2000
    # system low-priority events
    _default_sysctllowpriority = 10
    # module API call
    _default_moduleapicallpriority = 630
    # module API response
    _default_moduleapireplypriority = 420
    # module notify
    _default_modulenotifypriority = 410
    # default connection write queue limit for all connections
    _default_totalwritelimit = 100000
    # connection write events limit per connection
    _default_writelimitperconnection = 5
    # preserve space for newly created connections in default connection write queue
    _default_preservefornew = 100
    # stream data limit for all streams
    _default_streamdatalimit = 100000
    # stream data limit per stream
    _default_datalimitperstream = 5
    # the default multi-threading resolver pool size
    _default_resolverpoolsize = 64
    # try to set open files limit (ulimit -n) to this number if it is smaller
    _default_ulimitn = 32768
    # Use logging.config.dictConfig with this dictionary to configure the logging system.
    # It is supported in Python 2.7+
    _default_logging = None
    # Use logging.config.fileConfig with this file to configure the logging system.
    _default_loggingconfig = None
    # Force polling the sockets even if there are still unfinished events after processing
    # this number of events. May be helpful for very high stress. Should be set together
    # with server.queuemaxsize or/and server.queuedefaultsize to prevent memory overflow.
    _default_processevents = None
    # Limit the default queue size, so more events will be blocked until some events are
    # processed. This further limit the processing speed for producers to keep the system
    # stable on very high stress, but may slightly reduce the performance.
    _default_queuedefaultsize = None
    # Limit the total size of the event queue, so more events will be blocked until some events are
    # processed. This further limit the processing speed for producers to keep the system
    # stable on very high stress, but may slightly reduce the performance.
    _default_queuemaxsize = None

    def __init__(self):
        '''
        Constructor
        '''
        # Configure logging before anything else logs: an inline dict-style
        # config (``logging``) takes precedence over a config file
        # (``loggingconfig``).
        if hasattr(self, 'logging') and self.logging is not None:
            if isinstance(self.logging, dict):
                logging_config = dict(self.logging)
            else:
                # configuration object from the config manager; convert to a plain dict
                logging_config = self.logging.todict()
            # keep loggers created before this point enabled unless told otherwise
            logging_config.setdefault('disable_existing_loggers', False)
            logging.config.dictConfig(logging_config)
        elif hasattr(self, 'loggingconfig') and self.loggingconfig is not None:
            logging.config.fileConfig(self.loggingconfig, disable_existing_loggers=False)
        # Events without an explicit sub-queue are classified by '_classname0'
        # into a default queue with priority 400.
        self.scheduler = Scheduler(DefaultPolling(),
                                   getattr(self, 'processevents', None),
                                   getattr(self, 'queuedefaultsize', None),
                                   getattr(self, 'queuemaxsize', None),
                                   defaultQueueClass=CBQueue.AutoClassQueue.initHelper('_classname0'),
                                   defaultQueuePriority=400)
        if self.debugging:
            self.scheduler.debugging = True
            self.scheduler.logger.setLevel(logging.DEBUG)
        # Register one sub-queue per event family; priorities come from the
        # _default_* configurables above. AutoClassQueue sub-queues fan out per
        # key (fileno / connection / stream / target / key) so a single busy
        # source cannot starve the others.
        self.scheduler.queue.addSubQueue(self.pollwritepriority,
                                         PollEvent.createMatcher(category=PollEvent.WRITE_READY),
                                         'write', None, None, CBQueue.AutoClassQueue.initHelper('fileno'))
        self.scheduler.queue.addSubQueue(self.pollreadpriority,
                                         PollEvent.createMatcher(category=PollEvent.READ_READY),
                                         'read', None, None, CBQueue.AutoClassQueue.initHelper('fileno'))
        self.scheduler.queue.addSubQueue(self.pollerrorpriority,
                                         PollEvent.createMatcher(category=PollEvent.ERROR), 'error')
        self.scheduler.queue.addSubQueue(self.connectioncontrolpriority,
                                         ConnectionControlEvent.createMatcher(), 'control')
        # connection writes are bounded globally (totalwritelimit) and per
        # connection (writelimitperconnection), with space preserved for
        # newly-created connections
        self.scheduler.queue.addSubQueue(self.connectionwritepriority,
                                         ConnectionWriteEvent.createMatcher(), 'connectionwrite',
                                         self.totalwritelimit, self.totalwritelimit,
                                         CBQueue.AutoClassQueue.initHelper('connection', self.preservefornew,
                                                                           subqueuelimit=self.writelimitperconnection))
        self.scheduler.queue.addSubQueue(self.streamdatapriority,
                                         StreamDataEvent.createMatcher(), 'streamdata',
                                         self.streamdatalimit, self.streamdatalimit,
                                         CBQueue.AutoClassQueue.initHelper('stream', self.preservefornew,
                                                                           subqueuelimit=self.datalimitperstream))
        self.scheduler.queue.addSubQueue(self.routinecontrolpriority, RoutineControlEvent.createMatcher(), 'routine')
        self.scheduler.queue.addSubQueue(self.timerpriority, TimerEvent.createMatcher(), 'timer')
        self.scheduler.queue.addSubQueue(self.resolverresppriority, ResolveResponseEvent.createMatcher(), 'resolve')
        # at most 16 outstanding resolve requests
        self.scheduler.queue.addSubQueue(self.resolverreqpriority, ResolveRequestEvent.createMatcher(), 'resolvereq', 16)
        self.scheduler.queue.addSubQueue(self.sysctlpriority, SystemControlEvent.createMatcher(), 'sysctl')
        self.scheduler.queue.addSubQueue(self.sysctllowpriority, SystemControlLowPriorityEvent.createMatcher(), 'sysctllow')
        self.scheduler.queue.addSubQueue(self.moduleapicallpriority, ModuleAPICall.createMatcher(), 'moduleapi',
                                         None, None, CBQueue.AutoClassQueue.initHelper('target', 2, subqueuelimit=5))
        self.scheduler.queue.addSubQueue(self.moduleapireplypriority, ModuleAPIReply.createMatcher(), 'moduleapireply')
        self.scheduler.queue.addSubQueue(self.modulenotifypriority, ModuleNotification.createMatcher(), 'modulenotify',
                                         None, None, CBQueue.AutoClassQueue.initHelper('target', subqueuelimit=5))
        self.scheduler.queue.addSubQueue(self.moduleloadeventpriority, ModuleLoadStateChanged.createMatcher(), 'moduleload')
        # only one pending LockEvent per key
        self.scheduler.queue.addSubQueue(self.lockpriority, LockEvent.createMatcher(), 'lock',
                                         None, None, CBQueue.AutoClassQueue.initHelper('key', subqueuelimit=1))
        self.scheduler.queue.addSubQueue(self.futurepriority, FutureEvent.createMatcher(), 'future')
        self.resolver = Resolver(self.scheduler, self.resolverpoolsize)
        self.moduleloader = ModuleLoader(self)

    def serve(self):
        """
        Start the server
        """
        # Best-effort raise of the open-files soft limit (ulimit -n) up to
        # self.ulimitn; any failure is deliberately ignored.
        if self.ulimitn is not None:
            try:
                import resource
                curr_ulimit = resource.getrlimit(resource.RLIMIT_NOFILE)
                if curr_ulimit[0] >= self.ulimitn:
                    # We do not decrease ulimit
                    pass
                elif curr_ulimit[1] >= self.ulimitn:
                    # Only increase soft limit, keep the hard limit unchanged
                    resource.setrlimit(resource.RLIMIT_NOFILE, (self.ulimitn, curr_ulimit[1]))
                else:
                    try:
                        resource.setrlimit(resource.RLIMIT_NOFILE, (self.ulimitn, self.ulimitn))
                    except:
                        # Maybe we do not have permission to change hard limit,
                        # instead we increase soft limit to the hard limit
                        resource.setrlimit(resource.RLIMIT_NOFILE, (curr_ulimit[1], curr_ulimit[1]))
            except:
                # resource module missing or limits not adjustable: continue anyway
                pass
        # If logging is not configured, configure it to the default (console)
        logging.basicConfig()
        self.resolver.start()
        self.moduleloader.start()
        for path in self.startup:
            self.moduleloader.subroutine(self.moduleloader.loadByPath(path))
        # blocks until the scheduler terminates
        self.scheduler.main()
class Server(Configurable):
    '''
    Create a server with all necessary parts
    '''
    # NOTE(review): this is an older variant of ``Server`` duplicated within
    # the same file: it lacks the 'future' sub-queue, the _default_futurepriority
    # and the explicit _default_logging/_default_loggingconfig defaults that the
    # other copies define, and its logging checks omit the ``is not None`` guard.
    # The last definition executed wins at import time — confirm which copy is
    # intended to survive.
    # sub-queue priorities for the scheduler event families (higher = served first)
    _default_pollwritepriority = 700
    _default_connectionwritepriority = 600
    _default_pollreadpriority = 500
    _default_pollerrorpriority = 800
    _default_resolverresppriority = 490
    _default_resolverreqpriority = 650
    _default_connectioncontrolpriority = 450
    _default_routinecontrolpriority = 1000
    _default_streamdatapriority = 640
    _default_timerpriority = 900
    _default_lockpriority = 990
    _default_moduleloadeventpriority = 890
    _default_sysctlpriority = 2000
    _default_sysctllowpriority = 10
    _default_moduleapicallpriority = 630
    _default_moduleapireplypriority = 420
    _default_modulenotifypriority = 410
    # connection write queue limit for all connections
    _default_totalwritelimit = 100000
    # connection write events limit per connection
    _default_writelimitperconnection = 5
    # preserve space for newly created connections in the connection write queue
    _default_preservefornew = 100
    # stream data limit for all streams
    _default_streamdatalimit = 100000
    # stream data limit per stream
    _default_datalimitperstream = 5
    # the multi-threading resolver pool size
    _default_resolverpoolsize = 64
    # try to set open files limit (ulimit -n) to this number if it is smaller
    _default_ulimitn = 32768
    # modules to load on startup (tuple of "package.classname" paths)
    _default_startup = ()
    # enable debugging log for scheduler
    _default_debugging = False

    def __init__(self):
        '''
        Constructor
        '''
        # Configure logging first: an inline dict-style config takes precedence
        # over a fileConfig-style config file.
        # NOTE(review): no ``is not None`` guard here, unlike the other Server
        # definitions in this file — confirm whether a None value can reach this
        # point through the configuration system.
        if hasattr(self, 'logging'):
            if isinstance(self.logging, dict):
                logging_config = dict(self.logging)
            else:
                # configuration object from the config manager; convert to a plain dict
                logging_config = self.logging.todict()
            # keep loggers created before this point enabled unless told otherwise
            logging_config.setdefault('disable_existing_loggers', False)
            logging.config.dictConfig(logging_config)
        elif hasattr(self, 'loggingconfig'):
            logging.config.fileConfig(self.loggingconfig, disable_existing_loggers=False)
        # Events without an explicit sub-queue are classified by '_classname0'
        # into a default queue with priority 400.
        self.scheduler = Scheduler(DefaultPolling(),
                                   getattr(self, 'processevents', None),
                                   getattr(self, 'queuedefaultsize', None),
                                   getattr(self, 'queuemaxsize', None),
                                   defaultQueueClass=CBQueue.AutoClassQueue.initHelper('_classname0'),
                                   defaultQueuePriority=400)
        if self.debugging:
            self.scheduler.debugging = True
            self.scheduler.logger.setLevel(logging.DEBUG)
        # Register one sub-queue per event family; AutoClassQueue sub-queues
        # fan out per key (fileno / connection / stream / target / key) so a
        # single busy source cannot starve the others.
        self.scheduler.queue.addSubQueue(self.pollwritepriority,
                                         PollEvent.createMatcher(category=PollEvent.WRITE_READY),
                                         'write', None, None, CBQueue.AutoClassQueue.initHelper('fileno'))
        self.scheduler.queue.addSubQueue(self.pollreadpriority,
                                         PollEvent.createMatcher(category=PollEvent.READ_READY),
                                         'read', None, None, CBQueue.AutoClassQueue.initHelper('fileno'))
        self.scheduler.queue.addSubQueue(self.pollerrorpriority,
                                         PollEvent.createMatcher(category=PollEvent.ERROR), 'error')
        self.scheduler.queue.addSubQueue(self.connectioncontrolpriority,
                                         ConnectionControlEvent.createMatcher(), 'control')
        # connection writes are bounded globally (totalwritelimit) and per
        # connection (writelimitperconnection), with space preserved for
        # newly-created connections
        self.scheduler.queue.addSubQueue(self.connectionwritepriority,
                                         ConnectionWriteEvent.createMatcher(), 'connectionwrite',
                                         self.totalwritelimit, self.totalwritelimit,
                                         CBQueue.AutoClassQueue.initHelper('connection', self.preservefornew,
                                                                           subqueuelimit=self.writelimitperconnection))
        self.scheduler.queue.addSubQueue(self.streamdatapriority,
                                         StreamDataEvent.createMatcher(), 'streamdata',
                                         self.streamdatalimit, self.streamdatalimit,
                                         CBQueue.AutoClassQueue.initHelper('stream', self.preservefornew,
                                                                           subqueuelimit=self.datalimitperstream))
        self.scheduler.queue.addSubQueue(self.routinecontrolpriority, RoutineControlEvent.createMatcher(), 'routine')
        self.scheduler.queue.addSubQueue(self.timerpriority, TimerEvent.createMatcher(), 'timer')
        self.scheduler.queue.addSubQueue(self.resolverresppriority, ResolveResponseEvent.createMatcher(), 'resolve')
        # at most 16 outstanding resolve requests
        self.scheduler.queue.addSubQueue(self.resolverreqpriority, ResolveRequestEvent.createMatcher(), 'resolvereq', 16)
        self.scheduler.queue.addSubQueue(self.sysctlpriority, SystemControlEvent.createMatcher(), 'sysctl')
        self.scheduler.queue.addSubQueue(self.sysctllowpriority, SystemControlLowPriorityEvent.createMatcher(), 'sysctllow')
        self.scheduler.queue.addSubQueue(self.moduleapicallpriority, ModuleAPICall.createMatcher(), 'moduleapi',
                                         None, None, CBQueue.AutoClassQueue.initHelper('target', 2, subqueuelimit=5))
        self.scheduler.queue.addSubQueue(self.moduleapireplypriority, ModuleAPIReply.createMatcher(), 'moduleapireply')
        self.scheduler.queue.addSubQueue(self.modulenotifypriority, ModuleNotification.createMatcher(), 'modulenotify',
                                         None, None, CBQueue.AutoClassQueue.initHelper('target', subqueuelimit=5))
        self.scheduler.queue.addSubQueue(self.moduleloadeventpriority, ModuleLoadStateChanged.createMatcher(), 'moduleload')
        # only one pending LockEvent per key; no 'future' sub-queue in this variant
        self.scheduler.queue.addSubQueue(self.lockpriority, LockEvent.createMatcher(), 'lock',
                                         None, None, CBQueue.AutoClassQueue.initHelper('key', subqueuelimit=1))
        self.resolver = Resolver(self.scheduler, self.resolverpoolsize)
        self.moduleloader = ModuleLoader(self)

    def serve(self):
        """
        Start the server: raise the file-descriptor limit (best effort), start
        the resolver and module loader, load the configured startup modules,
        then block in the scheduler main loop.
        """
        if self.ulimitn is not None:
            try:
                import resource
                curr_ulimit = resource.getrlimit(resource.RLIMIT_NOFILE)
                if curr_ulimit[0] >= self.ulimitn:
                    # We do not decrease ulimit
                    pass
                elif curr_ulimit[1] >= self.ulimitn:
                    # Only increase soft limit, keep the hard limit unchanged
                    resource.setrlimit(resource.RLIMIT_NOFILE, (self.ulimitn, curr_ulimit[1]))
                else:
                    try:
                        resource.setrlimit(resource.RLIMIT_NOFILE, (self.ulimitn, self.ulimitn))
                    except:
                        # Maybe we do not have permission to change hard limit,
                        # instead we increase soft limit to the hard limit
                        resource.setrlimit(resource.RLIMIT_NOFILE, (curr_ulimit[1], curr_ulimit[1]))
            except:
                # resource module missing or limits not adjustable: continue anyway
                pass
        # If logging is not configured, configure it to the default (console)
        logging.basicConfig()
        self.resolver.start()
        self.moduleloader.start()
        for path in self.startup:
            self.moduleloader.subroutine(self.moduleloader.loadByPath(path))
        # blocks until the scheduler terminates
        self.scheduler.main()