def _sighup_handler(self, signum, frame):
    """Handle SIGHUP: reload configuration and recycle the thread pool."""
    self.ufc.configure()
    # If the database configuration changed we must reopen every
    # connection, so stop and restart the reactor's thread pool.
    log.info("Restarting threadpool...")
    pool = reactor.getThreadPool()
    pool.stop()
    pool.start()
def test_make_worker_with_threadpool_size(self):
    """
    The reactor threadpool can be resized with a command line option.
    """
    from twisted.internet import reactor
    # Remember the current pool size and restore it after the test, since
    # the reactor is global state shared across tests.
    old_maxthreads = reactor.getThreadPool().max
    self.add_cleanup(reactor.suggestThreadPoolSize, old_maxthreads)
    # Explicitly set the threadpool size to something different from the
    # value we're testing with.
    reactor.suggestThreadPoolSize(5)
    self.mk_config_file('worker', ["transport_name: sphex"])
    maker = VumiWorkerServiceMaker()
    # By default, we don't touch the threadpool.
    options = StartWorkerOptions()
    options.parseOptions([
        '--worker-class', 'vumi.demos.words.EchoWorker',
        '--config', self.config_file['worker'],
    ])
    worker = maker.makeService(options)
    self.assertEqual({'transport_name': 'sphex'}, worker.config)
    # Pool max is unchanged: still the 5 we set above.
    self.assertEqual(reactor.getThreadPool().max, 5)
    # If asked, we set the threadpool's maximum size.
    options_mt = StartWorkerOptions()
    options_mt.parseOptions([
        '--worker-class', 'vumi.demos.words.EchoWorker',
        '--config', self.config_file['worker'],
        '--maxthreads', '2',
    ])
    worker = maker.makeService(options_mt)
    self.assertEqual({'transport_name': 'sphex'}, worker.config)
    self.assertEqual(reactor.getThreadPool().max, 2)
def update_status_in_thread(self):
    """Refresh the status of every active cluster (runs in a reactor thread).

    Always reschedules itself (via the main loop) so the periodic update
    keeps running even when one pass fails.
    """
    # TODO: make sure performance is not a problem as the current approach
    # queries the database many times.
    pool = reactor.getThreadPool()
    logger.info("updating clusters status, "
                "%d task in queue, %d workers, %d total threads",
                pool.q.qsize(), len(pool.working), len(pool.threads))
    try:
        self.start_time = time.time()
        for cluster in Cluster.objects.filter(active=True).all():
            self.update_cluster_status(cluster)
        logger.info("spent %f seconds for updating clusters status",
                    time.time() - self.start_time)
        logger.info("gc: %r", gc.get_count())
        logger.info("usage: %r", resource.getrusage(resource.RUSAGE_SELF))
    except Exception as e:
        # BUG FIX: the previous format string "%Failed to update statu: %r"
        # was malformed ("%F" is not a valid conversion) and misspelled, so
        # this line itself raised whenever an update failed.
        logger.warning("Failed to update status: %r", e)
    finally:
        # reactor.callLater is NOT thread-safe but reactor.callFromThread is,
        # so hand the callLater over to the main loop.
        reactor.callFromThread(
            reactor.callLater, self.collector_config.period,
            self.update_status)
def _start_in_multi_user_mode(args, root_resource, services_factory):
    """Bring up the protected multi-user site; report the outcome as a Deferred."""
    try:
        protected = _setup_multi_user(args, root_resource, services_factory)
        start_site(args, protected)
        # Blocking storage/crypto work needs a larger pool than the default.
        reactor.getThreadPool().adjustPoolsize(5, 15)
        return defer.succeed(None)
    except Exception as e:
        return defer.fail(e)
def dataReceived(self, data):
    """
    Overridden to stop trying to read data while outputting a response.

    This stops netcat from quitting before it gets the output!
    """
    reactor.removeReader(self.reader)
    result = LineOnlyReceiver.dataReceived(self, data)
    # Process the buffered lines on a worker thread; processLinesDone
    # fires back on the reactor when the work completes.
    pool = reactor.getThreadPool()
    pool.callInThreadWithCallback(self.processLinesDone, self.processLines)
    return result
def _start_in_multi_user_mode(args, root_resource, services_factory):
    """Initialise the LEAP provider and serve the protected root resource."""
    if args.provider is None:
        raise ValueError('provider name is required')
    init_monkeypatches()
    events_server.ensure_server()
    config, provider = initialize_leap_provider(
        args.provider,
        args.leap_provider_cert,
        args.leap_provider_cert_fingerprint,
        args.leap_home)
    guarded = set_up_protected_resources(
        root_resource, provider, services_factory)
    start_site(args, guarded)
    # Blocking storage work needs a larger thread pool than the default.
    reactor.getThreadPool().adjustPoolsize(5, 15)
    return defer.succeed(None)
def update_metrics_in_thread(self, metricsRawData):
    """Parse raw JMX-style metrics JSON and persist them on self.task.

    Runs on a reactor worker thread (Python 2 code: iteritems/unicode).
    Normalises each metric value to a number before saving; failures are
    logged and swallowed so the collector keeps running.
    """
    try:
        logger.info("%r updating metrics, "
                    "%d task in queue, %d workers, %d total threads",
                    self.task,
                    reactor.getThreadPool().q.qsize(),
                    len(reactor.getThreadPool().working),
                    len(reactor.getThreadPool().threads))
        start_time = time.time()
        # analyze the metric if needed
        if self.need_analyze:
            if metricsRawData:
                metrics = json.loads(metricsRawData)
                metrics_saved = {}
                # "beans" follows the Hadoop/HBase JMX JSON layout:
                # one entry per MBean, each a flat dict of metric values.
                for bean_output in metrics["beans"]:
                    bean_name = bean_output["name"]
                    for metric_name, metric_value in bean_output.iteritems():
                        if metric_name in ["name", "modelerType"]:
                            continue
                        metric_type = type(metric_value)
                        # Do some hadoop/hbase specific work :)
                        if metric_name in BOOL_METRIC_MAP:
                            # Booleans encoded as strings: 1 if it matches
                            # the expected "true" value, else 0.
                            metric_value = int(metric_value ==
                                               BOOL_METRIC_MAP[metric_name])
                        elif metric_type is list or metric_type is dict:
                            # Just store the length.
                            metric_value = len(metric_value)
                        elif metric_type is bool:
                            metric_value = int(metric_value)
                        elif metric_value is None:
                            metric_value = 0
                        elif not (metric_type is int or
                                  metric_type is float or
                                  metric_type is unicode or
                                  metric_type is str):
                            # Unknown type: log and skip rather than store
                            # something json.dumps might choke on.
                            logger.warning("Unexpected metric type %s/%s: %r/%r",
                                           bean_name, metric_name,
                                           metric_type, metric_value)
                            continue
                        # TODO: comment this out temporarily, remove it forever if we don't
                        # want to use it.
                        #metric = MetricObjectCache.get(bean_name, metric_name)
                        group = metrics_saved.setdefault(bean_name, {})
                        group[metric_name] = metric_value
                self.task.last_metrics = json.dumps(metrics_saved)
                self.analyze_metrics(metrics)
        self.task.save()
        logger.info("%r spent %f seconds for saving task status",
                    self.task, time.time() - start_time)
    except Exception, e:
        logger.warning("%r failed to update metric: %r", self.task, e)
        traceback.print_exc()
def run(runSearch=True):
    """Start the UPnP discovery/description services and run the reactor.

    Blocks in reactor.run() until shutdown, then stops the worker threads.
    """
    #module.descServer = reactor.listenTCP(0, Site(DescriptionServerPage())) #@UndefinedVariable
    module.descServer.listen()
    module.discovery.listen()
    if runSearch:
        module.discovery.search()
    # Announce our devices on the network (SSDP alive).
    module.localDeviceManager._sendAlive()
    # Send SSDP byebye before the reactor actually shuts down.
    reactor.addSystemEventTrigger("before", "shutdown", module.localDeviceManager.byeBye)
    reactor.run() #@UndefinedVariable
    # Reactor has exited: stop the thread pool so the process can exit.
    reactor.getThreadPool().stop() #@UndefinedVariable
def start(self, stop_after_crawl=True):
    """
    This method starts a Twisted `reactor`_, adjusts its pool size to
    :setting:`REACTOR_THREADPOOL_MAXSIZE`, and installs a DNS cache based
    on :setting:`DNSCACHE_ENABLED` and :setting:`DNSCACHE_SIZE`.

    If ``stop_after_crawl`` is True, the reactor will be stopped after all
    crawlers have finished, using :meth:`join`.

    :param boolean stop_after_crawl: stop or not the reactor when all
        crawlers have finished
    """
    if stop_after_crawl:
        d = self.join()
        # Don't start the reactor if the deferreds are already fired
        if d.called:
            return
        d.addBoth(self._stop_reactor)
    # Install a resolver on the reactor.  The reactor is Twisted's event
    # manager: register event handlers with it, call run(), and it
    # dispatches them, automatically switching to another runnable
    # handler whenever one waits on network I/O -- very efficient.
    reactor.installResolver(self._get_dns_resolver())
    # Get the reactor's thread pool
    tp = reactor.getThreadPool()
    # Resize the reactor's thread pool (tune via REACTOR_THREADPOOL_MAXSIZE)
    tp.adjustPoolsize(
        maxthreads=self.settings.getint('REACTOR_THREADPOOL_MAXSIZE'))
    # Register a system event trigger so we stop cleanly on shutdown
    reactor.addSystemEventTrigger('before', 'shutdown', self.stop)
    # Start the event loop
    reactor.run(installSignalHandlers=False)  # blocking call
def start(self, stop_after_crawl=True):
    """
    This method starts a Twisted `reactor`_, adjusts its pool size to
    :setting:`REACTOR_THREADPOOL_MAXSIZE`, and installs a DNS cache based
    on :setting:`DNSCACHE_ENABLED` and :setting:`DNSCACHE_SIZE`.

    If `stop_after_crawl` is True, the reactor will be stopped after all
    crawlers have finished, using :meth:`join`.

    :param boolean stop_after_crawl: stop or not the reactor when all
        crawlers have finished
    """
    if stop_after_crawl:
        d = self.join()
        # Don't start the reactor if the deferreds are already fired
        if d.called:
            return
        d.addBoth(self._stop_reactor)
    # Customise the reactor: install an internal resolver used for DNS
    # lookups (IPv4 only).
    reactor.installResolver(self._get_dns_resolver())
    # The pool returned here is the one used by reactor.callInThread
    tp = reactor.getThreadPool()
    # Resize the pool: adjustPoolsize(self, minthreads=None, maxthreads=None)
    tp.adjustPoolsize(
        maxthreads=self.settings.getint('REACTOR_THREADPOOL_MAXSIZE'))
    # Register a system event trigger: before the shutdown event fires,
    # self.stop runs so the reactor is brought down cleanly.
    reactor.addSystemEventTrigger('before', 'shutdown', self.stop)
    reactor.run(installSignalHandlers=False)  # blocking call
def _init_search(cls):
    """
    Initializes everything needed for search.

    Reads the JSON config at ``cls.search_config_path``, opens a txmongo
    connection pool, starts the PyLucene VM, and opens the Lucene index.
    Raises OSError if the config file or index directory is missing.
    """
    config_path = cls.search_config_path
    if not os.path.exists(config_path):
        raise OSError(errno.ENOENT, "Config %r does not exist." % config_path, config_path)
    config_dir = os.path.dirname(config_path)
    # Read config.
    with open(config_path, 'rb') as fh:
        config = json.load(fh)
    cls.search_config = config
    # Connect to mongo.
    host = config['mongo']['host']
    port = config['mongo'].get('port', None) or 27017
    thread_pool = reactor.getThreadPool()
    # Size the mongo pool to the midpoint of the reactor pool's bounds so a
    # connection is likely to be available for each busy worker thread.
    pool_size = int(math.ceil((thread_pool.min + thread_pool.max) / 2))
    cls.search_mongo = txmongo.lazyMongoConnectionPool(host=host, port=port, pool_size=pool_size)
    cls.search_order_db = cls.search_mongo[config['mongo']['order_dbname']]
    cls.search_order_tb = cls.search_order_db[config['mongo']['order_tbname']]
    # Initialize PyLucene.
    lucene.initVM()
    # Open index.  index_path in the config is relative to the config dir.
    index_path = os.path.abspath(os.path.join(config_dir, config['lucene']['index_path']))
    if not os.path.exists(index_path):
        raise OSError(errno.ENOENT, "Index %r does not exist." % index_path, index_path)
    elif not os.path.isdir(index_path):
        raise OSError(errno.ENOTDIR, "Index %r is not a directory." % index_path, index_path)
    index_dir = lucene.NIOFSDirectory(lucene.File(index_path))
    #index_dir = lucene.SimpleFSDirectory(lucene.File(index_path)) # windows
    cls.search_searcher = lucene.IndexSearcher(index_dir)
def setup_twisted(self):
    """Set up the Twisted service.  Soaplib has to be set up first."""
    # Serve the WSGI app on the reactor's own thread pool, mounted under
    # the configured SOAP child path.
    self.resource = WSGIResource(
        reactor, reactor.getThreadPool(), self.wsgi_application)
    self.root = Resource()
    self.root.putChild(self.soapchildpath, self.resource)
    self.site = Site(self.root)
def start(self, stop_after_crawl=True, install_signal_handlers=True):
    """
    This method starts a :mod:`~twisted.internet.reactor`, adjusts its pool
    size to :setting:`REACTOR_THREADPOOL_MAXSIZE`, and installs a DNS cache
    based on :setting:`DNSCACHE_ENABLED` and :setting:`DNSCACHE_SIZE`.

    If ``stop_after_crawl`` is True, the reactor will be stopped after all
    crawlers have finished, using :meth:`join`.

    :param bool stop_after_crawl: stop or not the reactor when all crawlers
        have finished

    :param bool install_signal_handlers: whether to install the shutdown
        handlers (default: True)
    """
    from twisted.internet import reactor
    if stop_after_crawl:
        d = self.join()
        # Don't start the reactor if the deferreds are already fired
        if d.called:
            return
        d.addBoth(self._stop_reactor)
    if install_signal_handlers:
        install_shutdown_handlers(self._signal_shutdown)
    # Build the resolver declared by the DNS_RESOLVER setting and attach
    # it to the reactor.
    resolver_class = load_object(self.settings["DNS_RESOLVER"])
    resolver = create_instance(resolver_class, self.settings, self, reactor=reactor)
    resolver.install_on_reactor()
    # Resize the reactor's thread pool per REACTOR_THREADPOOL_MAXSIZE.
    tp = reactor.getThreadPool()
    tp.adjustPoolsize(maxthreads=self.settings.getint('REACTOR_THREADPOOL_MAXSIZE'))
    reactor.addSystemEventTrigger('before', 'shutdown', self.stop)
    reactor.run(installSignalHandlers=False)  # blocking call
def run():
    """Configure logging, wrap the Falcon app for New Relic, and serve it."""
    structlog.configure(
        processors=[
            structlog.processors.StackInfoRenderer(),
            structlog.twisted.JSONRenderer()
        ],
        context_class=dict,
        logger_factory=structlog.twisted.LoggerFactory(),
        wrapper_class=structlog.twisted.BoundLogger,
        cache_logger_on_first_use=True,
    )
    # grab all of the events that are dispatched to stdlib logger.
    # new relic uses this.
    stdlib_handler = logging.StreamHandler(sys.stdout)
    logging.getLogger().addHandler(stdlib_handler)
    # start the twisted logger
    twLog.startLogging(sys.stdout)
    # api is the WSGI resource returned by Falcon.
    api = falcon.API()
    api.add_route('/quote', QuoteResource())
    wrapped_app = newrelic.agent.WSGIApplicationWrapper(api)
    wsgi_resource = WSGIResource(reactor, reactor.getThreadPool(), wrapped_app)
    reactor.listenTCP(port=8713, factory=Site(wsgi_resource))
    reactor.run()
def start_user_agent_in_single_user_mode(root_resource, services_factory, leap_home, leap_session):
    """Wire up services for a single-user session and grow the thread pool."""
    log.info('Bootstrap done, loading services for user %s' % leap_session.user_auth.username)
    user_services = services.Services(leap_session)
    yield user_services.setup()
    # Brand-new accounts get a welcome message in their mailbox.
    if leap_session.fresh_account:
        yield add_welcome_mail(leap_session.mail_store)
    services_factory.add_session(leap_session.user_auth.uuid, user_services)
    root_resource.initialize()
    # soledad needs lots of threads
    reactor.getThreadPool().adjustPoolsize(5, 15)
    log.info('Done, the user agent is ready to be used')
def getService(config, reactor=None, web=True):
    """Build the root MultiService: server manager, updater, optional REST site.

    :param config: dict-like with "servers" plus optional "workers",
        "interval" and "rest" keys.
    :param reactor: reactor to use; defaults to the global one.
    :param web: when True, also listen on 127.0.0.1:8080 for the REST API.
    """
    if reactor is None:
        from twisted.internet import reactor
    root = service.MultiService()
    sm = ServerManager(reactor, config["servers"])
    # Drop server connections before the reactor shuts down.
    smTrigId = reactor.addSystemEventTrigger("before", "shutdown", sm.loseConnections)
    tp = reactor.getThreadPool()
    # Transactor runs blocking DB work on the reactor's thread pool.
    root.updater = Updater(Transactor(tp), sm)
    updater = UpdaterService(int(config.get("workers", 10)),
                             int(config.get("interval", 300)),
                             )
    root.addService(updater)
    updater.parent = root
    if web:
        site = Site(getResource(config.get("rest", {}), root.updater))
        reactor.listenTCP(8080, site, interface="127.0.0.1")
    def _cleanup(res=None):
        sm.loseConnections()
        reactor.removeSystemEventTrigger(smTrigId)
    # NOTE(review): _cleanup is defined but never registered or invoked in
    # this function -- possibly dead code, or a caller is expected to grab
    # it; confirm before removing.
    return root
def makeService(self, options): config = options import cap import sys sys.path.insert(1,cap.__path__[0]) del sys.modules["cap"] os.environ.setdefault("DJANGO_SETTINGS_MODULE", "cap.settings") mysql_url = options["mysql_url"].strip() try: a, b = mysql_url.split(":") mysql_host = a mysql_port, mysql_db = b.split("/") mysql_port = int(mysql_port) except: print "mysql相关配置错误" raise Exception("mysql相关配置错误") else: mysql_user = options["mysql_user"] mysql_password = options["mysql_password"] os.config = [mysql_host,mysql_port,mysql_db,mysql_user,mysql_password] from django.core.handlers.wsgi import WSGIHandler application = WSGIHandler() resource = WSGIResource(reactor, reactor.getThreadPool(), application) ui_service=TCPServer(9912,server.Site(resource),interface=config["host"]) return ui_service
def main(argv: List[str]) -> Any:
    """Parse command-line flags and serve the Flask app over Twisted WSGI."""
    from twisted.internet import reactor
    from twisted.web.server import Site
    from twisted.web.wsgi import WSGIResource
    from twisted.python import log
    parser = argparse.ArgumentParser()
    parser.add_argument("-c", "--cache", action="store_true",
                        help="use local repo cache")
    parser.add_argument("-p", "--port", type=int, default=8160,
                        help="port number")
    parser.add_argument("-d", "--debug", action="store_true")
    args = parser.parse_args()
    app.config["CACHE_LOCAL"] = args.cache
    print("http://localhost:%d" % args.port)
    if args.debug:
        # Debug mode: enable Flask debugging and Twisted request logging.
        app.debug = True
        log.startLogging(sys.stdout)
    wsgi_site = Site(WSGIResource(reactor, reactor.getThreadPool(), app))
    reactor.listenTCP(args.port, wsgi_site)
    reactor.run()
def tearDown(self):
    # Unit tests that spawn a (blocking) client in a thread might still
    # have threads running at this point, if one is stuck waiting for a
    # message from a companion which has exited with an error. Our
    # relay's .stopService() drops all connections, which ought to
    # encourage those threads to terminate soon. If they don't, print a
    # warning to ease debugging.

    # XXX FIXME there's something in _noclobber test that's not
    # waiting for a close, I think -- was pretty relieably getting
    # unclean-reactor, but adding a slight pause here stops it...
    tp = reactor.getThreadPool()
    if not tp.working:
        # Fast path: no worker threads are busy, so a plain stop plus a
        # short grace period is enough.
        yield self.sp.stopService()
        yield task.deferLater(reactor, 0.1, lambda: None)
        defer.returnValue(None)
    # disconnect all callers
    d = defer.maybeDeferred(self.sp.stopService)
    # wait a second, then check to see if it worked
    yield task.deferLater(reactor, 1.0, lambda: None)
    if len(tp.working):
        log.msg("wormhole.test.common.ServerBase.tearDown:"
                " I was unable to convince all threads to exit.")
        tp.dumpStats()
        print("tearDown warning: threads are still active")
        print("This test will probably hang until one of the"
              " clients gives up of their own accord.")
    else:
        log.msg("wormhole.test.common.ServerBase.tearDown:"
                " I convinced all threads to exit.")
    yield d
def __init__(self, wsgiapp, wsprotocol, port=PORT):
    '''
    Start protocol factory and reactor.

    wsgiapp    -- the WSGI application served at the site root.
    wsprotocol -- the WebSocket protocol class, mounted at "/ws".
    port       -- TCP port to listen on (default PORT).
    '''
    self.app = wx.GetApp()
    ip = self.app.IP
    # Fall back to loopback when the app has no IP configured.
    self.ip = "127.0.0.1" if ip is None else ip
    self.port = port
    self.url = "ws://%s:%s" % (self.ip, self.port)
    # Set Protocol factory
    self.factory = websocket.WebSocketServerFactory(self.url)
    self.factory.protocol = wsprotocol
    self.root = WSGIRoot()
    # The WSGI app runs on the reactor's thread pool.
    self.root.WSGI = WSGIResource(reactor, reactor.getThreadPool(), wsgiapp)
    self.root.putChild(b"ws", resource.WebSocketResource(self.factory))
    static = File(os.path.join(self.app.path['www'], 'static'))
    self.root.putChild(b"static", static)
    # Only one File
    #self.root.putChild(b"documentation", File( os.path.join(self.app.path['www'], 'documentation') ) )
    self.site = Site(self.root)
    # Use the existing reactor
    reactor.listenTCP(self.port, self.site)
    echo("Web Server: Starting at %s" % self.root)
    echo("Websockets Server: Starting at %s" % self.url)
    # Publish the shared topic on the protocol class so every connection
    # instance sees the same value.
    topic = "ws.local"
    self.topic = topic
    self.factory.protocol.topic = topic
    self.Initialize()
def attach_app(self, subOptions):
    """Resolve the WSGI app to serve and wrap it in a LoggedWSGIResource.

    Resolution order: an app object passed via parent appOpts, then an
    import path from the "app" option, then a guess based on a Django
    manage.py in the current directory.  Falls back to a NoResource.
    """
    app = None
    fromAppOpts = subOptions.parent.get('appOpts', {}).get('app')
    if fromAppOpts is not None:
        app = fromAppOpts
    elif subOptions['app'] is not None:
        app = import_string(subOptions['app'])
    else:
        # no app nor app import path given, let's guess!
        files_in_cwd = os.listdir(os.getcwd())
        if 'manage.py' in files_in_cwd:
            # Looks like a Django project: make it importable and ask the
            # helper to build the WSGI app from manage.py.
            sys.path.insert(0, os.getcwd())
            from txdevserver.django_helpers import get_django_app
            django_app = get_django_app('manage.py')
            if django_app is not None:
                app = django_app
    if app is None:
        app = NoResource("Couldn't find the app!")
    rv = LoggedWSGIResource(reactor, reactor.getThreadPool(), app,
                            subOptions.get('log_data_factory'))
    self.app = rv
def run_server(config):
    """Run the combined WebSocket + Pyramid WSGI server described by *config*.

    Serves the Pyramid app for every path except "/ws", which is handled by
    the broadcast WebSocket factory.  Blocks in reactor.run().
    """
    # BUG FIX: debug was unconditionally reassigned to True right after
    # being derived from the config, which made config['debug'] a no-op.
    debug = bool(config['debug'])
    observer = log.PythonLoggingObserver()
    observer.start()
    if debug:
        log.startLogging(sys.stdout)
    ServerFactory = BroadcastServerFactory
    factory = ServerFactory(
        "ws://%s:%s" % (config['host'], config['port']),
        debug=debug,
        debugCodePaths=debug,
        externalPort=config['external_port'])
    factory.protocol = BroadcastServerProtocol
    wsResource = WebSocketResource(factory)
    # create a Twisted Web WSGI resource for our Pyramid server
    app = make_app(config)
    wsgiResource = WSGIResource(reactor, reactor.getThreadPool(), app)
    # create a root resource serving everything via WSGI/, but
    # the path "/ws" served by our WebSocket stuff
    rootResource = WSGIRootResource(wsgiResource, {'ws': wsResource})
    # create a Twisted Web Site and run everything
    site = Site(rootResource)
    reactor.listenTCP(config['port'], site, interface=config['host'])
    reactor.run()
def run(port=8880): print "running windpyfoam web server %s" % __version__ print "listening on port %s" % port root = WSGIResource(reactor, reactor.getThreadPool(), app) factory = Site(root) reactor.listenTCP(port, factory) reactor.run()
def maybeDeferToThread(f, *args, **kwargs):
    """Call the function C{f} using a thread from the reactor's threadpool.

    Returns the result as a Deferred.

    @param f: The function to call. May return a deferred.
    @param *args: positional arguments to pass to f.
    @param **kwargs: keyword arguments to pass to f.

    @return: A Deferred which fires a callback with the result of f, or an
        errback with a L{twisted.python.failure.Failure} if f throws an
        exception.
    """
    threadpool = reactor.getThreadPool()
    d = Deferred()
    def realOnResult(result):
        # Runs in the worker thread: hop back to the reactor thread before
        # firing the caller's deferred.
        if not isinstance(result, Failure):
            reactor.callFromThread(d.callback, result)
        else:
            reactor.callFromThread(d.errback, result)
    def onResult(success, result):
        # maybeDeferred never raises, so success is always True and the
        # result is always a Deferred (which may already have fired).
        assert success
        assert isinstance(result, Deferred)
        result.addBoth(realOnResult)
    threadpool.callInThreadWithCallback(onResult, maybeDeferred, f, *args, **kwargs)
    return d
def add_server(self, port, app_server, cert, key, cafile, client_req=False):
    """
    Adds a web/application server on a given port.

    port       - The port number to listen on.
    app_server - A WSGI style application server (such as Flask).
    cert       - The path to the SSL cert file.
    key        - The path to the SSL key file.
    cafile     - The path to the CA file for verifying clients.
    client_req - If cafile provided, is a client certificate REQUIRED?
                 If True, clients without a cert will be rejected.

    cert & key are optional (if set to None the server will be HTTP-only).
    cafile is optional; if set to None, client certificates are disabled.

    Returns None.
    """
    # Wrap the WSGI app in the authenticating resource and build the site.
    auth_resource = WSGIAuth(reactor, reactor.getThreadPool(), app_server)
    web_site = server.Site(auth_resource)
    # Build SSL options (if required) and bind the port accordingly.
    ssl_opts = self.__build_sslopts(cert, key, cafile, client_req)
    if ssl_opts:
        reactor.listenSSL(port, web_site, ssl_opts)
    else:
        reactor.listenTCP(port, web_site)
def start(self, stop_after_crawl=True):
    """
    This method starts a Twisted `reactor`_, adjusts its pool size to
    :setting:`REACTOR_THREADPOOL_MAXSIZE`, and installs a DNS cache based
    on :setting:`DNSCACHE_ENABLED` and :setting:`DNSCACHE_SIZE`.

    If `stop_after_crawl` is True, the reactor will be stopped after all
    crawlers have finished, using :meth:`join`.

    :param boolean stop_after_crawl: stop or not the reactor when all
        crawlers have finished
    """
    if stop_after_crawl:
        d = self.join()
        # Don't start the reactor if the deferreds are already fired
        if d.called:
            return
        d.addBoth(lambda _: self._stop_reactor())
    # A cache size of 0 disables DNS caching entirely.
    cache_size = self.settings.getint('DNSCACHE_SIZE') if self.settings.getbool('DNSCACHE_ENABLED') else 0
    reactor.installResolver(CachingThreadedResolver(reactor, cache_size,
                                                    self.settings.getfloat('DNS_TIMEOUT')))
    # Resize the reactor's thread pool per REACTOR_THREADPOOL_MAXSIZE.
    tp = reactor.getThreadPool()
    tp.adjustPoolsize(maxthreads=self.settings.getint('REACTOR_THREADPOOL_MAXSIZE'))
    reactor.addSystemEventTrigger('before', 'shutdown', self.stop)
    reactor.run(installSignalHandlers=False)  # blocking call
def run(self, app):
    """Serve *app* with Twisted's WSGI container on self.host:self.port.

    Blocks in reactor.run().
    """
    from twisted.web import server, wsgi
    from twisted.internet import reactor
    resource = wsgi.WSGIResource(reactor, reactor.getThreadPool(), app)
    # FIX: the local was previously named "server", shadowing the
    # twisted.web.server module imported just above.
    site = server.Site(resource)
    reactor.listenTCP(port=self.port, factory=site, interface=self.host)
    reactor.run()
def _multi_threaded_wsgi_resource(self, wsgi_handler):
    """Run the WSGI handler in a dedicated thread pool (production mode).

    Returns a WSGIResource bound to a freshly started pool that is stopped
    after reactor shutdown.
    """
    pool = threadpool.ThreadPool()
    pool.start()
    reactor.addSystemEventTrigger('after', 'shutdown', pool.stop)
    # BUG FIX: previously passed reactor.getThreadPool() here, so the
    # dedicated pool created (and started) above was never actually used
    # and the "multi-threaded" resource shared the reactor's pool.
    wsgi_resource = wsgi.WSGIResource(reactor, pool, wsgi_handler)
    return wsgi_resource
def run(): argv = sys.argv[1:] if argv: config_file_path = argv[0] else: caller_file = inspect.getouterframes(inspect.currentframe())[1][1] caller_file = os.path.realpath(caller_file) buildout_dir = os.path.dirname(os.path.dirname(caller_file)) config_file_path = os.path.join(buildout_dir, 'parts', 'etc', 'config.ini') if not os.path.isfile(config_file_path): print u'Path to config file must be given as a single parameter, for example "bin/run parts/etc/config.ini"' return paster.setup_logging(config_file_path) settings = paster.get_appsettings(config_file_path) app = main(None, **settings) from intranet3 import cron if not config.get('CRON_DISABLE'): cron.run_cron_tasks() full_config_path = os.path.abspath(config_file_path) server_config = ConfigParser.ConfigParser() server_config.readfp(open(full_config_path)) port = server_config.getint('server:main', 'port') host = server_config.get('server:main', 'host') resource = WSGIResource(reactor, reactor.getThreadPool(), app) site = server.Site(resource) reactor.listenTCP(port, site) reactor.run()
def start(self, stop_after_crawl=True):
    """
    This method starts a :mod:`~twisted.internet.reactor`, adjusts its pool
    size to :setting:`REACTOR_THREADPOOL_MAXSIZE`, and installs a DNS cache
    based on :setting:`DNSCACHE_ENABLED` and :setting:`DNSCACHE_SIZE`.

    If ``stop_after_crawl`` is True, the reactor will be stopped after all
    crawlers have finished, using :meth:`join`.

    :param boolean stop_after_crawl: stop or not the reactor when all
        crawlers have finished
    """
    if stop_after_crawl:
        d = self.join()
        # Don't start the reactor if the deferreds are already fired
        if d.called:
            return
        d.addBoth(self._stop_reactor)
    # Install the (possibly caching) DNS resolver on the reactor.
    reactor.installResolver(self._get_dns_resolver())
    # Resize the reactor's thread pool per REACTOR_THREADPOOL_MAXSIZE.
    tp = reactor.getThreadPool()
    tp.adjustPoolsize(
        maxthreads=self.settings.getint('REACTOR_THREADPOOL_MAXSIZE'))
    # Stop the engine cleanly before reactor shutdown.
    reactor.addSystemEventTrigger('before', 'shutdown', self.stop)
    reactor.run(installSignalHandlers=False)  # blocking call
def setUp(self):
    """Create the block-device API under test plus its threaded async adapter."""
    super(Tests, self).setUp()
    self.api = blockdevice_api_factory(test_case=self)
    self.this_node = self.api.compute_instance_id()
    # Wrap the synchronous cloud API so its calls run on the reactor's
    # thread pool and return Deferreds.
    self.async_cloud_api = _SyncToThreadedAsyncCloudAPIAdapter(
        _reactor=reactor,
        _sync=self.api,
        _threadpool=reactor.getThreadPool(),
    )
def RunEnclaveService(config, enclave):
    """Serve the contract enclave over HTTP until the reactor stops.

    Starts the reactor's thread pool with a widened size, runs the site,
    and always shuts the enclave helper down before exiting the process.
    """
    httpport = config['EnclaveService']['HttpPort']
    logger.info('service started on port %s', httpport)
    root = ContractEnclaveServer(config, enclave)
    site = server.Site(root)
    threadpool = reactor.getThreadPool()
    threadpool.start()
    # Min & max number of requests to service at a time.
    threadpool.adjustPoolsize(8, 100)
    logger.info('# of workers: %d', threadpool.workers)
    reactor.listenTCP(httpport, site)

    @defer.inlineCallbacks
    def shutdown_twisted():
        logger.info("Stopping Twisted")
        yield reactor.callFromThread(reactor.stop)

    reactor.addSystemEventTrigger('before', 'shutdown', shutdown_twisted)
    try:
        reactor.run()
    except ReactorNotRunning:
        logger.warn('shutdown')
    except Exception:
        # FIX: was a bare "except:", which would also swallow SystemExit
        # and KeyboardInterrupt; keep the best-effort logging but narrow
        # the catch.
        logger.warn('shutdown')
    pdo_enclave_helper.shutdown()
    sys.exit(0)
def createService(sname, config):
    """Create the policy-engine API service, wrapped in the auth resource."""
    from anchore_engine.services.policy_engine.application import application
    flask_site = WSGIResource(reactor, reactor.getThreadPool(), application)
    auth_root = anchore_engine.services.common.getAuthResource(
        flask_site, sname, config)
    return (anchore_engine.services.common.createServiceAPI(
        auth_root, sname, config))
def _run():
    """Size the reactor's pool per FLAGS and serve the WSGI app at *address*."""
    reactor.suggestThreadPoolSize(FLAGS.threadpool_size)
    wsgi_site = server.Site(
        wsgi.WSGIResource(reactor, reactor.getThreadPool(), app))
    endpoint = endpoints.serverFromString(reactor, address)
    # If the listen fails (e.g. port already bound), shut down via errback.
    endpoint.listen(wsgi_site).addErrback(err_shutdown)
    reactor.run(installSignalHandlers=int(not debug))
def create_server(app, port):
    """Serve *app* via WSGI with a WebSocket notification endpoint; blocks."""
    # WebSocket half: the notification service protocol.
    ws_factory = WebSocketServerFactory(u"ws://127.0.0.1:5000",
                                        debug=app.debug,
                                        debugCodePaths=app.debug)
    ws_factory.protocol = NotificationService
    ws_resource = WebSocketResource(ws_factory)
    # WSGI half: the Flask application on the reactor's thread pool.
    wsgi_resource = WSGIResource(reactor, reactor.getThreadPool(), app)
    # Everything is served via WSGI/Flask except "/notification-service",
    # which is handled by the WebSocket resource.
    root_resource = WSGIRootResource(
        wsgi_resource, {'notification-service': ws_resource})
    reactor.listenTCP(port, Site(root_resource))
    reactor.run()
def run_twisted_wsgi():
    """Bind the Flask app on port 5000 via Twisted WSGI.

    Note: only sets up the listener; the reactor is started elsewhere.
    """
    from twisted.web.server import Site
    from twisted.web.wsgi import WSGIResource
    wsgi_resource = WSGIResource(reactor, reactor.getThreadPool(), app)
    reactor.listenTCP(5000, Site(wsgi_resource))
def checkReactor(self, phase, *_):
    """Assert that the reactor is clean: no leftover selectables or pool work.

    Fails the test if readers/writers remain after removal, if any open
    reader is a listening port, or if the thread pool still has active work.
    """
    has_network_selectables = False
    for item in reactor.getReaders() + reactor.getWriters():
        if isinstance(item, HTTPChannel) or isinstance(item, Client):
            has_network_selectables = True
            break
    if has_network_selectables:
        # TODO(Martijn): we wait a while before we continue the check since network selectables
        # might take some time to cleanup. I'm not sure what's causing this.
        yield deferLater(reactor, 0.2, lambda: None)
    # This is the same check as in the _cleanReactor method of Twisted's Trial
    selectable_strings = []
    for sel in reactor.removeAll():
        if interfaces.IProcessTransport.providedBy(sel):
            # Leftover child process: kill it rather than leak it.
            self._logger.error("Sending kill signal to %s", repr(sel))
            sel.signalProcess('KILL')
        selectable_strings.append(repr(sel))
    self.assertFalse(selectable_strings,
                     "The reactor has leftover readers/writers during %s: %r" % (phase, selectable_strings))
    # Check whether we have closed all the sockets
    open_readers = reactor.getReaders()
    for reader in open_readers:
        self.assertNotIsInstance(reader, BasePort)
    # Check whether the threadpool is clean
    tp_items = len(reactor.getThreadPool().working)
    if tp_items > 0:
        # Print all stacks to debug this issue
        self.watchdog.print_all_stacks()
    self.assertEqual(tp_items, 0, "Still items left in the threadpool")
def run_scrAPI():
    """Serve the scrAPI WSGI app on port 5005; returns the reactor after run()."""
    wsgi_site = server.Site(
        wsgi.WSGIResource(reactor, reactor.getThreadPool(), app))
    http_server = endpoints.TCP4ServerEndpoint(reactor, 5005)
    http_server.listen(wsgi_site)
    reactor.run()
    return reactor
def run_dev_server():
    """Start the dev server: Flask over WSGI plus a "/ws" WebSocket endpoint."""
    global sender
    global ws_handler
    app.secret_key = "SECRET"
    app.debug = True
    # WebSocket resource (Hixie-76 allowed for old browsers).
    ws_factory = BroadcastServerFactory(ws_url, debug=True, debugCodePaths=True)
    ws_factory.protocol = EchoServerProtocol
    ws_factory.setProtocolOptions(allowHixie76=True)
    ws_resource = WebSocketResource(ws_factory)
    # WSGI resource for the Flask app.
    wsgi_resource = WSGIResource(reactor, reactor.getThreadPool(), app)
    # Serve everything via WSGI/Flask except the "/ws" path.
    root_resource = WSGIRootResource(wsgi_resource, {'ws': ws_resource})
    site = Site(root_resource)
    site.protocol = HTTPChannelHixie76Aware
    # Expose the sender and factory at module level for the handlers.
    sender = MessageSender(ws_factory)
    ws_factory.sender = sender
    ws_handler = ws_factory
    reactor.listenTCP(port, site)
    reactor.run()
def __download_range(self, k, dst):
    """Download key *k* to *dst* in parallel self.splitMB-sized chunks.

    Each chunk is fetched on the reactor's thread pool into its own temp
    file; chunks are then yielded in order and concatenated into *dst*.
    (Python 2 code: uses the file() builtin; presumably decorated with
    @inlineCallbacks by the caller -- TODO confirm.)
    """
    try:
        _, ext = os.path.splitext(dst)
        ds = []
        parts = []
        logging.info("Download %s start", k.name)
        for startByte in range(0, k.size, self.splitMB):
            output_part = self.new_temp_file(suffix=ext)
            parts.append(output_part)
            endByte = min(startByte + self.splitMB - 1, k.size)
            logging.debug(
                "deferToThreadPool %s start=%d end=%d size=%d cnt=%d",
                k.name,
                startByte,
                endByte,
                endByte - startByte,
                len(ds),
            )
            # Fire off each chunk download concurrently on the pool.
            d = twisted.internet.threads.deferToThreadPool(
                reactor,
                reactor.getThreadPool(),  # @UndefinedVariable
                self.__downloadOne,
                k,
                startByte,
                endByte,
                output_part,
                len(ds),
            )
            ds.append(d)
        if os.path.exists(dst):
            os.remove(dst)
        # NOTE(review): fout is never explicitly closed; it is only
        # released by GC/interpreter exit -- consider try/finally.
        fout = file(dst, "wb")
        start = timeit.default_timer()
        for cnt, p in enumerate(parts):
            # Wait for chunk cnt to finish, then append it to the output.
            yield ds[cnt]
            shutil.copyfileobj(file(p, "rb"), fout)
            size = min(k.size, (cnt + 1) * self.splitMB)
            elapsed = timeit.default_timer() - start
            speedstr = formatFileSize(size / elapsed)
            sizestr = formatFileSize(size)
            percent = (float(cnt) / len(parts)) * 100.0
            logging.info(
                "%03d/%03d (%.2f%%) speed=%s/s, elapsed=%.2f, size=%s",
                cnt,
                len(parts),
                percent,
                speedstr,
                elapsed,
                sizestr,
            )
    except Exception:
        logging.error("download error", exc_info=True)
        raise
def start(getter, poster, web_port):
    """Serve the Flask app under Twisted WSGI on *web_port*.

    Stashes *getter*/*poster* in the module-level ``get``/``post`` hooks
    used by the request handlers.  Blocks in reactor.run().
    """
    global get
    global post
    get = getter
    post = poster
    # BUG FIX: removed the duplicated "port = web_port" assignment and the
    # second reactor.run() call that followed the first one returning (a
    # Twisted reactor is not restartable, so it could only error out).
    port = web_port
    # Run the Flask app under Twisted through WSGI.
    from twisted.web.wsgi import WSGIResource
    from twisted.web.server import Site
    app.debug = True
    resource = WSGIResource(reactor, reactor.getThreadPool(), app)
    site = Site(resource)
    reactor.listenTCP(port, site)
    reactor.run()
def tearDown(self):
    """Stop the relay service, giving stuck worker threads a chance to exit."""
    # Unit tests that spawn a (blocking) client in a thread might still
    # have threads running at this point, if one is stuck waiting for a
    # message from a companion which has exited with an error. Our
    # relay's .stopService() drops all connections, which ought to
    # encourage those threads to terminate soon. If they don't, print a
    # warning to ease debugging.
    tp = reactor.getThreadPool()
    if not tp.working:
        # Fast path: nothing busy, just stop the service.
        return self.sp.stopService()
    # disconnect all callers
    d = defer.maybeDeferred(self.sp.stopService)
    wait_d = defer.Deferred()
    # wait a second, then check to see if it worked
    reactor.callLater(1.0, wait_d.callback, None)
    def _later(res):
        if len(tp.working):
            log.msg("wormhole.test.common.ServerBase.tearDown:"
                    " I was unable to convince all threads to exit.")
            tp.dumpStats()
            print("tearDown warning: threads are still active")
            print("This test will probably hang until one of the"
                  " clients gives up of their own accord.")
        else:
            log.msg("wormhole.test.common.ServerBase.tearDown:"
                    " I convinced all threads to exit.")
        # Chain onto the stopService deferred so teardown waits for it.
        return d
    wait_d.addCallback(_later)
    return wait_d
def createService(sname, config):
    """Build the Twisted service for engine service ``sname``.

    Sets up the connexion/Flask API app, reads the service's own config
    section, and returns either a full API service (when listen/port/
    endpoint_hostname are configured) with the monitor running as a
    LoopingCall, or a bare TimerService that only runs the monitor.

    :param sname: service name; key into ``config['services']``.
    :param config: full engine configuration mapping.
    :return: a Twisted service object ready to be attached.
    """
    global monitor_threads, monitors, servicename

    try:
        application = connexion.FlaskApp(__name__, specification_dir='swagger/')
        flask_app = application.app
        flask_app.url_map.strict_slashes = False
        anchore_engine.subsys.metrics.init_flask_metrics(flask_app, servicename=servicename)
        application.add_api('swagger.yaml')
    except Exception:
        traceback.print_exc()
        # BUGFIX: bare `raise` preserves the original traceback
        # (``raise err`` resets it on py2 and adds noise on py3).
        raise

    # BUGFIX: removed the no-op ``try: ... except Exception as err: raise err``
    # wrapper — a missing service entry propagates the KeyError either way.
    myconfig = config['services'][sname]
    servicename = sname

    try:
        kick_timer = int(myconfig['cycle_timer_seconds'])
    except Exception:  # BUGFIX: narrowed from bare except (kept SystemExit alive)
        kick_timer = 1

    doapi = False
    try:
        if myconfig['listen'] and myconfig['port'] and myconfig['endpoint_hostname']:
            doapi = True
    except Exception:  # BUGFIX: narrowed from bare except
        doapi = False

    kwargs = {
        'kick_timer': kick_timer,
        'monitors': monitors,
        'monitor_threads': monitor_threads,
        'servicename': servicename,
    }

    if doapi:
        # start up flask service
        flask_site = WSGIResource(reactor, reactor.getThreadPool(), application=flask_app)
        realroot = Resource()
        realroot.putChild(b"v1", anchore_engine.services.common.getAuthResource(flask_site, sname, config))
        realroot.putChild(b"health", anchore_engine.services.common.HealthResource())
        # this will rewrite any calls that do not have an explicit version to the base path before being processed by flask
        root = rewrite.RewriterResource(realroot, default_version_rewrite)
        ret_svc = anchore_engine.services.common.createServiceAPI(root, sname, config)
        # start up the monitor as a looping call
        lc = LoopingCall(anchore_engine.services.common.monitor, **kwargs)
        lc.start(1)
    else:
        # start up the monitor as a timer service
        svc = internet.TimerService(1, anchore_engine.services.common.monitor, **kwargs)
        svc.setName(sname)
        ret_svc = svc

    return ret_svc
def dump_stats():
    """
    Dump some basic stats about the reactor pool and threads at info level
    :return:
    """
    # NOTE(review): ``_team`` is a private Twisted ThreadPool attribute —
    # this may break across Twisted releases; confirm it is intentional.
    pool_stats = reactor.getThreadPool()._team.statistics().__dict__
    logger.info('Reactor queue stats: {}'.format(pool_stats))
def _start_in_multi_user_mode(args, root_resource, services_factory):
    """Boot the service in multi-user mode behind a LEAP provider.

    Requires ``args.provider``; applies monkeypatches, starts the events
    server, initializes the LEAP provider, wraps the root resource in
    auth protection, starts the site, and sizes the reactor threadpool.

    :return: an already-fired Deferred (startup here is synchronous).
    :raises ValueError: when no provider name was supplied.
    """
    if args.provider is None:
        raise ValueError('provider name is required')
    init_monkeypatches()
    events_server.ensure_server()
    leap_config, leap_provider = initialize_leap_provider(
        args.provider,
        args.leap_provider_cert,
        args.leap_provider_cert_fingerprint,
        args.leap_home)
    guarded_root = set_up_protected_resources(
        root_resource, leap_provider, services_factory, banner=args.banner)
    start_site(args, guarded_root)
    # Size the reactor threadpool (min 5, max 15 threads).
    reactor.getThreadPool().adjustPoolsize(5, 15)
    return defer.succeed(None)
def createService(sname, config):
    """Wrap the module-level Flask ``app`` in a Twisted WSGI resource,
    guard it with the common auth resource, and hand it to the common
    service factory.

    :param sname: service name passed through to the common helpers.
    :param config: engine configuration passed through unchanged.
    :return: the service object built by ``createServiceAPI``.
    """
    global app
    wsgi_site = WSGIResource(reactor, reactor.getThreadPool(), app)
    auth_root = anchore_engine.services.common.getAuthResource(
        wsgi_site, sname, config)
    return anchore_engine.services.common.createServiceAPI(
        auth_root, sname, config)
def startTwistedServer(self):
    """Start the WSGI app on an ephemeral local port for tests.

    Records the bound (host, port) in ``self.server_address`` and
    registers the listening port for cleanup.
    """
    wsgi_app = self.make_app()
    site = Site(WSGIResource(reactor, reactor.getThreadPool(), wsgi_app))
    # Port 0 lets the OS choose a free port; ask afterwards where we bound.
    self.port = reactor.listenTCP(0, site, interface='127.0.0.1')
    bound = self.port.getHost()
    self.server_address = (bound.host, bound.port)
    self.addCleanup(self.port.stopListening)
def start(self):
    """Mount the WSGI app under a shared root, listen on the configured
    HTTP port, schedule the browser UI, and run the reactor (blocks)."""
    shared_root = SharedRoot()
    shared_root.WSGI = WSGIResource(reactor, reactor.getThreadPool(), self.app)
    self.webserver = server.Site(shared_root)
    reactor.listenTCP(self.conf.HTTP_PORT, self.webserver)
    # Pop the UI once the reactor loop is actually running.
    reactor.callLater(0, self.browser.show)
    reactor.run()
def run():
    """Serve the API client app under Twisted on the configured port.

    Reads the port from the ``application`` config section, mounts the
    app as a WSGI resource, and runs the reactor (blocks).
    """
    config = get_config()
    app = init_api_client()
    flask_site = WSGIResource(reactor, reactor.getThreadPool(), app)
    site = Site(flask_site)
    # BUGFIX: the two-arg config.get(section, option) shape looks like
    # ConfigParser, which returns a string, while listenTCP requires an
    # int port — coerce explicitly. (Confirm config type against get_config.)
    reactor.listenTCP(int(config.get('application', 'port')), site)
    reactor.run()
def wsgi_resource():
    """Build the Django WSGI resource on the reactor threadpool.

    Sizes the pool (0..100 threads) and stops it after reactor shutdown
    so Ctrl-C exits cleanly.
    """
    pool = reactor.getThreadPool()
    pool.adjustPoolsize(0, 100)
    # Allow Ctrl-C to get you out cleanly: stop the pool on shutdown.
    reactor.addSystemEventTrigger('after', 'shutdown', pool.stop)
    return wsgi.WSGIResource(reactor, pool, AdminMediaHandler(WSGIHandler()))