Example #1
 def run(self):
     dbloop = task.LoopingCall(Blockchain.Default().PersistBlocks)
     dbloop.start(0.1)
     NodeLeader.Instance().Start()
     reactor.suggestThreadPoolSize(15)
     # reactor.callInThread(self.prompt)
     reactor.run(installSignalHandlers=False)
Example #2
    def initServer(self):
        # threads are used for calling parameter updates that can happen asynchronously
        # threadPoolSize limits number of calls that can happen at the same time, make it 'big'
        reactor.suggestThreadPoolSize(100)

        # register default parameters after connected to labrad
        callLater(.1, self.register_parameters, None, json.dumps(self.default_parameters))
Example #3
    def pre_start(self):

        logger.run.info("==============start==============")

        port = config.safe_get_int('network', 'listen-port')
        model = config.safe_get('model', 'path')

        if model.endswith("/"):
            model = model[:-1]

        pos = model.rfind('/')

        model_path = model[:pos]
        model_name = model[pos + 1:]

        sys.path.append(model_path)

        if config.safe_get('system', 'module').lower() == "mds":
            init_dbservice()

        # Importing the module runs the INIT step in each service's init.py
        __import__(model_name)

        reactor.listenTCP(port, MyServerFactory())
        reactor.callWhenRunning(loop_machine)
        reactor.suggestThreadPoolSize(50)
Example #4
    def makeService(self, options):
        if options['data-dir'] != None:
            if not os.access(options['data-dir'], os.X_OK | os.W_OK):
                raise core.SmapException("Cannot access " + options['data-dir'])
            smapconf.SERVER['DataDir'] = options['data-dir']

        inst = loader.load(options['conf'])
        # override defaults with command-line args
        smapconf.SERVER.update(dict([(k.lower(), v) for (k, v) in
                                     options.iteritems() if v != None]))

        if 'SuggestThreadPool' in smapconf.SERVER:
            reactor.suggestThreadPoolSize(int(smapconf.SERVER['SuggestThreadPool']))

        inst.start()
        reactor.addSystemEventTrigger('before', 'shutdown', inst.stop)

        site = getSite(inst, docroot=smapconf.SERVER['docroot'])
        service = MultiService()

        # add HTTP and HTTPS servers to the twisted multiservice
        if 'port' in smapconf.SERVER:
            service.addService(internet.TCPServer(int(smapconf.SERVER['port']), site))
        if 'sslport' in smapconf.SERVER:
            service.addService(internet.SSLServer(int(smapconf.SERVER['sslport']), 
                                                  site, 
                                                  SslServerContextFactory(smapconf.SERVER)))
        return service
Example #5
def start_search_tasks():
    """
    Before anything else, stop any search tasks that are already running, then start the
    search tasks concurrently.

    """
    global SEARCH_TASKS
    logging.info("(Re)populated config collections from config file. "
                 "Cancelling previous loops and restarting them again with the new config.")

    for looping_task in SEARCH_TASKS:
        logging.info("Cancelling this loop: %r", looping_task)
        looping_task.stop()
    SEARCH_TASKS = []

    searches = CONFIG['Searches'].values()
    search_count = len(searches)
    logging.info("Search count: %d", search_count)
    reactor.suggestThreadPoolSize(search_count)
    try:
        for search in searches:
            search_obj = Search(SERVICE_CLASS_MAPPER.get(search['destination']['service']), search,
                                CONFIG)
            do_search_concurrently(search_obj)
    except Exception as exception:
        logging.exception("Exception occurred while processing search. %s", exception.message)
Example #6
def main(fd=None):    
    from towgo.server import TwistedHttpServer
    from demo.app import initialize
    server = TwistedHttpServer()
    server.setInitMethod(initialize)
    
    from towgo.msetting import settings
    reactor.suggestThreadPoolSize(settings.THREAD_POOL_SIZE)
    reactor.callWhenRunning(server.initialize)        

    #http
    from towgo.server import TwistedSite
    site = TwistedSite()
        
    if settings.MULTI_PROCESS:
        if fd is None:
            # Create a new listening port and several other processes to help out.                                                                     
            port = reactor.listenTCP(options.port, site)
            num = cpu_count()
            for _ in range(num):
                reactor.spawnProcess(
                        None, executable, [executable, __file__, str(port.fileno())],
                    childFDs={0: 0, 1: 1, 2: 2, port.fileno(): port.fileno()},
                    env=environ)
        else:
            # Another process created the port, just start listening on it.                                                                            
            port = reactor.adoptStreamPort(fd, AF_INET, site)
    else:
        reactor.listenTCP(options.port, site)
    
    #run    
    reactor.run()
Example #7
    def handle(self, *args, **options):
        try:
            dtx_logger_configure(**options)
            node_name = options.get('node_name')
            node_opts = options.get('node_opts')

            thread_pool_size = options.get('thread_pool_size')
            if thread_pool_size:
                reactor.suggestThreadPoolSize(thread_pool_size)

            log.msg(u'Loading {}'.format(node_name))
            node = import_module(node_name)

            opts = dict(
                chain.from_iterable(
                    d.iteritems()
                    for d in [QueryDict(v).dict() for v in node_opts]))
            log.msg(u'Starting {} with args {}, kwargs {}'.format(
                node_name, args, opts))
            node.start(*args, **opts)

            log.msg(u'Running {}'.format(node_name))
            reactor.run()

            # TODO: Implement proper shutdown process
            for pid, process in started_process_list.items():
                log.msg('Stalled subprocess: {}'.format(pid))
                process.transport.signalProcess('KILL')

            log.msg(u'Finished')
        except Exception, exc:
            log.err(traceback.format_exc())
            raise
Example #8
def startup():
	if not os.path.exists('data/firmware'):
		os.makedirs('data/firmware')
	if not os.path.exists('data/static'):
		os.makedirs('data/static')
	if not os.path.exists('data/cert'):
		os.makedirs('data/cert')
	# Check the certificate file
	host = getHost()
	validateCertHost('data/cert/key.pem', 'data/cert/cert.pem', 'data/static/thumb.txt', host)
	
	# Start up the HTTPS server
	web_port = 443
	root_handler = File('./data/static/')	
	firmware_handler = FirmwareHandler('data/firmware/')
	root_handler.putChild('firmware', firmware_handler)
	site = Site(root_handler)
	site.protocol = MyHttpChannel
	reactor.listenTCP(web_port, site)
	
	# Start up the HTTP server
	root_handler_http = File("./data/static/")
	config_handler = File("./config.html")
	root_handler_http.putChild('config.html', config_handler)
	site_http = Site(root_handler_http)
	reactor.listenTCP(8080, site_http)

	reactor.suggestThreadPoolSize(50)

	printStatus("Startup complete, running main loop...")

	# Run the main loop, this never returns:
	reactor.run()
Example #9
 def start(self):
     """
     Once twisted is running, call the loader library and the various starter
     functions to get everything started.
     """
     reactor.suggestThreadPoolSize(50)
     reactor.callWhenRunning(self.start_loader_library)
Example #10
    def setUp(self):
        reactor.suggestThreadPoolSize(1)
        connection_string = os.environ.get("SHORTENER_TEST_CONNECTION_STRING",
                                           "sqlite://")

        self.account = 'test-account'
        cfg = {
            'host_domain': 'http://wtxt.io',
            'account': self.account,
            'connection_string': connection_string,
            'graphite_endpoint': 'tcp:www.example.com:80',
            'handlers': [
                {
                    'dump': 'shortener.handlers.dump.Dump'
                },
            ],
        }
        self.pool = HTTPConnectionPool(reactor, persistent=False)
        self.service = ShortenerServiceApp(reactor=reactor, config=cfg)

        self.tr = DisconnectingStringTransport()
        endpoint = StringTransportClientEndpoint(reactor, self.tr)
        self.service.metrics.carbon_client = CarbonClientService(endpoint)
        self.service.metrics.carbon_client.startService()
        yield self.service.metrics.carbon_client.connect_d

        site = Site(self.service.app.resource())
        self.listener = reactor.listenTCP(0, site, interface='localhost')
        self.listener_port = self.listener.getHost().port
        self._drop_tables()
        self.conn = yield self.service.engine.connect()
        self.addCleanup(self.listener.loseConnection)
        self.addCleanup(self.pool.closeCachedConnections)
Example #11
    def test_make_worker_with_threadpool_size(self):
        """
        The reactor threadpool can be resized with a command line option.
        """
        from twisted.internet import reactor

        old_maxthreads = reactor.getThreadPool().max
        self.add_cleanup(reactor.suggestThreadPoolSize, old_maxthreads)
        # Explicitly set the threadpool size to something different from the
        # value we're testing with.
        reactor.suggestThreadPoolSize(5)

        self.mk_config_file('worker', ["transport_name: sphex"])
        maker = VumiWorkerServiceMaker()

        # By default, we don't touch the threadpool.
        options = StartWorkerOptions()
        options.parseOptions([
            '--worker-class', 'vumi.demos.words.EchoWorker',
            '--config', self.config_file['worker'],
        ])
        worker = maker.makeService(options)
        self.assertEqual({'transport_name': 'sphex'}, worker.config)
        self.assertEqual(reactor.getThreadPool().max, 5)

        # If asked, we set the threadpool's maximum size.
        options_mt = StartWorkerOptions()
        options_mt.parseOptions([
            '--worker-class', 'vumi.demos.words.EchoWorker',
            '--config', self.config_file['worker'],
            '--maxthreads', '2',
        ])
        worker = maker.makeService(options_mt)
        self.assertEqual({'transport_name': 'sphex'}, worker.config)
        self.assertEqual(reactor.getThreadPool().max, 2)
Example #12
def start_console(cmdmapping,port=5432,threadcount=10):
    """start at console"""
    log.startLogging(sys.stdout)
    reactor.listenTCP(port,PGFactory(cmdmapping))
    reactor.suggestThreadPoolSize(threadcount)
    reactor.run()
    return
Example #13
 def configure(self, settings):
     self.settings = settings
     section = settings.section('server')
     self.user = section.getString('runtime user', None)
     self.group = section.getString('runtime group', None)
     self.pidfile = section.getPath('pid file',
                                    '/var/lib/terane/terane-server.pid')
     self.debug = section.getBoolean('debug', False)
     logconfigfile = section.getString('log config file',
                                       "%s.logconfig" % settings.appname)
     if section.getBoolean("debug", False):
         startLogging(StdoutHandler(), DEBUG, logconfigfile)
     else:
         logfile = section.getPath('log file',
                                   '/var/log/terane/terane-server.log')
         verbosity = section.getString('log verbosity', 'WARNING')
         if verbosity == 'DEBUG': level = DEBUG
         elif verbosity == 'INFO': level = INFO
         elif verbosity == 'WARNING': level = WARNING
         elif verbosity == 'ERROR': level = ERROR
         else:
             raise ConfigureError("Unknown log verbosity '%s'" % verbosity)
         startLogging(FileHandler(logfile), level, logconfigfile)
     self.threadpoolsize = section.getInt('thread pool size', 20)
     reactor.suggestThreadPoolSize(self.threadpoolsize)
Example #14
def main(argv=None):
    """ start up twisted reactor """
    parser = argparse.ArgumentParser(
        description='VMWare metrics exporter for Prometheus')
    parser.add_argument('-c',
                        '--config',
                        dest='config_file',
                        default=None,
                        help="configuration file")
    parser.add_argument('-p',
                        '--port',
                        dest='port',
                        type=int,
                        default=9272,
                        help="HTTP port to expose metrics")

    args = parser.parse_args(argv or sys.argv[1:])

    reactor.suggestThreadPoolSize(25)

    # Start up the server to expose the metrics.
    root = Resource()
    root.putChild(b'metrics', VMWareMetricsResource(args))
    root.putChild(b'healthz', HealthzResource())

    factory = Site(root)
    log("Starting web server on port {}".format(args.port))
    endpoint = endpoints.TCP4ServerEndpoint(reactor, args.port)
    endpoint.listen(factory)
    reactor.run()
Example #15
def _init(config, mode='normal'):
    from almar.global_config import GlobalConfig, MODE_PROXY, MODE_WORKER
    g = GlobalConfig.create_instance(config)

    # configure web service
    from almar.service import worker_root, proxy_root
    from twisted.web import server

    if mode == 'proxy':
        g.server_mode = MODE_PROXY
        if not g.proxy or not g.searcher:
            fatal_out('proxy configuration is invalid')
        # configure reactor
        reactor.suggestThreadPoolSize(int(g.proxy.max_threads))
        return int(g.proxy.port), server.Site(proxy_root)
    else:
        if not g.server or not g.model or not g.database:
            fatal_out('server configuration is invalid')
        # configure reactor
        reactor.suggestThreadPoolSize(int(g.server.max_threads))
        g.server_mode = MODE_WORKER

        # configure database
        from txpostgres import txpostgres
        txpostgres.ConnectionPool.min = int(g.database.min_connections)
        txpostgres.ConnectionPool.max = int(g.database.max_connections)

        from almar.backend.postgresql import PostgreSQLBackend as Backend
        Backend.create_instance(g.database)

        return int(g.server.port), server.Site(worker_root)
Example #16
 def do_cleanThreads(cls):
     from twisted.internet import reactor
     if interfaces.IReactorThreads.providedBy(reactor):
         reactor.suggestThreadPoolSize(0)
         if hasattr(reactor, 'threadpool') and reactor.threadpool:
             reactor.threadpool.stop()
             reactor.threadpool = None
Example #17
 def _run():
     reactor.suggestThreadPoolSize(FLAGS.threadpool_size)
     resource = wsgi.WSGIResource(reactor, reactor.getThreadPool(), app)
     site = server.Site(resource)
     endpoint = endpoints.serverFromString(reactor, address)
     endpoint.listen(site).addErrback(err_shutdown)
     reactor.run(installSignalHandlers=int(not debug))
Example #18
def request_continue_set(threadPool=20):
    reactor.suggestThreadPoolSize(threadPool)
    global start_time
    start_time = datetime.datetime.now()
    try:
        last_request = Requests.select().order_by(Requests.id.desc()).get()
        new_request_set = last_request.set
        request_list = Requests.select(
            Requests.request_string).where(Requests.set == new_request_set)
        finished_strings_list = []
        for request in request_list:
            finished_strings_list.append(request.request_string)
    except Exception:
        raise
    request_ittr = 0
    for x in range(1, 4):
        for combo in product(ascii_lowercase, repeat=x):
            request_ittr += 1
            if ''.join(combo) not in finished_strings_list:
                deferToThread(do_request, new_request_set, ''.join(combo),
                              request_ittr)
    global total_requests
    total_requests = request_ittr
    reactor.run()
    reactor.stop()
Example #19
def makeService(options):
   """
   Main entry point into Crossbar.io application. This is called from the Twisted
   plugin system to instantiate "crossbar".
   """

   ## install our log observer before anything else is done
   logger = Logger()
   twisted.python.log.addObserver(logger)

   ## import reactor here first and set some thread pool size
   from twisted.internet import reactor
   reactor.suggestThreadPoolSize(30)

   ## now actually create our top service and set the logger
   service = CrossbarService()
   service.logger = logger

   ## store user options set
   service.appdata = options['appdata']
   service.webdata = options['webdata']
   service.debug = True if options['debug'] else False
   service.licenseserver = options['licenseserver']
   service.isExe = False # will be set to true iff Crossbar is running from self-contained EXE

   return service
Example #20
    def start(self):
        '''
        start server
        '''
        log.startLogging(sys.stdout)
        reactor.suggestThreadPoolSize(settings.THREAD_POOL_SIZE)
        reactor.callWhenRunning(self.initialize)

        if settings.MULTI_PROCESS:
            args = sys.argv
            fd = int(args[-1]) if args[-1].isdigit() else None

            if fd is None:
                sargs = [arg for arg in args[1:] if arg.startswith('--')]
                # Create a new listening port and several other processes to help out.
                port = reactor.listenTCP(options.port, self.factory)
                for _ in range(self.process_num):
                    reactor.spawnProcess(None,
                                         executable, [executable, args[0]] +
                                         sargs + [str(port.fileno())],
                                         childFDs={
                                             0: 0,
                                             1: 1,
                                             2: 2,
                                             port.fileno(): port.fileno()
                                         },
                                         env=environ)
            else:
                # Another process created the port, just start listening on it.
                port = reactor.adoptStreamPort(fd, AF_INET, self.factory)
        else:
            reactor.listenTCP(options.port, self.factory)

        #run
        reactor.run()
Example #21
def main():
    args, parser = _parse_args(sys.argv[1:])

    end = EndpointHandler
    # Add the log attribute
    setattr(end, 'log', log)
    setattr(args, 'log', log)
    annoy = Annoy(args)
    log.startLogging(sys.stdout)

    # Add the storage
    end.ap_settings = args

    site = cyclone.web.Application([
        (r"/(.*)", end),
    ],
        default_host=args.host,
        debug=args.debug,
    )

    log.msg("Starting on %s" % args.port)
    reactor.listenTCP(args.port, site)
    reactor.suggestThreadPoolSize(50)
    reactor.callLater(args.period, annoy.bother)
    reactor.run()
Example #22
def main():
    #init logger
    logger = logging.getLogger('psp')
    hdlr = logging.FileHandler('psp.log')
    strm_out = logging.StreamHandler(sys.__stdout__)
    formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
    hdlr.setFormatter(formatter)
    strm_out.setFormatter(formatter)
    logger.addHandler(hdlr) 
    logger.addHandler(strm_out) 
    logger.setLevel(logging.INFO) 

    mdns_client = mdns.Mdns_client('laptop', 'haris.sp', 8080, logger)

    #init web server 
    site = server.Site(signpost_server.Singpost_server(logger))

    # run method in thread
    reactor.suggestThreadPoolSize(30)
    factory = Factory()
    reactor.listenSSL(8080, site, HTTPSVerifyingContextFactory()) #myContextFactory)
    mdns_client.setup_mdns()
    
    #service discovery module
    discovery = server_discovery.Server_discovery(logger)
    discovery.service_update() #initial discovery to fetch entries
    gobject.timeout_add(30000, discovery.service_update)
    
    # run the loop
    gobject.threads_init()
    gobject.MainLoop().run() 
Example #23
    def __init__(self, config):
        router_cfg = config['router']
        for key in ('socket', 'host', 'port'):
            if key not in router_cfg:
                router_cfg[key] = None

        router_jid = '%s.%s' % (router_cfg['jid'], config['host'])
        xmlstream2.SocketComponent.__init__(self, router_cfg['socket'], router_cfg['host'], router_cfg['port'], router_jid, router_cfg['secret'])
        self.config = config

        # this is for queueing keyring thread requests
        reactor.suggestThreadPoolSize(1)

        self.logTraffic = config['debug']
        self.network = config['network']
        self.servername = config['host']
        self.start_time = time.time()

        storage.init(config['database'])
        self.keyring = keyring.Keyring(storage.MySQLNetworkStorage(), config['fingerprint'], self.network, self.servername, True)
        self.presencedb = storage.MySQLPresenceStorage()

        self.subscriptions = {}
        self.whitelists = {}
        self.blacklists = {}

        # protocol handlers here!!
        for handler in self.protocolHandlers:
            inst = handler()
            if handler == JIDCache:
                self.cache = inst
            inst.setHandlerParent(self)
Example #24
    def makeService(self, options):
        if options['data-dir'] != None:
            if not os.access(options['data-dir'], os.X_OK | os.W_OK):
                raise core.SmapException("Cannot access " +
                                         options['data-dir'])
            smapconf.SERVER['DataDir'] = options['data-dir']

        inst = loader.load(options['conf'])
        # override defaults with command-line args
        smapconf.SERVER.update(
            dict([(k.lower(), v) for (k, v) in options.iteritems()
                  if v != None]))

        if 'SuggestThreadPool' in smapconf.SERVER:
            reactor.suggestThreadPoolSize(
                int(smapconf.SERVER['SuggestThreadPool']))

        inst.start()
        reactor.addSystemEventTrigger('before', 'shutdown', inst.stop)

        site = getSite(inst, docroot=smapconf.SERVER['docroot'])
        service = MultiService()

        # add HTTP and HTTPS servers to the twisted multiservice
        if 'port' in smapconf.SERVER:
            service.addService(
                internet.TCPServer(int(smapconf.SERVER['port']), site))
        if 'sslport' in smapconf.SERVER:
            service.addService(
                internet.SSLServer(int(smapconf.SERVER['sslport']), site,
                                   SslServerContextFactory(smapconf.SERVER)))
        return service
Example #25
    def makeService(self, options):
        """
        Construct a TCPServer from a factory defined in myproject.
        """
        from minitree import configure
        c = configure(options["config"])

        from twisted.internet import reactor
        reactor.suggestThreadPoolSize(int(c.get("server:main", "max_threads")))
        from txpostgres import txpostgres
        txpostgres.ConnectionPool.min = int(c.get("backend:main",
                                                  "max_connections"))

        from minitree.db.postgres import dbBackend
        dbBackend.connect(c.get("backend:main", "dsn"))

        from minitree.service import site_configure
        site_root = site_configure(c)
        from twisted.web import server
        site = server.Site(site_root)

        if "socket" in options and options["socket"]:
            return internet.UNIXServer(options["socket"], site)
        else:
            return internet.TCPServer(int(options["port"] or
                                          c.get("server:main", "port")), site)
Example #26
    def run_server():
        # Set logging
        stream = None
        if logfile:
            logging.basicConfig(filename=logfile, level=logging.DEBUG)
        elif not daemonized:
            logging.basicConfig(filename="/dev/stdout", level=logging.DEBUG)
        else:
            # If no logging file was given, and we're daemonized, create a temp
            # logfile for monitoring.
            stream = NamedTemporaryFile(delete=True, suffix=socket2.replace("/", "-"))
            logging.basicConfig(stream=stream, level=logging.DEBUG)

        logging.info("Socket server started at %s" % socket2)

        # Thread sensitive interface for stdout/stdin
        std.setup()

        # Set thread pool size (max parallel interactive processes).
        if thread_pool_size:
            reactor.suggestThreadPoolSize(thread_pool_size)

        # Set process name
        set_title(stream.name if stream else logfile)

        # Run Twisted reactor
        reactor.run()

        # Remove logging file (this will automatically delete the NamedTemporaryFile)
        if stream:
            stream.close()
Example #27
 def startListen(self):
     # First check that all of the required parameters have been set
     if self._isServer():
         if self.serverProtocol is None:
             raise Exception('protocol is not set')
         if self.serverFactory is None:
             raise Exception('factory is not set')
         if self.addr is None or self.port is None:
             raise Exception('createServer was not called')
     else:
         raise Exception('flag is not set or this method is not supported')
     # Start the server; it should keep running in a child thread
     # Call the parent class's startListen method and pass the data in
     factory = self.serverFactory
     factory.protocol = self.serverProtocol
     #if self.isAutoReconnect: # set auto-reconnection
     #factory.setProtocolOptions(autoPingInterval=5, autoPingTimeout=2)
     listenWS(factory)
     # reactor.listenTCP(self.port, factory)
     # reactor.run(installSignalHandlers=False)
     reactor.suggestThreadPoolSize(300)
     serverThread = threading.Thread(target=self._run,
                                     args=(reactor, factory),
                                     name='serverThread')
     serverThread.start()
Example #28
    def setUp(self):
        reactor.suggestThreadPoolSize(1)
        connection_string = os.environ.get("SHORTENER_TEST_CONNECTION_STRING", "sqlite://")

        self.account = "test-account"
        cfg = {
            "host_domain": "http://wtxt.io",
            "account": self.account,
            "connection_string": connection_string,
            "graphite_endpoint": "tcp:www.example.com:80",
            "handlers": [{"dump": "shortener.handlers.dump.Dump"}],
        }
        self.pool = HTTPConnectionPool(reactor, persistent=False)
        self.service = ShortenerServiceApp(reactor=reactor, config=cfg)

        self.tr = DisconnectingStringTransport()
        endpoint = StringTransportClientEndpoint(reactor, self.tr)
        self.service.metrics.carbon_client = CarbonClientService(endpoint)
        self.service.metrics.carbon_client.startService()
        yield self.service.metrics.carbon_client.connect_d

        site = Site(self.service.app.resource())
        self.listener = reactor.listenTCP(0, site, interface="localhost")
        self.listener_port = self.listener.getHost().port
        self._drop_tables()
        self.conn = yield self.service.engine.connect()
        self.addCleanup(self.listener.loseConnection)
        self.addCleanup(self.pool.closeCachedConnections)
Example #29
    def privilegedStartService(self):
        # get hostname
        p = subprocess.Popen(['hostname'],
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        stdout, stderr = p.communicate()
        self.hostname = stdout.strip("\n")

        # set up error mail
        log.addObserver(self.mailFailure)

        log.msg(conf.SSL_LISTEN_PORT)
        log.msg(conf.SSL_KEY)
        log.msg(conf.SSL_CRT)
        log.msg(conf.SSL_CA)
        log.msg(conf.EPP_HOST)
        log.msg(conf.EPP_PORT)
        log.msg(conf.CLIENT_SSL_KEY)
        log.msg(conf.CLIENT_SSL_CRT)
        log.msg(conf.USERNAME)
        log.msg(conf.PASSWORD)
        log.msg(conf.CONNECTIONS)
        log.msg(conf.MAIL_FROM)
        log.msg(conf.MAIL_TO_ON_ERROR)

        # init proxy manager
        self.pm = proxy.ProxyManager()

        myContextFactory = ssl.DefaultOpenSSLContextFactory(
            conf.SSL_KEY, conf.SSL_CRT)

        ctx = myContextFactory.getContext()

        def verifyCallback(connection, x509, errnum, errdepth, ok):
            if not ok:
                subject = x509.get_subject()
                ssubject = "".join(
                    "/{0:s}={1:s}".format(name.decode(), value.decode())
                    for name, value in subject.get_components())
                log.msg('invalid cert from subject:' + ssubject)
                return False
            else:
                log.msg("Certs are fine")
            return True

        ctx.set_verify(SSL.VERIFY_PEER | SSL.VERIFY_FAIL_IF_NO_PEER_CERT,
                       verifyCallback)
        ctx.load_verify_locations(conf.SSL_CA)

        # start server
        reactor.suggestThreadPoolSize(5)
        if conf.ENABLED:
            self.port = reactor.listenTCP(conf.LISTEN_PORT,
                                          proxy.ProxyServerFactory(),
                                          interface=conf.INTERFACE)
        if conf.SSL_ENABLED:
            self.ssl_port = reactor.listenSSL(conf.SSL_LISTEN_PORT,
                                              proxy.ProxyServerFactory(),
                                              myContextFactory,
                                              interface=conf.INTERFACE)
Example #30
def startNZBLeecher():
    """ gogogo """
    defaultAntiIdle = int(4.5 * 60) # 4.5 minutes
    defaultIdleTimeout = 30
    
    totalCount = 0
    # Order the initialization of servers by the fillserver priority, if fillserver
    # support is enabled
    serverDictsByPriority = Hellanzb.SERVERS.items()
    if isinstance(Hellanzb.queue, FillServerQueue):
        serverDictsByPriority.sort(lambda x, y: cmp(x[1].get('fillserver'),
                                                    y[1].get('fillserver')))
    for serverId, serverDict in serverDictsByPriority:
        if not serverDict.get('enabled') is False:
            totalCount += connectServer(serverId, serverDict, defaultAntiIdle, defaultIdleTimeout)

    # How large the scroll ticker should be
    Hellanzb.scroller.maxCount = totalCount

    # Initialize the retry queue, (this only initializes it when it's necessary) for
    # automatic failover. It contains multiple sub-queues that work within the NZBQueue,
    # for queueing segments that failed to download on particular serverPools.
    Hellanzb.queue.initRetryQueue()

    # Allocate only one thread, just for decoding
    reactor.suggestThreadPoolSize(1)

    # Well, there's egg and bacon; egg sausage and bacon; egg and spam; egg bacon and
    # spam; egg bacon sausage and spam; spam bacon sausage and spam; spam egg spam spam
    # bacon and spam; spam sausage spam spam bacon spam tomato and spam;
    reactor.run()
    # Spam! Spam! Spam! Spam! Lovely spam! Spam! Spam!

    # Safely tear down the app only after the reactor shutdown
    finishShutdown()
Example #31
def suggestThreadpoolSize(maxThreads):
    """Updates the size of the twisted threadpool

    The function must be passed the maximum number of generation threads the
    user has requested; the reactor's pool is sized to 1.5 times that value.
    """
    reactor.suggestThreadPoolSize(int(maxThreads*1.5))
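For reference, the effect of such a call can be observed on the reactor's thread pool, as the test examples further down also check; a minimal standalone sketch (not taken from any of the projects listed here, assuming a default threaded reactor):

from twisted.internet import reactor

# Ask the reactor for a larger pool; the new ceiling is reported as the pool's `max`.
reactor.suggestThreadPoolSize(15)
assert reactor.getThreadPool().max == 15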
Example #32
def start_search_tasks():
    """
    Before anything else, stop any search tasks that are already running, then start the
    search tasks concurrently.

    """
    global SEARCH_TASKS
    logging.info(
        "(Re)populated config collections from config file. "
        "Cancelling previous loops and restarting them again with the new config."
    )

    for looping_task in SEARCH_TASKS:
        logging.info("Cancelling this loop: %r", looping_task)
        looping_task.stop()
    SEARCH_TASKS = []

    searches = CONFIG['Searches'].values()
    search_count = len(searches)
    logging.info("Search count: %d", search_count)
    reactor.suggestThreadPoolSize(search_count)
    try:
        for search in searches:
            search_obj = Search(
                SERVICE_CLASS_MAPPER.get(search['destination']['service']),
                search, CONFIG)
            do_search_concurrently(search_obj)
    except Exception as exception:
        logging.exception("Exception occurred while processing search. %s",
                          exception.message)
Example #33
    def run_server():
        # Set logging
        stream = None
        if logfile:
            logging.basicConfig(filename=logfile, level=logging.DEBUG)
        elif not daemonized:
            logging.basicConfig(filename='/dev/stdout', level=logging.DEBUG)
        else:
            # If no logging file was given, and we're daemonized, create a temp
            # logfile for monitoring.
            stream = NamedTemporaryFile(delete=True, suffix=socket2.replace('/', '-'))
            logging.basicConfig(stream=stream, level=logging.DEBUG)

        logging.info('Socket server started at %s' % socket2)

        # Thread sensitive interface for stdout/stdin
        std.setup()

        # Set thread pool size (max parallel interactive processes).
        if thread_pool_size:
            reactor.suggestThreadPoolSize(thread_pool_size)

        # Set process name
        set_title(stream.name if stream else logfile)

        # Run Twisted reactor
        reactor.run()

        # Remove logging file (this will automatically delete the NamedTemporaryFile)
        if stream:
            stream.close()
Example #34
    def handle(self, *args, **options):
        try:
            dtx_logger_configure(**options)
            node_name = options.get('node_name')
            node_opts = options.get('node_opts')

            thread_pool_size = options.get('thread_pool_size')
            if thread_pool_size:
                reactor.suggestThreadPoolSize(thread_pool_size)

            log.msg(u'Loading {}'.format(node_name))
            node = import_module(node_name)

            opts = dict(chain.from_iterable(d.iteritems() for d in [QueryDict(v).dict() for v in node_opts]))
            log.msg(u'Starting {} with args {}, kwargs {}'.format(node_name, args, opts))
            node.start(*args, **opts)

            log.msg(u'Running {}'.format(node_name))
            reactor.run()

            # TODO: Implement proper shutdown process
            for pid, process in started_process_list.items():
                log.msg('Stalled subprocess: {}'.format(pid))
                process.transport.signalProcess('KILL')

            log.msg(u'Finished')
        except Exception, exc:
            log.err(traceback.format_exc())
            raise
Example #35
def main():
    log_file = logfile.LogFile.fromFullPath('log/serverlog.log')
    log.addObserver(log.FileLogObserver(log_file).emit)
    print("===== PSO2Proxy vGIT %s =====" % config.proxy_ver)
    time_string = time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime())
    print("[ServerStart] Trying to start server at %s" % time_string)
    if myIp == "0.0.0.0":
        print("==== ERROR 001 ====")
        print("You have NOT configured the IP address for PSO2Proxy!")
        print(
            "Please edit cfg/pso2proxy.config.yml and change myIpAddr to your IP public IP address "
            "(Not LAN address if you're on a LAN!) ")
        print("After you fix this, please restart PSO2Proxy.")
        sys.exit(0)
    if bindIp == "0.0.0.0":
        interface_ip = myIp
    else:
        interface_ip = bindIp

    if not os.path.isfile("keys/myKey.pem"):
        print("==== ERROR 002 ====")
        print("You do NOT have your local RSA private key installed to 'keys/myKey.pem'!")
        print("Please see README.md's section on RSA keys for more information.")
        print("After you fix this, please restart PSO2Proxy.")
        sys.exit(0)

    if not os.path.isfile("keys/SEGAKey.pem"):
        print("==== ERROR 003 ====")
        print("You do NOT have a SEGA RSA public key installed to 'keys/SEGAKey.pem'!")
        print("Please see README.md's section on RSA keys for more information.")
        print("After you fix this, please restart PSO2Proxy.")
        sys.exit(0)

    for shipNum in range(0, 10):  # PSO2 Checks all ships round robin, so sadly for max compatibility we have to open these no matter what ships are enabled...
        ship_endpoint = endpoints.TCP4ServerEndpoint(reactor, 12099 + (100 * shipNum), interface=interface_ip)
        ship_endpoint.listen(ShipAdvertiserFactory())

    for shipNum in config.globalConfig.get_key('enabledShips'):
        query_endpoint = endpoints.TCP4ServerEndpoint(reactor, 12000 + (100 * shipNum), interface=interface_ip)
        query_endpoint.listen(BlockScraperFactory())
        print("[ShipProxy] Bound port %i for ship %i query server!" % ((12000 + (100 * shipNum)), shipNum))
    query_endpoint = endpoints.TCP4ServerEndpoint(reactor, 13000, interface=interface_ip)
    query_endpoint.listen(BlockScraperFactory())
    stdio.StandardIO(ServerConsole())
    print("[ShipProxy] Loading plugins...")
    import glob

    for plug in glob.glob("plugins/*.py"):
        plug = plug[:-3]
        plug = plug.replace(os.sep, '.')
        print("[ShipProxy] Importing %s..." % plug)
        __import__(plug)
    for f in plugin_manager.onStart:
        f()
    reactor.suggestThreadPoolSize(30)
    reactor.run()
    data.clients.dbManager.close_db()
    for f in plugin_manager.onStop:
        f()
Example #36
 def setUp(self):
     reactor.suggestThreadPoolSize(1)
     connection_string = os.environ.get("SHORTENER_TEST_CONNECTION_STRING",
                                        "sqlite://")
     self.engine = get_engine(connection_string,
                              reactor=FakeReactorThreads())
     self._drop_tables()
     self.conn = self.successResultOf(self.engine.connect())
Example #37
def main():
    print __file__
    workroot = os.path.dirname(os.path.abspath(__file__))
    reactor.suggestThreadPoolSize(20)

    webserver = WebService(workroot)
    reactor.listenTCP(9001, server.Site(webserver.get_resource(), timeout=10))
    reactor.run()
Example #38
def start_loop(agents):
    reactor.suggestThreadPoolSize(30)
    for agent in agents:
        agent.update_ams(agent.ams)
        agent.on_start()
        ILP = reactor.listenTCP(agent.aid.port, agent.agentInstance)
        agent.ILP = ILP
    reactor.run()
Example #39
 def test_suggestThreadPoolSize(self):
     """
     Try to change maximum number of threads.
     """
     reactor.suggestThreadPoolSize(34)
     self.assertEqual(reactor.threadpool.max, 34)
     reactor.suggestThreadPoolSize(4)
     self.assertEqual(reactor.threadpool.max, 4)
Example #40
    def connectionMade(self):
        self.srv_queue = defer.DeferredQueue()
        self.cli_queue = defer.DeferredQueue()
        self.srv_queue.get().addCallback(self.clientDataReceived)

        factory = ProxyServerFactory(self.srv_queue, self.cli_queue)
        reactor.connectTCP(MORTPROD_HOST, MORTPROD_PORT, factory)
        reactor.suggestThreadPoolSize(30)
Example #41
 def test_suggestThreadPoolSize(self):
     """
     Try to change maximum number of threads.
     """
     reactor.suggestThreadPoolSize(34)
     self.assertEqual(reactor.threadpool.max, 34)
     reactor.suggestThreadPoolSize(4)
     self.assertEqual(reactor.threadpool.max, 4)
Example #42
 def start_reactor(self):
     from twisted.internet import reactor
     reactor.callWhenRunning(
         lambda: self.log.info('twisted-reactor-started'))
     reactor.addSystemEventTrigger('before', 'shutdown',
                                   self.shutdown_components)
     reactor.suggestThreadPoolSize(300)
     reactor.run()
Example #43
 def setUp(self):
     reactor.suggestThreadPoolSize(1)
     connection_string = os.environ.get(
         "SHORTENER_TEST_CONNECTION_STRING", "sqlite://")
     self.engine = get_engine(
         connection_string, reactor=FakeReactorThreads())
     self._drop_tables()
     self.conn = self.successResultOf(self.engine.connect())
Example #44
def main():
    global_var.up_ti = int(time.time())
    global_var.loop_task = task.LoopingCall(repeat_task)
    global_var.loop_task.start(1, now=False)
    reactor.suggestThreadPoolSize(50)
    connect2WS(True)
    signal.signal(signal.SIGINT, signal_handler)
    reactor.run()
Example #45
def main():
    server = Server()
    application = service.Application(settings.get('PROJECT_NAME'))
    logfile = DailyLogFile(settings.get('LOG_FILE'), settings.get('LOG_DIR'))
    application.setComponent(ILogObserver, FileLogObserver(logfile).emit)
    server.setServiceParent(application)
    from twisted.internet import reactor
    reactor.suggestThreadPoolSize(10)
Example #46
    def makeService(self, options):
        if options['conf']:
            settings.conf = settings.load(options['conf'])

        # set the configured threadpool size before the reactor starts
        reactor.suggestThreadPoolSize(settings.conf['threadpool size'])

        if options['memdebug']:
            from twisted.internet import task
            import objgraph
            import gc
            def stats():
                print gc.collect()
                print
                print '\n'.join(map(str, objgraph.most_common_types(limit=10)))
            task.LoopingCall(stats).start(2)

        cp = adbapi.ConnectionPool(settings.conf['database']['module'],
                                   host=settings.conf['database']['host'],
                                   database=settings.conf['database']['db'],
                                   user=settings.conf['database']['user'],
                                   password=settings.conf['database']['password'],
                                   port=settings.conf['database']['port'],
                                   cp_min=5, cp_max=30,
                                   cp_reconnect=True)

        if options['subscribe']:
            subscribe(cp, settings)


        # create a single republisher to send the data out on
        http_repub = republisher.ReResource(cp)
        websocket_repub = republisher.WebSocketRepublishResource(cp)
        if settings.conf['mongo']['enabled']:
            mongo_repub = republisher.MongoRepublisher(cp)
        else:
            mongo_repub = None

        service = MultiService()
        for svc in settings.conf['server']:
            scfg = settings.conf['server'][svc]
            site = getSite(cp, 
                           resources=scfg['resources'],
                           http_repub=http_repub, 
                           websocket_repub=websocket_repub,
                           mongo_repub=mongo_repub)

            if not len(scfg['ssl']) > 1:
                service.addService(internet.TCPServer(scfg['port'],
                                                      site,
                                                      interface=scfg['interface']))
            else:
                service.addService(internet.SSLServer(scfg['port'],
                                                      site,
                                                      SslServerContextFactory(scfg['ssl']),
                                                      interface=scfg['interface']))

        return service
Example #47
def makeApp(ConfigClass):
    """ Generate and return an application

    See MoinMoin.server.Config for config options

    @param ConfigClass: config class
    @rtype: application object
    @return twisted application, needed by twistd
    """
    # Create config instance (raise RuntimeError if config invalid)
    global config
    config = ConfigClass()
        
    # Set number of threads
    reactor.suggestThreadPoolSize(config.threads)
    
    # The root of the HTTP hierarchy
    default = WikiRoot()

    # Here is where img and css and some special files come from
    default.putChild('wiki', static.File(config.docs))

    # Generate the Site factory
    # TODO: Maybe we can use WikiRoot instead of this
    # ----------------------------------------------
    root = vhost.NameVirtualHost()
    root.default = default
    # ----------------------------------------------
    site = MoinSite(root, logPath=config.logPath, timeout=2*60) # 2 minutes timeout

    # Make application
    application = service.Application("web", uid=config.uid, gid=config.gid)
    sc = service.IServiceCollection(application)

    # Listen to all interfaces in config.interfaces
    for entry in config.interfaces:
        # Add a TCPServer for each interface.

        # This is a hidden experimental feature: each entry in
        # interfaces may contain a port, using 'ip:port'.
        # Note: the format is subject to change!
        try:
            interface, port = entry.split(':', 1)
        except ValueError:
            interface, port = entry, config.port
            
        # Might raise ValueError if not integer.
        # TODO: check if we can use string port, like 'http'
        port = int(port)                       

        if port == 443 and ssl and ssl.supported and config.sslcert:
            sslContext = ssl.DefaultOpenSSLContextFactory(*config.sslcert)
            s = internet.SSLServer(port, site, sslContext, interface=interface)
        else:
            s = internet.TCPServer(port, site, interface=interface)
        s.setServiceParent(sc)

    return application
Example #48
def StartStorageService(config, block_store):
    try:
        http_port = config['StorageService']['HttpPort']
        http_host = config['StorageService']['Host']
        worker_threads = config['StorageService'].get('WorkerThreads', 8)
        reactor_threads = config['StorageService'].get('ReactorThreads', 8)
    except KeyError as ke:
        logger.error('missing configuration for %s', str(ke))
        sys.exit(-1)

    logger.info('service started on port %s', http_port)

    thread_pool = ThreadPool(maxthreads=worker_threads)
    thread_pool.start()
    reactor.addSystemEventTrigger('before', 'shutdown', thread_pool.stop)

    block = Resource()
    block.putChild(
        b'get',
        WSGIResource(reactor, thread_pool,
                     AppWrapperMiddleware(GetBlockApp(block_store))))
    block.putChild(
        b'gets',
        WSGIResource(reactor, thread_pool,
                     AppWrapperMiddleware(GetBlocksApp(block_store))))
    block.putChild(
        b'store',
        WSGIResource(reactor, thread_pool,
                     AppWrapperMiddleware(StoreBlocksApp(block_store))))
    block.putChild(
        b'list',
        WSGIResource(reactor, thread_pool,
                     AppWrapperMiddleware(ListBlocksApp(block_store))))
    block.putChild(
        b'check',
        WSGIResource(reactor, thread_pool,
                     AppWrapperMiddleware(CheckBlocksApp(block_store))))

    root = Resource()
    root.putChild(
        b'info',
        WSGIResource(reactor, thread_pool,
                     AppWrapperMiddleware(InfoApp(block_store))))
    root.putChild(b'block', block)

    site = Site(root, timeout=60)
    site.displayTracebacks = True

    reactor.suggestThreadPoolSize(reactor_threads)

    signal.signal(signal.SIGQUIT, __shutdown__)
    signal.signal(signal.SIGTERM, __shutdown__)

    endpoint = TCP4ServerEndpoint(reactor,
                                  http_port,
                                  backlog=32,
                                  interface=http_host)
    endpoint.listen(site)
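The example above keeps two separate pools: a dedicated ThreadPool that serves the WSGI resources, and the reactor's own pool, which suggestThreadPoolSize resizes. A minimal sketch of that separation (not from the project above; the sizes are illustrative only):

from twisted.internet import reactor
from twisted.python.threadpool import ThreadPool

# Dedicated pool for WSGI requests, started and stopped by hand.
wsgi_pool = ThreadPool(maxthreads=8)
wsgi_pool.start()
reactor.addSystemEventTrigger('before', 'shutdown', wsgi_pool.stop)

# The reactor's internal pool (used by deferToThread) is a different object.
reactor.suggestThreadPoolSize(8)
assert reactor.getThreadPool() is not wsgi_pool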
Example #49
	def start(self): 
		Log.info("starting collection")
		# size the thread pool to the number of packs
		reactor.suggestThreadPoolSize(len(self.packs))
		# schedule the collection timer
		collectionTimer = task.LoopingCall(self.collection_timer_handler)
		collectionTimer.start(10.0, now=True)		
		# we're supposed to return a deferred...
		return defer.succeed(None)
Example #50
def main():
    factory = protocol.Factory()
    factory.protocol = Server
    print "waiting for connection ..."
    # a = Server()
    # reactor.callLater(a.run_test1())
    reactor.suggestThreadPoolSize(10)
    reactor.listenTCP(8006, factory)
    reactor.run()
Example #51
    def __init__(self, domain='', *args, **kwargs):

        # Call constructor of parent class
        super(CnqzuParser, self).__init__(*args, **kwargs)

        self.downloaded_count = 0
        self.pending_downloads = []
        self.urls_to_be_processed = [self.url]
        reactor.suggestThreadPoolSize(1)
Example #52
def main():
    try:
        reactor.suggestThreadPoolSize(5)
        Consumer().runrealtime()

        reactor.run()
    except Exception as e:
        log.error(e)
        log.error(traceback.format_exc())
Example #53
 def startUC1(self):
     log.startLogging(sys.stdout)
     serverFactory = nxtServerFactory(SuperNETApiD.queryComposers, SuperNETApiD.parsers, self.environ)
     serverFactory.protocol = ProxyServerProtocolSuperNET # <- this is not an instance this is the CLASS!!!!
     log.msg(1*"initUC1")
     uc1_pingPong = UC1_pingPong(serverFactory, self, self.environ)  # also hand in 'self' here as a means to stop self
     self.timer1 = task.LoopingCall(uc1_pingPong.periodic)
     self.timer1.start(TIMER_850, now=True)
     reactor.suggestThreadPoolSize(500)
     reactor.listenTCP(LISTEN_PORT_SNT, serverFactory)
Example #54
    def __init__(self, config, component_builder=None, testing=False):
        logging.basicConfig(filename=config['log_file'],
                            level=config['log_level'])
        logging.captureWarnings(True)
        logger.debug("Configuration: " + config.view())

        logger.debug("Creating a new data router")
        self.config = config
        self.data_router = self._create_data_router(config, component_builder)
        self._testing = testing
        reactor.suggestThreadPoolSize(config['num_threads'] * 5)
Example #55
    def __init__(self):
        '''Initialize a new dispatcher'''
        # This will store all services we should host, using the service name
        # as mapping key to retrieve the service, and the service itself
        # as mapping key to retrieve the name
        log.msg('[DISPATCHER] Initializing')
        self.services = dict()

        ## Hack
        ## Prevent the application server from hanging when 10 (the default
        ## threadpool size) concurrent requests are being processed
        reactor.suggestThreadPoolSize(30)  # pylint: disable=E1101
Example #56
def main():
    preferences = Preferences()
    task_factory = SimpleTaskFactory(PythonCollectionTask)
    task_splitter = PerDataSourceInstanceTaskSplitter(task_factory)
    daemon = CollectorDaemon(preferences, task_splitter)
    pool_size = preferences.options.threadPoolSize

    # The Twisted version shipped with Zenoss 4.1 doesn't have this.
    if hasattr(reactor, 'suggestThreadPoolSize'):
        reactor.suggestThreadPoolSize(pool_size)

    daemon.run()
Example #57
 def set_thread_pool_size(self):
     # This is a somewhat made up number.  The default is ten.
     # We are resource-limited in 32-bit, can't just make this
     # a huge number.
     # We need at least 10 threads for the normal sqlalchemy
     # thread pool and 10 for the special "no locks allowed"
     # pool.  Anything beyond that are just being allowed to
     # queue and get themselves a logger.
     # We are also constrained by the number of knc sockets
     # that can be open at once (max file descriptors).
     pool_size = self.config.get("broker", "twisted_thread_pool_size")
     reactor.suggestThreadPoolSize(int(pool_size))
Example #58
    def stop(self):
        if (self.doneflag.isSet()):

            for connection in self.single_sockets.values():
                try:
                    #I think this still sends all the data
                    connection.close()
                except:
                    pass
                
            reactor.suggestThreadPoolSize(0)
            reactor.stop()