Example #1
    def start(self, num_processes: Optional[int] = 1, max_restarts: Optional[int] = None) -> None:
        """Starts this server in the `.IOLoop`.

        By default, we run the server in this process and do not fork any
        additional child process.

        If num_processes is ``None`` or <= 0, we detect the number of cores
        available on this machine and fork that number of child
        processes. If num_processes is given and > 1, we fork that
        specific number of sub-processes.

        Since we use processes and not threads, there is no shared memory
        between any server code.

        Note that multiple processes are not compatible with the autoreload
        module (or the ``autoreload=True`` option to `tornado.web.Application`
        which defaults to True when ``debug=True``).
        When using multiple processes, no IOLoops can be created or
        referenced until after the call to ``TCPServer.start(n)``.

        Values of ``num_processes`` other than 1 are not supported on Windows.

        The ``max_restarts`` argument is passed to `.fork_processes`.

        .. versionchanged:: 6.0

           Added ``max_restarts`` argument.
        """
        assert not self._started
        self._started = True
        if num_processes != 1:
            process.fork_processes(num_processes, max_restarts)
        sockets = self._pending_sockets
        self._pending_sockets = []
        self.add_sockets(sockets)
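
For reference, a minimal sketch of the pattern this docstring prescribes, using a hypothetical EchoServer subclass: bind the sockets first, fork via start(), and only then create or start the IOLoop.

from tornado.ioloop import IOLoop
from tornado.iostream import IOStream, StreamClosedError
from tornado.tcpserver import TCPServer

class EchoServer(TCPServer):
    # Hypothetical subclass for illustration; echoes each line back.
    async def handle_stream(self, stream: IOStream, address) -> None:
        try:
            while True:
                data = await stream.read_until(b"\n")
                await stream.write(data)
        except StreamClosedError:
            pass

if __name__ == "__main__":
    server = EchoServer()
    server.bind(8888)         # bind sockets in the parent first
    server.start(0)           # then fork: one child per CPU core
    IOLoop.current().start()  # touch the IOLoop only after forking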
Example #2
def main():
    script_path = os.path.dirname(os.path.realpath(__file__))
    config_path = os.path.join(script_path, 'etc/harvest.cfg')

    config = ConfigParser()
    config.read(config_path)

    sockets = bind_sockets(config.get('server', 'port'),
                           config.get('server', 'address'))
    fork_processes(config.getint('server', 'instances'))

    datastore = DataStore(config.get('datastore', 'host'),
                          config.getint('datastore', 'port'),
                          config.get('datastore', 'username'),
                          config.get('datastore', 'password'),
                          config.get('datastore', 'database'))

    app = Application([(r"/rpc/store", Handler,
                       {'datastore': datastore,
                        'api_key': config.get('server', 'api_key')})])

    server = HTTPServer(app,
                        no_keep_alive=config.get('server', 'no_keep_alive'),
                        ssl_options={
                            'certfile': config.get('server', 'certfile'),
                            'keyfile': config.get('server', 'keyfile')})

    server.add_sockets(sockets)
    IOLoop.instance().start()
Example #3
def main():
    options.parse_command_line()

    _port = options.options.port
    _process_num = options.options.process
    _debug_level = options.options.debug * 10

    process.fork_processes(_process_num, max_restarts=3)

    process_port = _port + process.task_id()
    process_debug = _process_num <= 1 and _debug_level < 30

    print('Service Running on %d ...' % process_port)

    app = web.Application((
        (r'/', views.base.IndexHandler),
        (r'/home', views.base.HomeHandler),
        (r'/auth/redirect', views.auth.OAuth2RedirectHandler),
        (r'/auth/revoke', views.auth.OAuth2RevokeHandler),
        (r'/auth/authorize', views.auth.OAuth2AuthorizeHandler),
        (r'/auth/info', views.auth.OAuth2InfoHandler),
        (r'/user/info', views.rest.UserInfoHandler),
        (r'/user/option', views.rest.UserOptionHandler),
        (r'/weibo/public', views.rest.WeiboPublicHandler),
        (r'/weibo/sync', views.rest.WeiboSyncHandler),
        (r'/weibo/query', views.rest.WeiboQueryHandler),
        (r'/weibo/redirect', views.rest.WeiboRedirectHandler),
        (r'/emotion/query', views.rest.EmotionQueryHandler),
    ), debug=process_debug, cookie_secret=setting.COOKIE_SECRET)
    app.listen(process_port, xheaders=True)

    loop = ioloop.IOLoop.instance()
    loop.start()
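
For concreteness: with --port=8000 and --process=4, fork_processes creates children whose task_id values are 0 through 3, so they listen on ports 8000-8003. process_debug stays False in multi-process mode because autoreload (implied by debug=True) is incompatible with forked processes, as the docstring in Example #1 notes.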
Example #4
def main():
    """ entry """
    try:
        conf = __import__('conf')
    except ImportError as e:
        app_log.critical("Unable to load site config. ({})".format(e))
        raise SystemExit()
    parse_command_line()

    if options.debug:
        app_log.setLevel(logging.DEBUG)

    if not options.debug:
        fork_processes(None)
    options.port += task_id() or 0

    if not os.path.isdir(conf.app_path):
        app_log.critical("{p} isn't accessible, maybe "
                         "create it?".format(p=conf.app_path))
        raise SystemExit()
    app_log.debug("Starting {name} on port {port}".format(name=conf.name,
                                                          port=options.port))
    # initialize the application
    tornado.httpserver.HTTPServer(Application(options,
                                              conf)).listen(options.port,
                                                            '0.0.0.0')
    ioloop = tornado.ioloop.IOLoop.instance()
    if options.debug:
        tornado.autoreload.start(ioloop)
    # enter the Tornado IO loop
    ioloop.start()
Example #5
    def start(self, num_processes=1):
        """Starts this server in the IOLoop.

        By default, we run the server in this process and do not fork any
        additional child process.

        If num_processes is ``None`` or <= 0, we detect the number of cores
        available on this machine and fork that number of child
        processes. If num_processes is given and > 1, we fork that
        specific number of sub-processes.

        Since we use processes and not threads, there is no shared memory
        between any server code.

        Note that multiple processes are not compatible with the autoreload
        module (or the ``debug=True`` option to `tornado.web.Application`).
        When using multiple processes, no IOLoops can be created or
        referenced until after the call to ``TCPServer.start(n)``.
        """
        assert not self._started
        self._started = True
        if num_processes != 1:
            process.fork_processes(num_processes)
        sockets = self._pending_sockets
        self._pending_sockets = []
        self.add_sockets(sockets)
Example #6
 def start(self, num_processes=1):
     r"""Starts the server in IOLoop."""
     assert not self._started
     self._started = True
     if num_processes != 1:
         process.fork_processes(num_processes)
     sockets, self._pending_sockets = self._pending_sockets, []
     self.add_sockets(sockets)
def main():
    try:
        options, datafile_json = optparse_lib().parse_args()

        cmdstr="""nohup python easystatserver.py > /dev/null 2>&1 &"""
        #cmdstr="""nohup ./easystatserver > /dev/null 2>&1 &"""
        #status,output=cmd_execute(cmdstr)
        import os
        os.system(cmdstr)

        #print(u"start benchmark test...")
        if(options.processnum != -1):
            process.fork_processes(options.processnum)
        '''logging.basicConfig(level=logging.DEBUG,format='[%(levelname)s] (%(asctime)s) <%(message)s>',datefmt='%a,%Y-%m-%d %H:%M:%S',
            filename="./log/process."+str(tornado.process.task_id())+".log",filemode='w')'''
        logging.basicConfig(level=logging.ERROR,
                    format='[%(levelname)s] [%(asctime)s] [%(filename)s-line:%(lineno)d] [%(funcName)s-%(threadName)s] %(message)s',
                    datefmt='%a,%Y-%m-%d %H:%M:%S',
                    filename="./log/easyhttpbenchmark.log",
                    filemode='a')
        easyhttpbc=easyhttpbenchmark(options.maxclientnum,options.clientnum,options.testtime,options.flag,datafile_json)
        easyhttpbc.benchmark_test()
        #print(u"benchmark test end...")
    except Exception as e:
        logging.error(str(e))

    try:
        from xmlrpclib import ServerProxy
        cfg_json=json.load(open("./conf/easyhttpbenchmark.conf", "r"),encoding='utf-8')
        stat_rpc_server=cfg_json['stat_rpc_server']
        stat_rpc_port=cfg_json['stat_rpc_port']
        svr=ServerProxy("http://"+stat_rpc_server+":"+stat_rpc_port)
        '''print("total_req_cnt:"+str(easyhttpbc.total_req_cnt))
        print("total_res_cnt:"+str(easyhttpbc.total_res_cnt))
        print("total_err_cnt:"+str(easyhttpbc.total_err_cnt))
        print("total_nul_cnt:"+str(easyhttpbc.total_nul_cnt))'''
        import multiprocessing
        cpu_count=multiprocessing.cpu_count()
        if(options.processnum != 0):
            svr.stat_maxclientnum(options.processnum*options.maxclientnum)
            svr.stat_clientnum(options.processnum*options.clientnum)
        else:
            svr.stat_maxclientnum(cpu_count*options.maxclientnum)
            svr.stat_clientnum(cpu_count*options.clientnum)

        svr.set_test_time(easyhttpbc.testtime)
        svr.stat_total_req_cnt(easyhttpbc.total_req_cnt)
        svr.stat_total_res_cnt(easyhttpbc.total_res_cnt)
        svr.stat_total_err_cnt(easyhttpbc.total_err_cnt)
        svr.stat_total_nul_cnt(easyhttpbc.total_nul_cnt)

        svr.stat_total_below_10(easyhttpbc.below_10)
        svr.stat_total_between_10_20(easyhttpbc.between_10_20)
        svr.stat_total_between_20_30(easyhttpbc.between_20_30)
        svr.stat_total_over_30(easyhttpbc.over_30)
        svr.stat_total_res_time(easyhttpbc.total_res_time)
    except Exception as e:
        logging.error(str(e))
Example #8
    def serve_forever(self, *args, **kwargs):
        try:
            if self._multiprocess:
                info('starting tornado server in multi-process mode')
                fork_processes(self._num_processes)
            else:
                info('starting tornado server in single-process mode')
            self._server = HTTPServer(self._app)
            self._server.add_sockets(self._sockets)
            IOLoop.instance().start()
        except Exception as e:
            error("exception in serve_forever: %s", e)
Example #9
def main():
    parse_command_line()
    
    settings = {
        'debug': options.debug
    }

    sockets = bind_sockets(options.port)
    fork_processes(0)
    application = router(settings)
    _http_server = HTTPServer(application)
    _http_server.add_sockets(sockets)
    IOLoop.current().start()
def main():
    options,DataFile=OptParseLib().parse_args()
    ConfFile=json.load(open(ConfFilePath, "r"),encoding='utf-8')
    #print(ConfFile)
    #print(options.ProcessNUM,options.ClientNUM,options.TEST_TIME,DataFile)
    print(u"Start Benchmark Test...")
    if options.ProcessNUM != -1:
        process.fork_processes(options.ProcessNUM)
    logging.basicConfig(level=logging.DEBUG,format='[%(levelname)s] (%(asctime)s) <%(message)s>',datefmt='%a,%Y-%m-%d %H:%M:%S',
    filename=ConfFile["LogFilePath"]+str(tornado.process.task_id())+".log",filemode='w')
    easyBC=easyBenchmarkTesttool(options.ClientNUM,options.TEST_TIME,DataFile)
    easyBC.benchmark_test()
    print(u"Benchmark Test End...")
Example #11
    def run(self):
        if options.debug:
            app_log.setLevel(logging.DEBUG)

        if not options.debug:
            fork_processes(None)
        options.port += task_id() or 0

        app_log.debug("Starting %s on port %s" % (cfg.platform_name, options.port))
        # initialize the application
        tornado.httpserver.HTTPServer(Application(self.commons)).listen(options.port, '0.0.0.0')
        ioloop = tornado.ioloop.IOLoop.instance()
        if options.debug:
            tornado.autoreload.start(ioloop)
        # enter the Tornado IO loop
        ioloop.start()
Example #12
def fork():
    """
    Fork the processes off, set process titles (master, worker) and return
    an ioloop.

    Returns an instance of tornado.ioloop.IOLoop
    """
    # Set the custom process title of the master
    set_process_title()
    # Fork and create the ioloop
    options.workers = opts.workers()
    process.fork_processes(options.workers)
    io_loop = ioloop.IOLoop.instance()
    # Set the custom process title of the workers
    set_process_title()
    return io_loop
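
set_process_title() is defined elsewhere in that project; a plausible sketch, assuming the third-party setproctitle package and a made-up title format, would distinguish master from workers via task_id():

from setproctitle import setproctitle  # third-party package (assumption)
from tornado import process

def set_process_title():
    task_id = process.task_id()
    if task_id is None:
        setproctitle('myapp: master')               # before fork_processes
    else:
        setproctitle('myapp: worker %d' % task_id)  # in each forked child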
Example #13
def main():
    numProcs = inventory.NUM_INDEX_SHARDS + inventory.NUM_DOC_SHARDS + 1
    taskID = process.fork_processes(numProcs, max_restarts=0)
    port = inventory.BASE_PORT + taskID
    if taskID == 0:
        app = httpserver.HTTPServer(tornado.web.Application([
                (r"/search", Web),
                (r"/upload", UploadHandler),
                (r"/(.*)", IndexDotHTMLAwareStaticFileHandler, dict(path=SETTINGS['static_path']))
            ], **SETTINGS))
        logging.info("Front end is listening on " + str(port))
    else:       
        if taskID <= inventory.NUM_INDEX_SHARDS:
            shardIx = taskID - 1
            #data = pickle.load(open("data/index%d.pkl" % (shardIx), "r"))
            inverted_path = os.path.join(os.getcwd(),"../assignment5/df_jobs/%d.out"  % (shardIx))
            logging.info("Inverted file path: %s" % inverted_path)
            data = pickle.load(open(inverted_path ,'r'))
            idf_path = os.path.join(os.getcwd(), "../assignment5/idf_jobs/0.out")
            logIDF = pickle.load(open(idf_path,'r'))
            app = httpserver.HTTPServer(web.Application([(r"/index", index.Index, dict(data=data, logIDF=logIDF))]))
            logging.info("Index shard %d listening on %d" % (shardIx, port))
        else:
            shardIx = taskID - inventory.NUM_INDEX_SHARDS - 1
            #data = pickle.load(open("data/doc%d.pkl" % (shardIx), "r"))
            doc_path = os.path.join(os.getcwd(),"../assignment5/i_df_jobs/%d.out" % (shardIx))
            logging.info("Doc Server path %s" % doc_path)
            data = pickle.load(open(doc_path, "r"))
            app = httpserver.HTTPServer(web.Application([(r"/doc", doc.Doc, dict(data=data))]))
            logging.info("Doc shard %d listening on %d" % (shardIx, port))
    app.add_sockets(netutil.bind_sockets(port))
    IOLoop.current().start()
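
The inventory module is not shown; a minimal, hypothetical stand-in supplying the constants the example imports might look like this (the real values may differ):

# inventory.py -- hypothetical values for illustration only
NUM_INDEX_SHARDS = 3
NUM_DOC_SHARDS = 3
BASE_PORT = 8000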
Example #14
def main():
    entries = {}
    for urls_file in urls_files:
        with open(urls_file) as f:
            for line in f.read().splitlines():
                if not line.startswith("#"):
                    for host in options.hosts.split(","):
                        entry = Entry.make(line, host.strip(), options.retry_times)
                        entries[entry.url] = entry
    if not entries:
        sys.exit(0)

    if options.multi_processes != -1:
        process.fork_processes(options.multi_processes)

    bc = Checker(entries.values(), options.timeout, options.max_clients)
    bc.check()
def main():
    if options.urls_file:
        with open(options.urls_file) as f:
            lines = [line for line in f.read().splitlines()
                    if not line.startswith('#')]
            if options.url_template:
                lines = [options.url_template % line for line in lines]
            requests.extend(lines)
    if not requests:
        sys.exit(0)

    if options.multi_processes != -1:
        process.fork_processes(options.multi_processes)

    bc = BenchClient(requests, options.timeout, options.max_clients,
            options.time_len)
    bc.bench()
Example #16
def main():
	taskID = process.fork_processes(7)
	if taskID == 6:
		makefrontend()
	elif taskID<=2:
		#makeIndexer(taskID)	
		makeDocEnd(taskID)
	else:
		makeIndexer(taskID-3)		
	IOLoop.instance().start()
Example #17
    def start(self, num_processes):
        '''Like the stock start(), but also returns process_id so child
        processes can be told apart, e.g. to open log files with
        per-process filenames.'''
        assert not self._started
        self._started = True
        process_id = 0
        if num_processes != 1:
            process_id = process.fork_processes(num_processes)
        sockets = self._pending_sockets
        self._pending_sockets = []
        self.add_sockets(sockets)
        return process_id
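
A hedged usage sketch of the patched start() above; MyServer and app are hypothetical stand-ins for a server subclass carrying this method and its application:

import logging

server = MyServer(app)  # hypothetical subclass with the start() above
server.bind(8000)
process_id = server.start(4)  # 0 in single-process mode, task id otherwise
logging.basicConfig(filename='worker-%d.log' % process_id)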
Example #18
def main(): 
    taskID = process.fork_processes(4)
    if taskID == 0:
        app = httpserver.HTTPServer(makeFrontend())
        app.add_sockets(netutil.bind_sockets(BASE_PORT))
        logging.info("Front end is listening on " + str(BASE_PORT))
    else:
        port = BASE_PORT + taskID
        app = httpserver.HTTPServer(makeBackend(port))
        app.add_sockets(netutil.bind_sockets(port))
        logging.info("Back end %d listening on %d" % (taskID, port))
    IOLoop.current().start()
Example #19
def main():
    try:
        options.parse_command_line()
        port = options.options.port
        fork = options.options.fork

        setting['PROCESS'] = fork
        setting['PORT_GROUP'] = range(port, port + fork)
        process.fork_processes(fork, max_restarts=10)

        setting['PORT'] = port + process.task_id()

        app = web.Application(
            handlers=urls,
            **SETTINGS
        )

        app.listen(setting['PORT'], xheaders=True)
        loop = ioloop.IOLoop.instance()
        loop.start()
    except Exception as e:
        logger.error(traceback.format_exc())
Example #20
def main():
    script_path = os.path.dirname(os.path.realpath(__file__))
    config_path = os.path.join(script_path, 'etc/training.cfg')
    confirm_path = os.path.join(script_path, 'data/confirmation.text')

    config = ConfigParser()
    config.read(config_path)

    sockets = bind_sockets(config.get('server', 'port'),
                           config.get('server', 'address'))
    fork_processes(config.getint('server', 'instances'))

    datastore = DataStore(config.get('database', 'host'),
                          config.getint('database', 'port'),
                          config.get('database', 'username'),
                          config.get('database', 'password'),
                          config.get('database', 'database'))

    with open(confirm_path) as file:
        message = file.read()

    params_report = {'datastore': datastore,
                     'api_key': config.get('server', 'api_key')}
    params_confirm = {'datastore': datastore,
                      'message': message}

    app = Application([(r"/training/report",
                        ReportHandler, params_report),
                       (r"/training/confirm/(.*)",
                        ConfirmationHandler, params_confirm)])

    server = HTTPServer(app,
                        no_keep_alive=config.get('server', 'no_keep_alive'),
                        ssl_options={
                            'certfile': config.get('server', 'certfile'),
                            'keyfile': config.get('server', 'keyfile')})

    server.add_sockets(sockets)
    IOLoop.instance().start()
	def start(self):
		"""Start up a tornado web server and use the logic method as the request handler"""

		# find a session and join it
		self.join_session(self.bot_endpoint)

		# configure the bot event handler and the user supplied logic
		bot_server_app = tornado.web.Application([
			(r"/event", BotEventHandler, dict(	bot_logic=self.bot_logic,
								player_id=self.player_id,
								game_id=self.game_id ))
			])

		# start 'er up
		sockets = bind_sockets(self.bot_port)
		print "Bot is listening d[-_-]b"
		fork_processes(None)
		server = HTTPServer(bot_server_app)
		server.add_sockets(sockets)

		# start listening
		tornado.ioloop.IOLoop.instance().start()
Example #22
 def cmd_serve(self, *args):
     """
     The 'serve' command, responsible for starting the production server.
     """
     port = int(args[0]) if args else self.port
     ServerLog.info(code='dev_server_starting', args=[port])
     app = Application(
         routers, cookie_secret=config('SECRET_KEY'),
         login_url='/api/v1/usuarios/entrar/',
         xsrf_cookies=False)
     sockets = netutil.bind_sockets(port)
     process.fork_processes(self.fork_processes)
     server = HTTPServer(app)
     server.add_sockets(sockets)
     r.set_loop_type('tornado')
     ioloop = IOLoop.current()
     ioloop.add_callback(
         callback=ServerLog.info,
         code='server_started',
         args=[self.host, port]
     )
     ioloop.start()
Example #23
def start(settings):
    
    application = get_application(settings)
    
    http_server = HTTPServer(application)
    
    unix_socket_enabled = settings.get("unix_socket_enabled")
    sockets = []
    if unix_socket_enabled:
        server_unix_socket_file = settings.get("unix_socket_file")
        server_backlog = settings.get("backlog")
        # Use unix socket
        _logger.info('Bind unix socket file %s', server_unix_socket_file)
        socket = netutil.bind_unix_socket(server_unix_socket_file, 0o600, server_backlog)
        sockets.append(socket)
    else:
        server_port = settings.get("port")
        # Normal way to enable a port for listening the request
        _logger.info('Listen on port %d', server_port)
        sockets.extend(netutil.bind_sockets(server_port))
    
    process_count = settings.get("process_count")
    if not settings.get("debug") and process_count != 1:
        if process_count <= 0:
            process_count = process.cpu_count()
        elif process_count > 1:
            _logger.info('Start %d processes', process_count) 
        process.fork_processes(process_count)
        
    http_server.add_sockets(sockets)
    
    # Start Service
    _logger.info('Start tornado server')
    try:
        IOLoop.instance().start()
    except:
        _logger.fatal('Start tornado server failed', exc_info = True)
        raise
Example #24
def main():
	numProcs = inventory.NUM_WORKERS
	taskID = process.fork_processes(numProcs, max_restarts=0)
	port = inventory.BASE_PORT + taskID
	app = httpserver.HTTPServer(web.Application([
		web.url(r"/map", mapper.Map),
		web.url(r"/retrieveMapOutput", mapper.RetrieveMapOutput),			
		web.url(r"/reduce", reducer.Reduce),
		web.url(r"/retrieveReduceOutput", reducer.RetrieveReduceOutput),
		web.url(r"/coordinator", coordinator.Runner)]))
	logging.info("Worker %d listening on %d" % (taskID, port))

	app.add_sockets(netutil.bind_sockets(port))
	IOLoop.current().start()
Example #25
  def bootstrap( self ):
    gLogger.always( "\n  === Bootstrapping REST Server ===  \n" )
    ol = ObjectLoader( [ 'DIRAC', 'RESTDIRAC' ] )
    result = ol.getObjects( "RESTSystem.API", parentClass = RESTHandler, recurse = True )
    if not result[ 'OK' ]:
      return result

    self.__handlers = result[ 'Value' ]
    if not self.__handlers:
      return S_ERROR( "No handlers found" )

    self.__routes = [ ( self.__handlers[ k ].getRoute(), self.__handlers[k] ) for k in self.__handlers if self.__handlers[ k ].getRoute()  ]
    gLogger.info( "Routes found:" )
    for t in sorted( self.__routes ):
      gLogger.info( " - %s : %s" % ( t[0], t[1].__name__ ) )

    balancer = RESTConf.balancer()
    kw = dict( debug = RESTConf.debug(), log_function = self._logRequest )
    if balancer and RESTConf.numProcesses() not in ( 0, 1 ):
      process.fork_processes( RESTConf.numProcesses(), max_restarts = 0 )
      kw[ 'debug' ] = False
    if kw[ 'debug' ]:
      gLogger.always( "Starting in debug mode" )
    self.__app = web.Application( self.__routes, **kw )
    port = RESTConf.port()
    if balancer:
      gLogger.notice( "Configuring REST HTTP service for balancer %s on port %s" % ( balancer, port ) )
      self.__sslops = False
    else:
      gLogger.notice( "Configuring REST HTTPS service on port %s" % port )
      self.__sslops = dict( certfile = RESTConf.cert(),
                            keyfile = RESTConf.key(),
                            cert_reqs = ssl.CERT_OPTIONAL,
                            ca_certs = RESTConf.generateCAFile() )
    self.__httpSrv = httpserver.HTTPServer( self.__app, ssl_options = self.__sslops )
    self.__httpSrv.listen( port )
    return S_OK()
Example #26
    def start(self, handlers=None, app_info=None, twork_module=None, **settings):
        sockets_list = []
        for bind_ip in options.bind_ip.split(","):
            if not bind_ip:
                break

            sockets = tornado.netutil.bind_sockets(options.port, address=bind_ip, backlog=options.backlog)
            sockets_list.append(sockets)

        if options.num_processes >= 0:
            process.fork_processes(options.num_processes)

        if twork_module is not None:
            twork_module.setup()

        if options.timer_start:
            CommonTimer().start(twork_module.timer_callback)

        self.http_server = tornado.httpserver.HTTPServer(
            xheaders=True, request_callback=_TApplication(handlers=handlers, app_info=app_info, **settings)
        )

        for sockets in sockets_list:
            self.http_server.add_sockets(sockets)
Example #27
def tornado_bidder_run():
    """runs httpapi bidder agent"""

    # bind tcp port to launch processes on requests
    sockets = netutil.bind_sockets(CONFIG_OBJ["Bidder"]["Port"])

    # fork working processes
    process.fork_processes(0)

    # Tornado app implementation
    app = Application([url(r"/", TornadoFixPriceBidAgentRequestHandler)])

    # start http servers and attach the web app to it
    server = httpserver.HTTPServer(app)
    server.add_sockets(sockets)

    # perform the following actions only in the first worker process (task_id 0);
    # the parent does not return from fork_processes
    process_counter = process.task_id()
    if process_counter == 0:
        # run dummy ad server
        adserver_win = Application([url(r"/", TornadoDummyRequestHandler)])
        winport = CONFIG_OBJ["Bidder"]["Win"]
        adserver_win.listen(winport)
        adserver_evt = Application([url(r"/", TornadoDummyRequestHandler)])
        evtport = CONFIG_OBJ["Bidder"]["Event"]
        adserver_evt.listen(evtport)

        # --instantiate budget pacer
        pacer = BudgetControl()
        pacer.start(CONFIG_OBJ)

        # add periodic event to call pacer
        PeriodicCallback(pacer.http_request, CONFIG_OBJ["Banker"]["Period"]).start()

    # main io loop. it will loop waiting for requests
    IOLoop.instance().start()
Example #28
 def test_multi_process(self):
     self.assertFalse(IOLoop.initialized())
     port = get_unused_port()
     def get_url(path):
         return "http://127.0.0.1:%d%s" % (port, path)
     sockets = bind_sockets(port, "127.0.0.1")
     # ensure that none of these processes live too long
     signal.alarm(5)  # master process
     try:
         id = fork_processes(3, max_restarts=3)
     except SystemExit as e:
         # if we exit cleanly from fork_processes, all the child processes
         # finished with status 0
         self.assertEqual(e.code, 0)
         self.assertTrue(task_id() is None)
         for sock in sockets: sock.close()
         signal.alarm(0)
         return
def main():
    numProcs = 3
    taskID = process.fork_processes(numProcs, max_restarts=0)
    port = BASE_PORT + taskID
    if taskID == 0:
        app = httpserver.HTTPServer(tornado.web.Application([
            (r"/submit", Web)], **SETTINGS))
        logging.info("webapp listening on %d" % port)
    else:
        #load trained model from either dm or dbow
        if os.path.isfile(dmLabeled) and os.path.isfile(dbowLabeled):
            fname = dmLabeled if taskID == 1 else dbowLabeled
            model = Doc2Vec.load(fname)   
        else:
            raise RuntimeError("Must first train doc2vec model")

        app = httpserver.HTTPServer(web.Application([(r"/doc2vec", Doc2vecServer, dict(model = model))]))
        logging.info("Doc2vec server %d listening on %d" % (taskID, port))
    
    app.add_sockets(netutil.bind_sockets(port))
    IOLoop.current().start()
Example #30
def startup():
    
    define(r'port', 80, int, r'Server listen port')
    define(r'service', False, bool, r'Open Scheduled Tasks')
    
    options.parse_command_line()
    
    if(config.Static.Debug):
        options.parse_command_line([__file__, r'--service=true', r'--logging=debug'])
    
    settings = {
                r'handlers': router.urls,
                r'static_path': r'static',
                r'template_loader': Jinja2Loader(r'view'),
                r'debug': config.Static.Debug,
                r'gzip': config.Static.GZip,
                r'cookie_secret': config.Static.Secret,
                }
    
    sockets = bind_sockets(options.port)
    
    task_id = 0
    
    if(platform.system() == r'Linux'):
        task_id = fork_processes(cpu_count())
    
    server = HTTPServer(Application(**settings))
    
    server.add_sockets(sockets)
    
    if(task_id == 0 and options.service):
        service.start()
    
    signal.signal(signal.SIGINT, lambda *args : shutdown(server))
    signal.signal(signal.SIGTERM, lambda *args : shutdown(server))
    
    app_log.info(r'Startup http server No.{0}'.format(task_id))
    
    IOLoop.instance().start()
Example #31
    def test_multi_process(self):
        with ExpectLog(gen_log, "(Starting .* processes|child .* exited|uncaught exception)"):
            self.assertFalse(IOLoop.initialized())
            sock, port = bind_unused_port()

            def get_url(path):
                return "http://127.0.0.1:%d%s" % (port, path)
            # ensure that none of these processes live too long
            signal.alarm(5)  # master process
            try:
                id = fork_processes(3, max_restarts=3)
                self.assertTrue(id is not None)
                signal.alarm(5)  # child processes
            except SystemExit as e:
                # if we exit cleanly from fork_processes, all the child processes
                # finished with status 0
                self.assertEqual(e.code, 0)
                self.assertTrue(task_id() is None)
                sock.close()
                return
            try:
                if id in (0, 1):
                    self.assertEqual(id, task_id())
                    server = HTTPServer(self.get_app())
                    server.add_sockets([sock])
                    IOLoop.instance().start()
                elif id == 2:
                    self.assertEqual(id, task_id())
                    sock.close()
                    # Always use SimpleAsyncHTTPClient here; the curl
                    # version appears to get confused sometimes if the
                    # connection gets closed before it's had a chance to
                    # switch from writing mode to reading mode.
                    client = HTTPClient(SimpleAsyncHTTPClient)

                    def fetch(url, fail_ok=False):
                        try:
                            return client.fetch(get_url(url))
                        except HTTPError as e:
                            if not (fail_ok and e.code == 599):
                                raise

                    # Make two processes exit abnormally
                    fetch("/?exit=2", fail_ok=True)
                    fetch("/?exit=3", fail_ok=True)

                    # They've been restarted, so a new fetch will work
                    int(fetch("/").body)

                    # Now the same with signals
                    # Disabled because on the mac a process dying with a signal
                    # can trigger an "Application exited abnormally; send error
                    # report to Apple?" prompt.
                    #fetch("/?signal=%d" % signal.SIGTERM, fail_ok=True)
                    #fetch("/?signal=%d" % signal.SIGABRT, fail_ok=True)
                    #int(fetch("/").body)

                    # Now kill them normally so they won't be restarted
                    fetch("/?exit=0", fail_ok=True)
                    # One process left; watch its pid change
                    pid = int(fetch("/").body)
                    fetch("/?exit=4", fail_ok=True)
                    pid2 = int(fetch("/").body)
                    self.assertNotEqual(pid, pid2)

                    # Kill the last one so we shut down cleanly
                    fetch("/?exit=0", fail_ok=True)

                    os._exit(0)
            except Exception:
                logging.error("exception in child process %d", id, exc_info=True)
                raise
Example #32
def main():
  from recommendation import recom_worker, recom_front
  from searchEngine.backend import back as searchEng_worker
  from searchEngine.frontend import front as searchEng_front
  from classification.backend import online as classifier
  # from recommendation import searchEng_worker, searchEng_front  
  import mapreduce.framework as framework
  from src import color  
  # from src import tomatoCrawler as TC
  C = color.bcolors()

  global masterServer, MovieServer, ReviewServer, IdxServer, DocServer, Baseport
  
  print C.HEADER + "=========== Start Crawling ===========" + C.ENDC
  # TC.main2Genre()

  print C.HEADER + "=========== Find Available Ports ===========" + C.ENDC
  getPorts()

  print C.OKBLUE + "SuperFront:\t" + str(SuperFront) + C.ENDC
  print C.OKBLUE + "masterServer:\t" + str(masterServer) + C.ENDC
  print C.OKBLUE + "MovieServer:\t" + str(MovieServer) + C.ENDC
  print C.OKBLUE + "ReviewServer:\t" + str(ReviewServer) + C.ENDC
  print C.OKBLUE + "IdxServer:\t" + str(IdxServer) + C.ENDC
  print C.OKBLUE + "DocServer:\t" + str(DocServer) + C.ENDC
  print C.OKBLUE + "ClassifierServer:\t" + str(ClassifierServer) + C.ENDC
  

  print C.HEADER + "=========== Fire Up All Servers ===========" + C.ENDC
  uid = fork_processes(NumMaster+NumMovie+NumReview+NumIdx+NumDoc)
  
  if uid == 0:
    sockets = bind_sockets(masterServer[uid].split(':')[-1])
    myfront = recom_front.FrontEndApp(MovieServer, ReviewServer)
    server  = myfront.app
  elif uid ==1:
    sockets = bind_sockets(masterServer[uid].split(':')[-1])    
    myfront = searchEng_front.FrontEndApp(IdxServer, DocServer)
    server  = myfront.app
  elif uid ==2:    
    sockets = bind_sockets(masterServer[uid].split(':')[-1])
    myClasify = classifier.Application(([(r"/predict?", classifier.PredictionHandler)]))
    myClasify.setGenres("./constants/classification_weights/genres.p")
    myClasify.setWeights("./constants/classification_weights/big_weight.p")
    server  = tornado.httpserver.HTTPServer(myClasify )        
    
  elif uid < NumMaster + NumMovie:
    myIdx = uid - NumMaster
    sockets = bind_sockets(MovieServer[myIdx].split(':')[-1])    
    myback_movie = recom_worker.RecommApp('MovieServer', myIdx, MovieServer[myIdx].split(':')[-1])
    server  = myback_movie.app
  elif uid < NumMaster + NumMovie + NumReview:
    myIdx = uid - NumMovie - NumMaster
    sockets = bind_sockets(ReviewServer[myIdx].split(':')[-1])
    myback_review = recom_worker.RecommApp('ReviewServer', myIdx, ReviewServer[myIdx].split(':')[-1])
    server  = myback_review.app
  elif uid < NumMaster + NumMovie + NumReview + NumIdx:
      myIdx = uid-NumMovie-NumReview-NumMaster
      sockets = bind_sockets(IdxServer[myIdx].split(':')[-1])    
      myback_idx = searchEng_worker.BackEndApp('IndexServer', myIdx, IdxServer[myIdx].split(':')[-1])
      server  = myback_idx.app
  elif uid < NumMaster + NumMovie + NumReview + NumIdx + NumDoc:
      myIdx = uid-NumMovie-NumReview-NumIdx-NumMaster
      sockets = bind_sockets(DocServer[myIdx].split(':')[-1])    
      myback_doc = searchEng_worker.BackEndApp('DocServer', myIdx, DocServer[myIdx].split(':')[-1])
      server  = myback_doc.app  

  
  server.add_sockets(sockets)
  tornado.ioloop.IOLoop.instance().start()
    def __enter__(self):
        CORE.info('Initialising server process')
        if self.__unix:
            CORE.info('Using a UNIX socket')
            self.__realunixsocket = socket.socket(socket.AF_UNIX,
                                                  socket.SOCK_STREAM)
        if self.__port:
            CORE.info('Using a TCP socket')
            try:
                self.__realtcpsocket = socket.socket(socket.AF_INET6,
                                                     socket.SOCK_STREAM)
            except Exception:
                CORE.warn(
                    'Cannot open socket with AF_INET6 (Python reports socket.has_ipv6 is %s), trying AF_INET'
                    % socket.has_ipv6)
                self.__realtcpsocket = socket.socket(socket.AF_INET,
                                                     socket.SOCK_STREAM)

        for sock in (self.__realtcpsocket, self.__realunixsocket):
            if sock is None:
                continue
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            sock.setblocking(0)
            fd = sock.fileno()
            flags = fcntl.fcntl(fd, fcntl.F_GETFD)
            fcntl.fcntl(fd, fcntl.F_SETFD, flags | fcntl.FD_CLOEXEC)

        if self.__ssl and self.__port:
            CORE.info('Setting up SSL configuration')
            self.crypto_context = SSL.Context(SSL.TLSv1_METHOD)
            self.crypto_context.set_cipher_list(
                ucr.get('umc/server/ssl/ciphers', 'DEFAULT'))
            self.crypto_context.set_options(SSL.OP_NO_SSLv2)
            self.crypto_context.set_options(SSL.OP_NO_SSLv3)
            self.crypto_context.set_verify(SSL.VERIFY_PEER,
                                           self.__verify_cert_cb)
            dir = '/etc/univention/ssl/%s.%s' % (ucr['hostname'],
                                                 ucr['domainname'])
            try:
                self.crypto_context.use_privatekey_file(
                    os.path.join(dir, 'private.key'))
                self.crypto_context.use_certificate_file(
                    os.path.join(dir, 'cert.pem'))
                self.crypto_context.load_verify_locations(
                    '/etc/univention/ssl/ucsCA/CAcert.pem')
            except SSL.Error as exc:
                # SSL is not possible
                CRYPT.error('Setting up SSL configuration failed: %s' %
                            (exc, ))
                CRYPT.warn('Communication will not be encrypted!')
                self.__ssl = False
                self.crypto_context = None
                self.__realtcpsocket.bind(('', self.__port))
                CRYPT.info('Server listening to unencrypted connections')
                self.__realtcpsocket.listen(SERVER_MAX_CONNECTIONS)

            if self.crypto_context:
                self.connection = SSL.Connection(self.crypto_context,
                                                 self.__realtcpsocket)
                self.connection.setblocking(0)
                self.connection.bind(('', self.__port))
                self.connection.set_accept_state()
                CRYPT.info('Server listening to SSL connections')
                self.connection.listen(SERVER_MAX_CONNECTIONS)
        elif not self.__ssl and self.__port:
            self.crypto_context = None
            self.__realtcpsocket.bind(('', self.__port))
            CRYPT.info('Server listening to TCP connections')
            self.__realtcpsocket.listen(SERVER_MAX_CONNECTIONS)

        if self.__unix:
            # ensure that the UNIX socket is only accessible by root
            old_umask = os.umask(0o077)
            try:
                self.__realunixsocket.bind(self.__unix)
            except EnvironmentError:
                if os.path.exists(self.__unix):
                    os.unlink(self.__unix)
            finally:
                # restore old umask
                os.umask(old_umask)
            CRYPT.info('Server listening to UNIX connections')
            self.__realunixsocket.listen(SERVER_MAX_CONNECTIONS)

        if self.__processes != 1:
            self._children = multiprocessing.Manager().dict()
            try:
                self._child_number = process.fork_processes(
                    self.__processes, 0)
            except RuntimeError as exc:
                CORE.warn('Child process died: %s' % (exc, ))
                os.kill(os.getpid(), signal.SIGTERM)
                raise SystemExit(str(exc))
            if self._child_number is not None:
                self._children[self._child_number] = os.getpid()

        if self.__magic:
            self.__bucket = self.__magicClass()
        else:
            self.signal_new('session_new')

        if self.__ssl:
            notifier.socket_add(self.connection, self._connection)
        if (not self.__ssl and self.__port):
            notifier.socket_add(self.__realtcpsocket, self._connection)
        if self.__unix:
            notifier.socket_add(self.__realunixsocket, self._connection)

        return self
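
Note the ordering here: the multiprocessing.Manager is created before fork_processes, so every child inherits a handle to the same manager process and can record its PID in the shared dict. The RuntimeError branch runs in the master when a child dies, since max_restarts is 0 in this call.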
Example #34
        futures = []
        for i in range(5):
            url = "%s%d/result" % (BASE_URL, 25800 + i)
            logging.info("Fetching %s" % url)
            futures.append(http.fetch(url))
        responses = yield futures
        result = {}
        for idx, resp in enumerate(responses):
            result[idx] = json.loads(resp.body)
        self.write(json.dumps(result))
        self.finish()


def makeEnd(index):
    port = 25800 + index
    data = dataStore()
    app = web.Application([
        web.url(r"/test", testHandler, dict(data=data)),
        web.url(r"/result", getDataHandler, dict(data=data)),
        web.url(r"/rrrr", retrieveHandler)
    ])
    app.listen(port)
    logging.info("No. %d worker is listening on %d" % (index, port))


if __name__ == "__main__":
    taskID = process.fork_processes(5)
    makeEnd(taskID)
    IOLoop.instance().start()
Example #35
                        required=True,
                        metavar='<PATH_TO_PICKLED_GENRE_LIST>',
                        help='pickle file of the genres list')
    parser.add_argument('--weightsPath',
                        required=True,
                        metavar='<PATH_TO_PICKLED_GENRE_WEIGHTS_DICT>',
                        help='pickle file of the weights dict')
    return parser.parse_args(argv)


# python -m classification.servers --genresPath constants/classification_weights/genres.p --weightsPath constants/classification_weights/big_weight.p 3 classification/server_address.json
if __name__ == "__main__":
    args = getArguments(sys.argv[1:])
    if args.numServer > inventory.NUM_OF_MACHINES:
        args.numServer = inventory.NUM_OF_MACHINES

    BASE_PORT = 15000
    for i in range(args.numServer):
        inventory.getWorker('127.0.0.1:{0}'.format(BASE_PORT + 1 + i))
    inventory.toJson(args.outputPath)

    pid = fork_processes(args.numServer, max_restarts=0)
    app = online.Application(([(r"/predict?", online.PredictionHandler)]))
    port = BASE_PORT + 1 + pid
    app.setGenres(args.genresPath)
    app.setWeights(args.weightsPath)
    app.listen(port)
    print 'A classification server is serving at ' + inventory.MACHINES_IN_USE[
        pid]
    tornado.ioloop.IOLoop.instance().start()
Example #36
#!/usr/bin/env python3

from tornado.ioloop import IOLoop
from tornado.process import task_id, fork_processes
from tornado.web import Application, RequestHandler


class MainHandler(RequestHandler):
    def get(self):
        self.write('Hello, I am ID {}\n'.format(task_id()))


class CrashHandler(RequestHandler):
    def get(self):
        self.write('Crashing ID {}!\n'.format(task_id()))
        IOLoop.current().stop()


if __name__ == '__main__':
    app = Application([
        (r'/', MainHandler),
        (r'/crash', CrashHandler),
    ])
    fork_processes(6)
    app.listen(8080 + task_id(), address='127.0.0.1')
    IOLoop.current().start()
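
Hitting /crash stops that child's IOLoop, so the process exits; because fork_processes is called here without max_restarts (which defaults to 100), the master forks a replacement that inherits the same task_id and therefore listens on the same port.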
Example #37
        ),
        datefmt = "%Y.%m.%d %H:%M:%S",
        filename = "log/my_store.log",
        filemode = "a"
    )

    sock_lsn = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
    sock_lsn.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    sock_lsn.setblocking(0)
    sock_lsn.bind(("", 11211))
    sock_lsn.listen(65535)

    while True:
        try:
            # In the parent process this call either exits or raises; it never returns
            task_id = fork_processes(num_processes = 1, max_restarts = 1)
        except RuntimeError as e:
            logging.error("fork failed: {}".format(e))
            time.sleep(60)  # interval between restarts
        except KeyboardInterrupt as e:
            sys.exit(0)
        else:
            # child process
            break

    io_loop = IOLoop.current()
    server = Transformer(io_loop, sock_lsn, 1)
    io_loop.add_handler(sock_lsn.fileno(),
                        server.handle_connection,
                        io_loop.READ | io_loop.ERROR)
    try:
Example #38
         
 
class ECHOServer(TCPServer):
 
    def handle_stream(self,stream,address):
        logging.info("echo connecting...%r",address)
        ECHO(stream,self)
 
class Policy(TCPServer):
    def handle_stream(self,stream,address):
        logging.info("policy ....")
        rs = open('socket-policy.xml', 'rt').read()
        rs = bytes(rs)
        print rs
        stream.write(rs)
 
               
if __name__ == '__main__':
    parse_command_line()
     
    socket1 = netutil.bind_sockets(8000)
    #socket2 = netutil.bind_sockets(843)
    process.fork_processes(0)
 
    server = ECHOServer()
    server.add_sockets(socket1)
    #server2 = Policy()
    #server2.add_sockets(socket2)
 
    IOLoop.instance().start()
Example #39
 def start(self, app, port, multnum=1):
     sockets = bind_sockets(port)
     fork_processes(multnum)
     server = HTTPServer(app, xheaders=True)
     server.add_sockets(sockets)
     IOLoop.instance().start()
Example #40

def watch_dispatch():
    # listen on more channels to spread the load
    watch_channels = [str(x) for x in xrange(1, 11)]
    # for backward compatibility
    watch_channels.append('dispatch')
    while 1:
        try:
            _, unpacked_msg = dispatch_redis.blpop(watch_channels, 0)
            unpacked_msg = msgpack.unpackb(unpacked_msg, encoding='utf-8')
            dst_ch = unpacked_msg.get('c')
            redis_list = task_ch_redis.get(dst_ch)
            if not redis_list:
                logger.warn('mis_spell channel: %s' % dst_ch)
                continue
            randin = randint(0, len(redis_list) - 1)
            selected_redis = redis_list[randin]
            logger.warn('selected_redis:%s,dst_ch:%s, unpacked_msg:%s' %
                        (selected_redis, dst_ch, unpacked_msg))
            selected_redis.rpush(dst_ch, unpacked_msg.get('p'))
        except Exception as ex:
            logger.error('blpop fail: {0}'.format(ex), exc_info=True)


if __name__ == '__main__':
    logger.init_log('mqd_job', 'mqd_job')
    fork_processes(10)
    setproctitle('mqd_job')
    watch_dispatch()
Example #41
        #The documents are then scored. Each document's score is the inner product (a.k.a. dot product,
        #effectively correlation here) of its vector and the query vector. In addition, scores should be
        #biased so that documents with the query terms in their title receive especially high scores.
        ScoredListing = []
        for docID in MyMap:
            score = dotProd(MyMap[docID], QueryVector)
            ScoredListing.append([score, docID])  ## Bias comes from tfs itself

        #Finally, the K highest-scoring documents are written out as a JSON-encoded list of (docID, score) pairs.
        ScoredListing.sort(reverse=True)  ## check so sorted by score
        topPostings = ScoredListing[:10]
        TP = []
        for posting in topPostings:
            TP.append([posting[1], posting[0]])
        #print 'Length from ' + str(self._port)
        #print len(ScoredListing)
        self.write(json.JSONEncoder().encode({"postings": TP}))


if __name__ == "__main__":

    task_id = process.fork_processes(inventory.num_ind)
    I_BASE_PORT = inventory.port1
    #application.listen(I_BASE_PORT)
    port = I_BASE_PORT + task_id
    app = httpserver.HTTPServer(
        web.Application([web.url(r"/index", IndexHandler, dict(port=port))]))
    app.add_sockets(netutil.bind_sockets(port))
    print('indexServer' + str(task_id) + ' at port: ' + str(port))
    tornado.ioloop.IOLoop.current().start()
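
dotProd() is defined elsewhere; a minimal sketch consistent with how it is called here, assuming both arguments are dicts mapping term to weight, could be:

def dotProd(doc_vector, query_vector):
    # Inner product over the terms the two sparse vectors share.
    return sum(weight * query_vector[term]
               for term, weight in doc_vector.items()
               if term in query_vector)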
Example #42
class StaticPageHandler(RequestHandler):
    def initialize(self, page):
        self.template = os.path.join(settings.template_path, page + '.html')

    def get(self):
        self.render(self.template)


application = Application([
    url(r'/static/(.*)', StaticFileHandler, {'path': settings.static_path}),
    url(r"/?", HomePageHandler, name='home'),
    url(r"/m/(.{10})/?", MoodPageHandler, name='mood'),
    url(r'/privacy/?', StaticPageHandler, {'page': 'privacy'}),
    url(r'/tos/?', StaticPageHandler, {'page': 'tos'}),
], debug=settings.debug)

if '__main__' == __name__:
    if settings.debug:
        print('Starting debug server on {}'.format(settings.debug_port))
        application.listen(settings.debug_port)
        IOLoop.instance().start()
    else:
        print('Starting production server.')
        socket = bind_unix_socket(settings.socket, mode=0o777)
        fork_processes(None)
        server = HTTPServer(application)
        server.add_socket(socket)
        IOLoop.instance().start()
Example #43
def main():
    from tornado import options

    opts = options.OptionParser()
    opts.define("version",
                type=bool,
                help="show version and exit",
                callback=show_version)
    opts.define("locators",
                default=["localhost:10053"],
                type=str,
                multiple=True,
                help="comma-separated endpoints of locators")
    opts.define("cache",
                default=DEFAULT_SERVICE_CACHE_COUNT,
                type=int,
                help="count of instances per service")
    opts.define(
        "config",
        help="path to configuration file",
        type=str,
        callback=lambda path: opts.parse_config_file(path, final=False))
    opts.define("count",
                default=1,
                type=int,
                help="count of tornado processes")
    opts.define("port", default=8080, type=int, help="listening port number")
    opts.define(
        "endpoints",
        default=["tcp://localhost:8080"],
        type=str,
        multiple=True,
        help=
        "Specify endpoints to bind on: prefix unix:// or tcp:// should be used"
    )
    opts.define("request_header",
                default="X-Request-Id",
                type=str,
                help="header used as a trace id")
    opts.define("forcegen_request_header",
                default=False,
                type=bool,
                help="enable force generation of the request header")
    opts.define("sticky_header",
                default="X-Cocaine-Sticky",
                type=str,
                help="sticky header name")
    opts.define("gcstats",
                default=False,
                type=bool,
                help="print garbage collector stats to stderr")

    # tracing options
    opts.define("tracing_chance",
                default=DEFAULT_TRACING_CHANCE,
                type=float,
                help="default chance for an app to be traced")
    opts.define("configuration_service",
                default="unicorn",
                type=str,
                help="name of configuration service")
    opts.define(
        "tracing_conf_path",
        default="/zipkin_sampling",
        type=str,
        help="path to the configuration nodes in the configuration service")

    # various logging options
    opts.define(
        "logging",
        default="info",
        help=("Set the Python log level. If 'none', tornado won't touch the "
              "logging configuration."),
        metavar="debug|info|warning|error|none")
    opts.define(
        "log_to_stderr",
        type=bool,
        default=None,
        help=("Send log output to stderr. "
              "By default use stderr if --log_file_prefix is not set and "
              "no other logging is configured."))
    opts.define("log_file_prefix",
                type=str,
                default=None,
                metavar="PATH",
                help=("Path prefix for log file"))
    opts.define("datefmt",
                type=str,
                default="%z %d/%b/%Y:%H:%M:%S",
                help="datefmt")
    opts.define("generallogfmt",
                type=str,
                help="log format of general logging system",
                default=DEFAULT_GENERAL_LOGFORMAT)
    opts.define("accesslogfmt",
                type=str,
                help="log format of access logging system",
                default=DEFAULT_ACCESS_LOGFORMAT)
    opts.define("logframework",
                type=bool,
                default=False,
                help="enable logging various framework messages")
    opts.define("fingerscrossed",
                type=bool,
                default=True,
                help="enable lazy logging")

    # util server
    opts.define("utilport",
                default=8081,
                type=int,
                help="listening port number for an util server")
    opts.define("utiladdress",
                default="127.0.0.1",
                type=str,
                help="address for an util server")
    opts.define("enableutil",
                default=False,
                type=bool,
                help="enable util server")

    opts.define("so_reuseport",
                default=True,
                type=bool,
                help="use SO_REUSEPORT option")

    opts.parse_command_line()
    enable_logging(opts)

    logger = logging.getLogger("cocaine.proxy.general")

    use_reuseport = False

    endpoints = Endpoints(opts.endpoints)
    sockets = []

    if endpoints.has_unix:
        logger.info("Start binding on unix sockets")
        for path in endpoints.unix:
            logger.info("Binding on %s", path)
            sockets.append(bind_unix_socket(path, mode=0o666))

    if opts.so_reuseport:
        if not support_reuseport():
            logger.warning("Your system doesn't support SO_REUSEPORT."
                           " Bind and fork mechanism will be used")
        else:
            logger.info("SO_REUSEPORT will be used")
            use_reuseport = True

    if not use_reuseport and endpoints.has_tcp:
        logger.info("Start binding on tcp sockets")
        for endpoint in endpoints.tcp:
            logger.info("Binding on %s:%d", endpoint.host, endpoint.port)
            # We have to bind before fork to distribute sockets to our forks
            socks = bind_sockets(endpoint.port, address=endpoint.host)
            logger.info(
                "Listening %s",
                ' '.join(str("%s:%s" % s.getsockname()[:2]) for s in socks))
            sockets.extend(socks)

    if opts.enableutil:
        utilsockets = bind_sockets(opts.utilport, address=opts.utiladdress)
        logger.info(
            "Util server is listening on %s",
            ' '.join(str("%s:%s" % s.getsockname()[:2]) for s in utilsockets))

    try:
        if opts.count != 1:
            process.fork_processes(opts.count)

        if opts.gcstats:
            enable_gc_stats()

        if use_reuseport and endpoints.has_tcp:
            logger.info("Start binding on tcp sockets")
            for endpoint in endpoints.tcp:
                logger.info("Binding on %s:%d", endpoint.host, endpoint.port)
                # We have to bind before fork to distribute sockets to our forks
                socks = bind_sockets(endpoint.port,
                                     address=endpoint.host,
                                     reuse_port=True)
                logger.info(
                    "Listening %s", ' '.join(
                        str("%s:%s" % s.getsockname()[:2]) for s in socks))
                sockets.extend(socks)

        proxy = CocaineProxy(
            locators=opts.locators,
            cache=opts.cache,
            request_id_header=opts.request_header,
            sticky_header=opts.sticky_header,
            forcegen_request_header=opts.forcegen_request_header,
            default_tracing_chance=opts.tracing_chance)
        server = HTTPServer(proxy)
        server.add_sockets(sockets)

        if opts.enableutil:
            utilsrv = HTTPServer(UtilServer(proxy=proxy))
            utilsrv.add_sockets(utilsockets)

        tornado.ioloop.IOLoop.current().start()
    except KeyboardInterrupt:
        pass
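
The two binding paths reflect a deliberate trade-off: without SO_REUSEPORT, TCP sockets must be bound in the parent so the forked workers inherit them, while with SO_REUSEPORT each worker binds its own socket after fork_processes and the kernel balances incoming connections across processes.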
Example #44
def main():
	taskID = process.fork_processes(MAX_WORKER)
	makeWorker(taskID)
	IOLoop.instance().start()
print bcolors.OKGREEN + 'Front end will listen to: ' + bcolors.ENDC + bcolors.OKBLUE + 'http://{}:{}'.format(
    socket.gethostname(), Baseport) + bcolors.ENDC + "\n"

# # start indexer and save it properly
# from indexer import indexer
# print bcolors.HEADER + "====== PREPARATION ======" + bcolors.ENDC
# myindexer=indexer.Indexer(Idxservers, Docservers)
# myindexer.genNum()
# myindexer.getCategoryCorrelation('./constants/reducer_tmp_1000lines.txt', Idxservers, Docservers)

# start working
print "\n" + bcolors.HEADER + '====== START FORKING ======' + bcolors.ENDC
from backend import back
from frontend import front

uid = fork_processes(numIdx + numDoc + 1)
if uid == 0:
    sockets = bind_sockets(Baseport)
    myfront = front.FrontEndApp(Idxservers, Docservers)
    server = myfront.app
elif uid < numIdx + 1:
    sockets = bind_sockets(Idxservers[uid - 1].split(':')[-1])
    myback_idx = back.BackEndApp('indexServer', uid - 1,
                                 Idxservers[uid - 1].split(':')[-1])
    server = myback_idx.app
elif uid < numIdx + numDoc + 1:
    sockets = bind_sockets(Docservers[uid - numIdx - 1].split(':')[-1])
    myback_doc = back.BackEndApp('docServer', uid - numIdx - 1,
                                 Docservers[uid - numIdx - 1].split(':')[-1])
    server = myback_doc.app
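
The snippet above binds every endpoint in the parent and then uses the id returned by fork_processes to decide which role each child plays. The same dispatch pattern, reduced to a sketch (ports, roles and handler invented):

from tornado import ioloop, web
from tornado.httpserver import HTTPServer
from tornado.netutil import bind_sockets
from tornado.process import fork_processes


class EchoHandler(web.RequestHandler):
    def get(self):
        self.write("served by %s\n" % self.settings["role"])


if __name__ == "__main__":
    # Bind in the parent so every child inherits the sockets.
    front, back0, back1 = (bind_sockets(p) for p in (8000, 8001, 8002))
    uid = fork_processes(3)  # returns 0, 1 or 2 in the children
    role, sockets = [("front", front), ("back-0", back0),
                     ("back-1", back1)][uid]
    app = web.Application([(r"/", EchoHandler)], role=role)
    HTTPServer(app).add_sockets(sockets)
    ioloop.IOLoop.current().start()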
def main():
    try:
        options, datafile_json = optparse_lib().parse_args()

        #cmdstr="""nohup python easystatserver.py > /dev/null 2>&1 &"""
        cmdstr = """nohup ./easystatserver > /dev/null 2>&1 &"""
        #status,output=cmd_execute(cmdstr)
        import os
        os.system(cmdstr)

        #print(u"start benchmark test...")
        if (options.processnum != -1):
            process.fork_processes(options.processnum)
        '''logging.basicConfig(level=logging.DEBUG,format='[%(levelname)s] (%(asctime)s) <%(message)s>',datefmt='%a,%Y-%m-%d %H:%M:%S',
            filename="./log/process."+str(tornado.process.task_id())+".log",filemode='w')'''
        logging.basicConfig(
            level=logging.ERROR,
            format=
            '[%(levelname)s] [%(asctime)s] [%(filename)s-line:%(lineno)d] [%(funcName)s-%(threadName)s] %(message)s',
            datefmt='%a,%Y-%m-%d %H:%M:%S',
            filename="./log/easyhttpbenchmark.log",
            filemode='a')
        easyhttpbc = easyhttpbenchmark(options.maxclientnum, options.clientnum,
                                       options.testtime, options.flag,
                                       datafile_json)
        easyhttpbc.benchmark_test()
        #print(u"benchmark test end...")
    except Exception as e:
        logging.error(str(e))

    try:
        from xmlrpc.client import ServerProxy  # xmlrpclib on Python 2
        cfg_json = json.load(open("./conf/easyhttpbenchmark.conf", "r"))
        stat_rpc_server = cfg_json['stat_rpc_server']
        stat_rpc_port = cfg_json['stat_rpc_port']
        svr = ServerProxy("http://" + stat_rpc_server + ":" + stat_rpc_port)
        '''print("total_req_cnt:"+str(easyhttpbc.total_req_cnt))
        print("total_res_cnt:"+str(easyhttpbc.total_res_cnt))
        print("total_err_cnt:"+str(easyhttpbc.total_err_cnt))
        print("total_nul_cnt:"+str(easyhttpbc.total_nul_cnt))'''
        import multiprocessing
        cpu_count = multiprocessing.cpu_count()
        if (options.processnum != 0):
            svr.stat_maxclientnum(options.processnum * options.maxclientnum)
            svr.stat_clientnum(options.processnum * options.clientnum)
        else:
            svr.stat_maxclientnum(cpu_count * options.maxclientnum)
            svr.stat_clientnum(cpu_count * options.clientnum)

        svr.set_test_time(easyhttpbc.testtime)
        svr.stat_total_req_cnt(easyhttpbc.total_req_cnt)
        svr.stat_total_res_cnt(easyhttpbc.total_res_cnt)
        svr.stat_total_err_cnt(easyhttpbc.total_err_cnt)
        svr.stat_total_nul_cnt(easyhttpbc.total_nul_cnt)

        svr.stat_total_below_10(easyhttpbc.below_10)
        svr.stat_total_between_10_20(easyhttpbc.between_10_20)
        svr.stat_total_between_20_30(easyhttpbc.between_20_30)
        svr.stat_total_over_30(easyhttpbc.over_30)
        svr.stat_total_res_time(easyhttpbc.total_res_time)
    except Exception as e:
        logging.error(str(e))
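
The commented-out logging configuration above hints at a useful pattern: deriving a per-worker log file name from tornado.process.task_id(). A minimal sketch under that assumption (directory, format and worker count invented):

import logging
import os

from tornado import process

if __name__ == "__main__":
    os.makedirs("./log", exist_ok=True)
    # fork_processes only returns in the children, where task_id() is
    # 0..3; the parent stays inside it, supervising.
    process.fork_processes(4)
    logging.basicConfig(
        level=logging.INFO,
        format='[%(levelname)s] (%(asctime)s) %(message)s',
        filename="./log/process.%s.log" % process.task_id(),
        filemode='w')
    logging.info("worker %s is up", process.task_id())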
Beispiel #47
0
define('num_processes', default=4, help='sub-processes count', type=int)

if __name__ == '__main__':
    os.chdir(os.path.dirname(os.path.realpath(__file__)))
    opt.parse_command_line()
    routes = c.handlers + c.views
    app = Application(routes,
                      default_handler_class=c.InvalidPageHandler,
                      ui_modules=c.modules,
                      xsrf_cookies=True)
    try:
        ssl_options = not opt.debug and app.site.get('https') or None
        server = HTTPServer(app, xheaders=True, ssl_options=ssl_options)
        sockets = netutil.bind_sockets(opt.port, family=socket.AF_INET)
        fork_id = 0 if opt.debug or os.name == 'nt' else process.fork_processes(
            opt.num_processes)
        server.add_sockets(sockets)
        protocol = 'https' if ssl_options else 'http'
        logging.info('Start the service #%d v%s on %s://localhost:%d' %
                     (fork_id, app.version, protocol, opt.port))
        if fork_id == 0 and app.db and app.db.page.count_documents(
            {}) == 0 and os.path.exists(app.config.get('data_path', '')):
            script = 'python3 utils/add_pages.py --uri={0} --db_name={1} --json_path={2}/json'.format(
                app.db_uri, app.config['database']['name'],
                app.config['data_path'])
            os.system(script)
            os.system('ln -s %s static/ocr_img' % app.config['data_path'])
        ioloop.IOLoop.current().start()

    except KeyboardInterrupt:
        app.stop()
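
The example above skips forking when running in debug mode or on Windows, since autoreload does not mix with fork_processes and Windows lacks fork altogether. The core of that guard, as a sketch (the DEBUG environment variable, port and process count are invented):

import os

from tornado import ioloop, process, web
from tornado.httpserver import HTTPServer
from tornado.netutil import bind_sockets


class MainHandler(web.RequestHandler):
    def get(self):
        self.write("hello from task %s\n" % (process.task_id() or 0))


if __name__ == "__main__":
    debug = bool(os.environ.get("DEBUG"))
    sockets = bind_sockets(8888)
    # Fall back to a single process when forking is unavailable or unwanted.
    fork_id = 0 if debug or os.name == 'nt' else process.fork_processes(4)
    app = web.Application([(r"/", MainHandler)], debug=debug)
    HTTPServer(app).add_sockets(sockets)
    ioloop.IOLoop.current().start()
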
            #print (url)
            response = yield http_client.fetch(url)
            listText = response.body[14:-3]
            resp = json.loads(response.body.decode())
            DocList = resp['results'][0]
            if (DocList['found']):
                results.append(DocList)

        self.finish(json.JSONEncoder().encode({"num_results": len(results), "results": results}))
        #print 'Results: '
        print(str(len(results)) + ' results found in ' + str(loops) + ' loops')


if __name__ == "__main__":

    task_id = process.fork_processes(3)
    if task_id == 0:

        application = tornado.web.Application([
            (r"/search", FrontEndHandler), (r"/person", actorHandler)
        ], **SETTINGS)
        MAX_PORT = 49152
        MIN_PORT = 10000
        BASE_PORT = int(hashlib.md5(getpass.getuser().encode()).hexdigest()[:8], 16) % \
        (MAX_PORT - MIN_PORT) + MIN_PORT
        print ('FrontEnd at port: ' + str(BASE_PORT))
        print ('Host name: ' + str(socket.gethostname()))
        application.listen(BASE_PORT)
    elif task_id == 1:
        os.system('python3.6 -m assignment2.indexServer')
    else:
Beispiel #49
0
from tornado import ioloop, netutil, process
from tornado.httpserver import HTTPServer
from tornado.web import Application
from tornado.options import define, options
import socket
from controller.page import MainHandler, PageHandler

define('port', default=8001, help='run port', type=int)
define('debug', default=True, help='the debug mode', type=bool)
define('num_processes', default=4, help='sub-processes count', type=int)

if __name__ == '__main__':
    options.parse_command_line()
    options.debug = options.debug and options.port != 80
    handlers = [(r'/', MainHandler), (r'/(\w+)', PageHandler)]
    app = Application(
        handlers,
        debug=options.debug,
        cookie_secret='R1sl9JqfQnCOS+aAR0fPVPpw5LzQOkzKudChgWnbhKw=',
        static_path='static',
        template_path='views')

    server = HTTPServer(app, xheaders=True)
    sockets = netutil.bind_sockets(options.port, family=socket.AF_INET)
    fork_id = 0 if options.debug else process.fork_processes(
        options.num_processes)
    server.add_sockets(sockets)

    print('Start http://localhost:%d' % (options.port, ))
    ioloop.IOLoop.current().start()
Beispiel #50
0
# imports and the class line added for completeness; pymysql is assumed
# (MySQLdb exposes the same connect/cursors interface), and '******' is a
# censored credential kept from the source
import json
import sys
import time

from pymysql import connect, cursors
from tornado import gen, httpclient, httpserver, ioloop, netutil, process, web


class BackendHandler(web.RequestHandler):
    def get(self):
        time.sleep(1)  # simulate longer query
        cur = connect(db='tornado', user='******').cursor(cursors.DictCursor)
        cur.execute("SELECT * FROM foo")
        self.write(json.dumps(list(cur.fetchall())))

class FrontendHandler(web.RequestHandler):
    @gen.coroutine
    def get(self):
        http_client = httpclient.AsyncHTTPClient(max_clients=500)
        response = yield http_client.fetch("http://localhost:8001/foo")
        self.set_header("Content-Type", 'application/json')
        self.write(response.body)


if __name__ == "__main__":
    number_of_be_tasks = int(sys.argv[1]) if len(sys.argv) > 1 else 20
    number_of_fe_tasks = int(sys.argv[2]) if len(sys.argv) > 2 else 1
    fe_sockets = netutil.bind_sockets(8000)  # need to bind sockets
    be_sockets = netutil.bind_sockets(8001)  # before forking
    task_id = process.fork_processes(number_of_be_tasks + number_of_fe_tasks)
    if task_id < number_of_fe_tasks:
        handler_class = FrontendHandler
        sockets = fe_sockets
    else:
        handler_class = BackendHandler
        sockets = be_sockets
    httpserver.HTTPServer(web.Application([(r"/foo", handler_class)])
        ).add_sockets(sockets)
    ioloop.IOLoop.instance().start()
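
Once the script above is running, the frontend/backend split can be smoke-tested with a plain synchronous client (ports taken from the example):

from tornado.httpclient import HTTPClient

client = HTTPClient()
# The frontend on 8000 proxies the request to a backend on 8001.
print(client.fetch("http://localhost:8000/foo").body)
client.close()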
Beispiel #51
0
# tornado
from tornado.process import fork_processes
import tornado.ioloop
import tornado.web
# package
from config import inventory
from backend import map, reduce, application

if __name__ == "__main__":

    BASE_PORT = 20000
    path = 'mapreduce_workers.json'
    for i in range(inventory.NUM_OF_MACHINES):
        inventory.getWorker('127.0.0.1:{0}'.format(BASE_PORT + 1 + i))
    inventory.toJson(path)

    pid = fork_processes(inventory.NUM_OF_MACHINES, max_restarts=0)
    app = application.Application(([
        (r"/map?", map.MapHandler),
        (r"/retrieveMapOutput?", map.RetrieveOutputHandler),
        (r"/reduce?", reduce.ReduceHandler),
    ]))
    port = BASE_PORT + 1 + pid
    app.setInventory(path)
    app.listen(port)
    print('A worker is working at ' + inventory.MACHINES_IN_USE[pid])
    tornado.ioloop.IOLoop.instance().start()
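
The example above passes max_restarts=0 to disable restarting crashed workers; by default fork_processes replaces any child that exits abnormally, up to max_restarts replacements in total. A sketch of those semantics (worker count and exit codes invented):

import os
import time

from tornado import process

if __name__ == "__main__":
    # Returns the task id in each child; the parent stays inside
    # fork_processes, supervising them.
    task_id = process.fork_processes(2, max_restarts=3)
    print("worker %d running as pid %d" % (task_id, os.getpid()))
    if task_id == 0:
        os._exit(1)  # abnormal exit: the parent forks a replacement
    time.sleep(2)
    os._exit(0)      # clean exit: the child is not restarted

Once worker 0 has crashed more than max_restarts times, the parent gives up and raises RuntimeError.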
Beispiel #52
0
    def test_multi_process(self):
        self.assertFalse(IOLoop.initialized())
        port = get_unused_port()

        def get_url(path):
            return "http://127.0.0.1:%d%s" % (port, path)

        sockets = bind_sockets(port, "127.0.0.1")
        # ensure that none of these processes live too long
        signal.alarm(5)  # master process
        id = fork_processes(3, max_restarts=3)
        if id is None:
            # back in the master process; everything worked!
            self.assertTrue(task_id() is None)
            for sock in sockets:
                sock.close()
            signal.alarm(0)
            return
        signal.alarm(5)  # child process
        try:
            if id in (0, 1):
                signal.alarm(5)
                self.assertEqual(id, task_id())
                server = HTTPServer(self.get_app())
                server.add_sockets(sockets)
                IOLoop.instance().start()
            elif id == 2:
                signal.alarm(5)
                self.assertEqual(id, task_id())
                for sock in sockets:
                    sock.close()
                client = HTTPClient()

                def fetch(url, fail_ok=False):
                    try:
                        return client.fetch(get_url(url))
                    except HTTPError as e:
                        if not (fail_ok and e.code == 599):
                            raise

                # Make two processes exit abnormally
                fetch("/?exit=2", fail_ok=True)
                fetch("/?exit=3", fail_ok=True)

                # They've been restarted, so a new fetch will work
                int(fetch("/").body)

                # Now the same with signals
                # Disabled because on the mac a process dying with a signal
                # can trigger an "Application exited abnormally; send error
                # report to Apple?" prompt.
                #fetch("/?signal=%d" % signal.SIGTERM, fail_ok=True)
                #fetch("/?signal=%d" % signal.SIGABRT, fail_ok=True)
                #int(fetch("/").body)

                # Now kill them normally so they won't be restarted
                fetch("/?exit=0", fail_ok=True)
                # One process left; watch its pid change
                pid = int(fetch("/").body)
                fetch("/?exit=4", fail_ok=True)
                pid2 = int(fetch("/").body)
                self.assertNotEqual(pid, pid2)

                # Kill the last one so we shut down cleanly
                fetch("/?exit=0", fail_ok=True)

                os._exit(0)
        except Exception:
            logging.error("exception in child process %d", id, exc_info=True)
            raise
Beispiel #53
0
def main():
    from tornado import options

    default_general_logformat = "[%(asctime)s.%(msecs)d]\t[%(filename).5s:%(lineno)d]\t%(levelname)s\t%(message)s"
    default_access_logformat = "[%(asctime)s.%(msecs)d]\t[%(filename).5s:%(lineno)d]\t%(levelname)s\t%(trace_id)s\t%(message)s"

    opts = options.OptionParser()
    opts.define("version",
                type=bool,
                help="show version and exit",
                callback=show_version)
    opts.define("locators",
                default=["localhost:10053"],
                type=str,
                multiple=True,
                help="comma-separated endpoints of locators")
    opts.define("cache",
                default=DEFAULT_SERVICE_CACHE_COUNT,
                type=int,
                help="count of instances per service")
    opts.define(
        "config",
        help="path to configuration file",
        type=str,
        callback=lambda path: opts.parse_config_file(path, final=False))
    opts.define("count",
                default=1,
                type=int,
                help="count of tornado processes")
    opts.define(
        "endpoints",
        default=["tcp://localhost:8080"],
        type=str,
        multiple=True,
        help=
        "Specify endpoints to bind on: prefix unix:// or tcp:// should be used"
    )
    opts.define("request_header",
                default="X-Request-Id",
                type=str,
                help="header used as a trace id")
    opts.define("forcegen_request_header",
                default=False,
                type=bool,
                help="enable force generation of the request header")
    opts.define("sticky_header",
                default="X-Cocaine-Sticky",
                type=str,
                help="sticky header name")
    opts.define("gcstats",
                default=False,
                type=bool,
                help="print garbage collector stats to stderr")
    opts.define("srwconfig", default="", type=str, help="path to srwconfig")
    opts.define("allow_json_rpc",
                default=True,
                type=bool,
                help="allow JSON RPC module")
    opts.define("mapped_headers",
                default=[],
                type=str,
                multiple=True,
                help="pass specified headers as cocaine headers")

    # tracing options
    opts.define("tracing_chance",
                default=DEFAULT_TRACING_CHANCE,
                type=float,
                help="default chance for an app to be traced")
    opts.define("configuration_service",
                default="unicorn",
                type=str,
                help="name of configuration service")
    opts.define(
        "tracing_conf_path",
        default="/zipkin_sampling",
        type=str,
        help="path to the configuration nodes in the configuration service")

    # various logging options
    opts.define(
        "logging",
        default="info",
        help=("Set the Python log level. If 'none', tornado won't touch the "
              "logging configuration."),
        metavar="debug|info|warning|error|none")
    opts.define("log_to_cocaine",
                default=False,
                type=bool,
                help="log to cocaine")
    opts.define(
        "log_to_stderr",
        type=bool,
        default=None,
        help=("Send log output to stderr. "
              "By default use stderr if --log_file_prefix is not set and "
              "no other logging is configured."))
    opts.define("log_file_prefix",
                type=str,
                default=None,
                metavar="PATH",
                help=("Path prefix for log file"))
    opts.define("datefmt",
                type=str,
                default="%z %d/%b/%Y:%H:%M:%S",
                help="datefmt")
    opts.define("generallogfmt",
                type=str,
                help="log format of general logging system",
                default=default_general_logformat)
    opts.define("accesslogfmt",
                type=str,
                help="log format of access logging system",
                default=default_access_logformat)
    opts.define("logframework",
                type=bool,
                default=False,
                help="enable logging various framework messages")

    # util server
    opts.define("utilport",
                default=8081,
                type=int,
                help="listening port number for an util server")
    opts.define("utiladdress",
                default="127.0.0.1",
                type=str,
                help="address for an util server")
    opts.define("enableutil",
                default=False,
                type=bool,
                help="enable util server")
    opts.define("client_id",
                default=0,
                type=int,
                help="client id used for authentication")
    opts.define("client_secret",
                default='',
                type=str,
                help="client secret used for authentication")
    opts.parse_command_line()

    srw_config = None
    if opts.srwconfig:
        try:
            srw_config = load_srw_config(opts.srwconfig)
        except Exception as err:
            print("unable to load SRW config: %s" % err)
            exit(1)

    use_reuseport = hasattr(socket, "SO_REUSEPORT")
    endpoints = Endpoints(opts.endpoints)
    sockets = []

    for path in endpoints.unix:
        sockets.append(bind_unix_socket(path, mode=0o666))

    if not use_reuseport:
        for endpoint in endpoints.tcp:
            # We have to bind before fork to distribute sockets to our forks
            socks = bind_sockets(endpoint.port, address=endpoint.host)
            sockets.extend(socks)

    if opts.enableutil:
        utilsockets = bind_sockets(opts.utilport, address=opts.utiladdress)

    try:
        if opts.count != 1:
            process.fork_processes(opts.count)

        enable_logging(opts)

        if opts.gcstats:
            enable_gc_stats()

        if use_reuseport:
            for endpoint in endpoints.tcp:
                # We have to bind before fork to distribute sockets to our forks
                socks = bind_sockets(endpoint.port,
                                     address=endpoint.host,
                                     reuse_port=True)
                sockets.extend(socks)

        proxy = CocaineProxy(
            locators=opts.locators,
            cache=opts.cache,
            request_id_header=opts.request_header,
            sticky_header=opts.sticky_header,
            forcegen_request_header=opts.forcegen_request_header,
            default_tracing_chance=opts.tracing_chance,
            srw_config=srw_config,
            allow_json_rpc=opts.allow_json_rpc,
            client_id=opts.client_id,
            client_secret=opts.client_secret,
            mapped_headers=opts.mapped_headers)
        server = HTTPServer(proxy)
        server.add_sockets(sockets)

        if opts.enableutil:
            utilsrv = HTTPServer(UtilServer(proxy=proxy))
            utilsrv.add_sockets(utilsockets)

        tornado.ioloop.IOLoop.current().start()
    except KeyboardInterrupt:
        pass
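
The config option above is worth noting: its callback feeds the file through parse_config_file(..., final=False) in the middle of command-line parsing, so flags given after --config still override values from the file. Its core, reduced to a sketch (the port option is invented):

from tornado.options import OptionParser

opts = OptionParser()
opts.define("port", default=8080, type=int, help="listen port")
opts.define(
    "config",
    type=str,
    help="path to configuration file",
    callback=lambda path: opts.parse_config_file(path, final=False))
opts.parse_command_line()
print(opts.port)

The configuration file here is plain Python, e.g. a single line port = 9000.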
Beispiel #54
0
def main():

    parser = argparse.ArgumentParser(
        'SEASaw - A Search Engine For Video Content')
    parser.add_argument("--gca_credentials_path",
                        action="store",
                        default=None,
                        dest="gca_credentials_path")
    parser.add_argument("--database_password",
                        action="store",
                        default=None,
                        dest="database_password")
    parser.add_argument("--imgur_password",
                        action="store",
                        default=None,
                        dest="imgur_password")
    parser.add_argument(
        "-s", action="store_true",
        dest="run_scraper")  # will not run on linux box, due to dependencies
    parser.add_argument("-l", action="store_true",
                        dest="local")  # local env and linserv have differences
    args = parser.parse_args()

    if args.local:
        inventory.set_local()
    else:
        inventory.set_linserv()

    if (args.gca_credentials_path is None) or (args.database_password is None):
        print(
            "start - Missing credential path or database password, datastore will not be loaded"
        )
    else:
        proxy.start(args.gca_credentials_path)
        dao.init(args.database_password)

    # spin up component APIs
    process_id = process.fork_processes(len(inventory.ports), max_restarts=100)

    if process_id == 0:
        if args.run_scraper:
            print("start - initiating scraper")
            # youtube scraper
            for i in range(0, 5):
                term_index = randint(0, len(inventory.search_terms) - 1)
                term = inventory.search_terms[term_index]
                print("start - setting scraper to find 10 results for " + term)
                scraper.start(term, 10)
                print("start - resting scraper")
                time.sleep(3600)
            print("start - scraper finished")
    elif process_id == 1:
        # imgur uploader
        if args.imgur_password is None or not args.run_scraper:
            print("start - imgur uploader not running")
        else:
            print("start - imgur uploader running")
            while True:
                result = imguruploader.start(args.imgur_password)
                if result == 1:
                    print(
                        "start - rate limit exceeded, imgur component will pause for a while"
                    )
                    time.sleep(120)
                elif result == 2:
                    print(
                        "start - nothing for imgur uploader to do, imgur component will pause for a while"
                    )
                    time.sleep(120)
    elif process_id == 2:
        # database uploader
        if args.run_scraper:
            print("start - initiating database uploader")
            while True:
                datasourceuploader.start()
                time.sleep(30)
    elif process_id in (3, 4):
        # datasource api

        if args.local:
            instance = Application([(r"/healthcheck", HealthCheckHandler),
                                    (r"/results/(.*)", ResultGetterHandler),
                                    (r"/results", ResultQueryHandler),
                                    (r"/(.*)", StaticFileHandler, {
                                        "path":
                                        "static/apidocs/datasource_local/",
                                        "default_filename": "index.html"
                                    })])
        else:
            instance = Application([(r"/healthcheck", HealthCheckHandler),
                                    (r"/results/(.*)", ResultGetterHandler),
                                    (r"/results", ResultQueryHandler),
                                    (r"/(.*)", StaticFileHandler, {
                                        "path": "static/apidocs/datasource/",
                                        "default_filename": "index.html"
                                    })])

        port = inventory.ports[process_id - 3]
        instance.listen(port)

        print("start - Data Source Interface listening on port " + str(port))

    else:
        # Index server threads
        port = inventory.ports[process_id]
        if (port == 25285):
            if (args.local):  #vagrant
                instance = Application(
                    [(r'/search', SearchHandler),
                     (r'/(.*)', IndexDotHTMLAwareStaticFileHandler,
                      dict(path=SETTINGS['static_path']))], **SETTINGS)
                print("start - Frontend Interface listening on port " +
                      str(port))
                instance.listen(port)
            else:  #linserv
                instance = Application(
                    [(r'/search', SearchHandlerL),
                     (r'/(.*)', IndexDotHTMLAwareStaticFileHandlerL,
                      dict(path=SETTINGS['static_path']))], **SETTINGS)
                print("start - Frontend Interface listening on port " +
                      str(port))
                instance.listen(port)

    IOLoop.current().start()
Beispiel #55
0
# imports added for completeness; init_db comes from the surrounding project
import logging

import yaml
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from tornado.netutil import bind_sockets
from tornado.process import fork_processes
from tornado.web import Application, RequestHandler


class ReqHandler(RequestHandler):
    def initialize(self, db):
        self.db = db

    def get(self, query_string):
        r = self.db.search(query_string)
        self.write({'size': len(r), 'entries': r})


def make_app(db):
    return Application([('/s/(.+)', ReqHandler, dict(db=db))])


def load_config(config_file="config.yml"):
    with open(config_file, 'r') as config:
        return yaml.safe_load(config)


if __name__ == '__main__':
    cfg = load_config()
    db = init_db(cfg['database']['DATA_SOURCE'])
    app = make_app(db)

    sockets = bind_sockets(cfg['server']['PORT'])
    fork_processes(0)
    server = HTTPServer(app)
    server.add_sockets(sockets)

    logging.info("Server are listening in %d port ", cfg['server']['PORT'])
    IOLoop.current().start()
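
For reference, the keys the example reads imply a config.yml of roughly this shape (values are invented):

# config.yml
server:
  PORT: 8000
database:
  DATA_SOURCE: data/entries.db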
Beispiel #56
0
# imports and the class line added for completeness; CONFIG_CONNECT and
# new_handle come from the surrounding project and are not shown here
import json
import os

import pymysql
from tornado import httpserver, ioloop, netutil, process
from tornado.gen import coroutine
from tornado.web import Application, RequestHandler


class BaseHandle(RequestHandler):
    @coroutine
    def execute(self, fut):
        conn = pymysql.connect(**CONFIG_CONNECT)
        cursor = conn.cursor()
        cursor.execute('select * from lbe_manager.ec2_and_user')
        try:
            # pymysql is synchronous: fetchall() returns a list, not a
            # future, so it must not be yielded here
            result = cursor.fetchall()
        except Exception:
            result = []
        fut.set_result(json.dumps(result))

    @coroutine
    def get(self, *args, **kwargs):
        future = yield self.new_handle(self.execute)
        print('-' * 10, future)
        self.finish('hello world')

    def post(self, *args, **kwargs):

        pass


if __name__ == '__main__':
    app = Application([(r'/handle', BaseHandle)])
    sockets = netutil.bind_sockets(6667, address='0.0.0.0', reuse_port=True)
    process.fork_processes(os.cpu_count())
    server = httpserver.HTTPServer(app)
    server.add_sockets(sockets)
    print('Test started')
    ioloop.IOLoop.current().start()
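
The handler above drives a synchronous pymysql cursor from a coroutine. The usual idiom is to push the blocking query onto a thread pool via run_on_executor, as in this sketch (connection settings invented):

import json
from concurrent.futures import ThreadPoolExecutor

import pymysql
from tornado import gen, ioloop, web
from tornado.concurrent import run_on_executor


class QueryHandler(web.RequestHandler):
    executor = ThreadPoolExecutor(max_workers=4)

    @run_on_executor
    def fetch_rows(self):
        # Runs on a worker thread, so the blocking driver does not
        # stall the IOLoop.
        conn = pymysql.connect(host="127.0.0.1", user="root", db="test")
        try:
            with conn.cursor() as cursor:
                cursor.execute("SELECT 1")
                return cursor.fetchall()
        finally:
            conn.close()

    @gen.coroutine
    def get(self):
        rows = yield self.fetch_rows()
        self.finish(json.dumps(rows))


if __name__ == "__main__":
    web.Application([(r"/rows", QueryHandler)]).listen(8888)
    ioloop.IOLoop.current().start()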