Example #1
    def f(c, a, b):
        e = Executor((c.ip, c.port), start=False)
        IOLoop.current().spawn_callback(e._go)

        L = e.map(inc, range(5), workers={a.ip})
        yield _wait(L)

        assert set(a.data) == {x.key for x in L}
        assert not b.data
        for x in L:
            assert e.restrictions[x.key] == {a.ip}

        L = e.map(inc, [10, 11, 12], workers=[{a.ip},
                                              {a.ip, b.ip},
                                              {b.ip}])
        yield _wait(L)

        assert e.restrictions[L[0].key] == {a.ip}
        assert e.restrictions[L[1].key] == {a.ip, b.ip}
        assert e.restrictions[L[2].key] == {b.ip}

        with pytest.raises(ValueError):
            e.map(inc, [10, 11, 12], workers=[{a.ip}])

        yield e._shutdown()
Example #2
    def stop_single_user(self, user):
        if user.stop_pending:
            raise RuntimeError("Stop already pending for: %s" % user.name)
        tic = IOLoop.current().time()
        yield self.proxy.delete_user(user)
        f = user.stop()
        @gen.coroutine
        def finish_stop(f=None):
            """Finish the stop action by noticing that the user is stopped.

            If the spawner is slow to stop, this is passed as an async callback,
            otherwise it is called immediately.
            """
            if f and f.exception() is not None:
                # failed, don't do anything
                return
            toc = IOLoop.current().time()
            self.log.info("User %s server took %.3f seconds to stop", user.name, toc-tic)

        try:
            yield gen.with_timeout(timedelta(seconds=self.slow_stop_timeout), f)
        except gen.TimeoutError:
            if user.stop_pending:
                # hit timeout, but stop is still pending
                self.log.warning("User %s server is slow to stop", user.name)
                # schedule finish for when the server finishes stopping
                IOLoop.current().add_future(f, finish_stop)
            else:
                raise
        else:
            yield finish_stop()
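
The pattern above — racing the stop coroutine against gen.with_timeout and, on timeout, attaching the completion handler with IOLoop.add_future — works standalone. A minimal sketch with illustrative names (slow_stop, finish_stop are stand-ins, not the JupyterHub API):

from datetime import timedelta

from tornado import gen
from tornado.ioloop import IOLoop


@gen.coroutine
def slow_stop():
    yield gen.sleep(2)


@gen.coroutine
def main():
    f = slow_stop()

    def finish_stop(future):
        print("server finally stopped")

    try:
        yield gen.with_timeout(timedelta(seconds=0.5), f)
    except gen.TimeoutError:
        print("server is slow to stop")
        # schedule finish_stop for whenever f eventually completes
        IOLoop.current().add_future(f, finish_stop)
        yield f  # only so this demo's loop outlives the slow task
    else:
        finish_stop(f)


IOLoop.current().run_sync(main)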
Example #3
 def run(self):
     """Run server which returns an available server port where code
     can be executed.
     """
     # We start the code servers here to ensure they are run as nobody.
     self._start_code_servers()
     IOLoop.current().start()
Example #4
def test_errors_dont_block():
    c = Center('127.0.0.1', 8017)
    w = Worker('127.0.0.2', 8018, c.ip, c.port, ncores=1)
    e = Executor((c.ip, c.port), start=False)
    @gen.coroutine
    def f():
        c.listen(c.port)
        yield w._start()
        IOLoop.current().spawn_callback(e._go)

        L = [e.submit(inc, 1),
             e.submit(throws, 1),
             e.submit(inc, 2),
             e.submit(throws, 2)]

        i = 0
        while not (L[0].status == L[2].status == 'finished'):
            i += 1
            if i == 1000:
                assert False
            yield gen.sleep(0.01)
        result = yield e._gather([L[0], L[2]])
        assert result == [2, 3]

        yield w._close()
        c.stop()

    IOLoop.current().run_sync(f)
Example #5
    def send_request(self, request):
        """Send the given request and response is required.

        Use this for messages which have a response message.

        :param request:
            request to send
        :returns:
            A Future containing the response for the request
        """
        assert self._loop_running, "Perform a handshake first."

        assert request.id not in self._outstanding, (
            "Message ID '%d' already being used" % request.id
        )

        future = tornado.gen.Future()
        self._outstanding[request.id] = future
        self.stream_request(request)

        if request.ttl:
            self._add_timeout(request, future)

        # the actual future that caller will yield
        response_future = tornado.gen.Future()
        # TODO: fire before_receive_response

        IOLoop.current().add_future(
            future,
            lambda f: self.adapt_result(f, request, response_future),
        )
        return response_future
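
The two-future handoff in send_request (an internal future that is adapted into the future the caller yields) can be reduced to a small helper. A sketch with made-up names (adapt, transform), assuming Tornado 5+ where tornado.concurrent.Future is the asyncio Future:

from tornado.concurrent import Future
from tornado.ioloop import IOLoop


def adapt(inner, transform):
    """Resolve a new future with transform(result) once `inner` completes."""
    outer = Future()

    def copy(f):
        if f.exception() is not None:
            outer.set_exception(f.exception())
        else:
            outer.set_result(transform(f.result()))

    IOLoop.current().add_future(inner, copy)
    return outer


async def demo():
    inner = Future()
    outer = adapt(inner, lambda body: body.upper())
    inner.set_result("ok")
    print(await outer)  # prints: OK


IOLoop.current().run_sync(demo)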
Example #6
def main() -> None:
    '''Runs server'''

    # Parse options
    define('production',
               default = False,
               help = 'run in production mode',
               type = bool)
    options.parse_command_line()

    # Set server name
    pname = settings.process_name if settings.process_name else None
    if pname:
        setproctitle(pname)

    # Register IRC server
    server = IRCServer(settings = ircdsettings)
    for address, port in ircdsettings['listen']:
        server.listen(port, address = address)

    # Start profiling
    if settings.profiling:
        import yappi
        yappi.start()

    # Setup autoreload
    autoreload.start()

    # Run application
    IOLoop.instance().start()
Example #7
File: handler.py Project: 1-Hash/Toto
  def respond(self, result=None, error=None, batch_results=None, allow_async=True):
    '''Respond to the request with the given result or error object (the ``batch_results`` and
    ``allow_async`` parameters are for internal use only and not intended to be supplied manually).
    Responses will be serialized according to the ``response_type`` property. The default
    serialization is "application/json". Other supported protocols are:

    * application/bson - requires pymongo
    * application/msgpack - requires msgpack-python

    The response will also contain any available session information.

    To help with error handling in asynchronous methods, calling ``handler.respond(error=<your_error>)`` with a caught
    exception will trigger a normal Toto error response, log the error and finish the request. This is the same basic
    flow that is used internally when exceptions are raised from synchronous method calls.

    The "error" property of the response is derived from the ``error`` parameter in the following ways:

    1. If ``error`` is an instance of ``TotoException``, "error" will be a dictionary with "value" and "code" keys matching those of the ``TotoException``.
    2. In all other cases, ``error`` is first converted to a ``TotoException`` with ``code = <ERROR_SERVER>`` and ``value = str(error)`` before following (1.).

    To send custom error information, pass an instance of ``TotoException`` with ``value = <some_json_serializable_object>``.
    '''
    #if the handler is processing an async method, schedule the response on the main runloop
    if self.async and allow_async:
      IOLoop.instance().add_callback(lambda: self.respond(result, error, batch_results, False))
      return
Example #8
def _go(args):
    app = create_app(args)
    port = app.app.config['PORT']
    app.setup()

    mode = app.app.config['SERVER']
    if args.mode == 'dev':
        mode = 'development'
        app.app.config['DEBUG'] = True
    elif args.mode == 'prd':
        mode = 'production'

    if mode == 'development':
        print("Starting development server on port %d..." % port)
        app.app.run(port=port)
    elif mode == 'production':
        appl = WSGIContainer(app.app)
        if 'SSL_KEY' in app.app.config:
            http_server = HTTPServer(appl, ssl_options={
                "certfile": app.app.config['SSL_CRT'],
                "keyfile": app.app.config['SSL_KEY'],
            })
        else:
            http_server = HTTPServer(appl)
        http_server.listen(port)
        print("Starting production server on port %d..." % port)
        IOLoop.instance().start()
    else:
        sys.stderr.write("Invalid SERVER setting '%s', aborting.\n" % args.mode)
        sys.exit(1)
Example #9
    def on_pong(self, data):
        """Clear the timeout, sleep, and send a new ping.

        .. todo::
            *   Document the times used in this method.
                The calculations are in my black notebook
                XD.
        """
        try:
            if self.ping_timeout_handle is not None:
                IOLoop.current().remove_timeout(
                    self.ping_timeout_handle)

            yield sleep(conf.ping_sleep)

            self.ping(b'1')
            self.ping_timeout_handle = \
                IOLoop.current().call_later(
                    conf.ping_timeout, self.close)

        except WebSocketClosedError:
            pass

        except:
            raise
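
call_later returns a timeout handle that remove_timeout can cancel, which is all a ping/pong watchdog needs. A self-contained sketch with an illustrative Heartbeat class (not the API of the project above):

from tornado.ioloop import IOLoop


class Heartbeat(object):
    """Illustrative: arm a deadline with call_later, disarm it on pong."""

    def __init__(self, timeout, on_timeout):
        self.timeout = timeout
        self.on_timeout = on_timeout
        self.handle = None

    def arm(self):
        self.handle = IOLoop.current().call_later(self.timeout, self.on_timeout)

    def disarm(self):
        if self.handle is not None:
            IOLoop.current().remove_timeout(self.handle)
            self.handle = None


hb = Heartbeat(0.1, lambda: print("peer timed out"))
hb.arm()      # ping sent: close the connection unless a pong arrives in time
hb.disarm()   # pong arrived: cancel the pending close
hb.arm()      # next ping: re-arm the deadline
IOLoop.current().call_later(0.2, IOLoop.current().stop)
IOLoop.current().start()   # prints "peer timed out" after 0.1s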
Example #10
File: pool.py Project: sansa1839/mongotor
    def connection(self, callback):
        """Get a connection from pool

        :Parameters:
          - `callback` : method which will be called when connection is ready

        """
        self._condition.acquire()

        try:
            conn = self._idle_connections.pop(0)

        except IndexError:
            if self._maxconnections and self._connections >= self._maxconnections:

                retry_connection = partial(self.connection, callback)
                IOLoop.instance().add_callback(retry_connection)

                return

            conn = self._create_connection()
            self._connections += 1

        finally:
            self._condition.release()

        return callback(conn)
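
When the pool is exhausted, the method re-schedules itself with add_callback instead of blocking, so the IOLoop stays responsive between retries. The retry loop in isolation, with hypothetical acquire/done names:

from functools import partial

from tornado.ioloop import IOLoop

attempts = {"n": 0}


def acquire(callback):
    attempts["n"] += 1
    if attempts["n"] < 3:
        # nothing free yet: try again on a later loop iteration
        IOLoop.current().add_callback(partial(acquire, callback))
        return
    callback("connection-%d" % attempts["n"])


def done(conn):
    print("got", conn)
    IOLoop.current().stop()


acquire(done)
IOLoop.current().start()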
Example #11
def test_send_email_single_blacklisted_domain(smtp_sendmail, options):
    options = add_options(options)

    func = partial(accounts.utils.send_email, '*****@*****.**', 'test subject', 'test message')
    IOLoop.instance().run_sync(func)

    assert not smtp_sendmail.called
Example #12
def main():
    numProcs = inventory.NUM_INDEX_SHARDS + inventory.NUM_DOC_SHARDS + 1
    taskID = process.fork_processes(numProcs, max_restarts=0)
    port = inventory.BASE_PORT + taskID
    if taskID == 0:
        app = httpserver.HTTPServer(tornado.web.Application([
                (r"/search", Web),
                (r"/upload", UploadHandler),
                (r"/(.*)", IndexDotHTMLAwareStaticFileHandler, dict(path=SETTINGS['static_path']))
            ], **SETTINGS))
        logging.info("Front end is listening on " + str(port))
    else:       
        if taskID <= inventory.NUM_INDEX_SHARDS:
            shardIx = taskID - 1
            #data = pickle.load(open("data/index%d.pkl" % (shardIx), "r"))
            inverted_path = os.path.join(os.getcwd(),"../assignment5/df_jobs/%d.out"  % (shardIx))
            logging.info("Inverted file path: %s" % inverted_path)
            data = pickle.load(open(inverted_path ,'r'))
            idf_path = os.path.join(os.getcwd(), "../assignment5/idf_jobs/0.out")
            logIDF = pickle.load(open(idf_path,'r'))
            app = httpserver.HTTPServer(web.Application([(r"/index", index.Index, dict(data=data, logIDF=logIDF))]))
            logging.info("Index shard %d listening on %d" % (shardIx, port))
        else:
            shardIx = taskID - inventory.NUM_INDEX_SHARDS - 1
            #data = pickle.load(open("data/doc%d.pkl" % (shardIx), "r"))
            doc_path = os.path.join(os.getcwd(),"../assignment5/i_df_jobs/%d.out" % (shardIx))
            logging.info("Doc Server path %s" % doc_path)
            data = pickle.load(open(doc_path, "r"))
            app = httpserver.HTTPServer(web.Application([(r"/doc", doc.Doc, dict(data=data))]))
            logging.info("Doc shard %d listening on %d" % (shardIx, port))
    app.add_sockets(netutil.bind_sockets(port))
    IOLoop.current().start()
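
For contrast, the standard Tornado pre-fork recipe binds the listening sockets once in the parent before fork_processes and lets every child accept on them, instead of deriving one port per task ID as above. A minimal sketch:

from tornado import httpserver, ioloop, netutil, process, web


class Hello(web.RequestHandler):
    def get(self):
        self.write("hello from task %s" % process.task_id())


sockets = netutil.bind_sockets(8000)   # bind once, in the parent
process.fork_processes(0)              # 0 forks one child per CPU core
server = httpserver.HTTPServer(web.Application([(r"/", Hello)]))
server.add_sockets(sockets)            # all children accept on the shared sockets
ioloop.IOLoop.current().start()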
Example #13
 def tearDown(self):
     self.http_server.stop()
     self.io_loop.run_sync(self.http_server.close_all_connections)
     if (not IOLoop.initialized() or
             self.http_client.io_loop is not IOLoop.instance()):
         self.http_client.close()
     super(AsyncHTTPTestCase, self).tearDown()
Example #14
    def wrapper(*args, **kwargs):
        runner = None
        future = TracebackFuture()

        if 'callback' in kwargs:
            callback = kwargs.pop('callback')
            IOLoop.current().add_future(
                future, lambda future: callback(future.result()))

        def handle_exception(typ, value, tb):
            try:
                if runner is not None and runner.handle_exception(typ, value, tb):
                    return True
            except Exception:
                typ, value, tb = sys.exc_info()
            future.set_exc_info((typ, value, tb))
            return True
        with ExceptionStackContext(handle_exception):
            try:
                result = func(*args, **kwargs)
            except (Return, StopIteration) as e:
                result = getattr(e, 'value', None)
            except Exception:
                future.set_exc_info(sys.exc_info())
                return future
            else:
                if isinstance(result, types.GeneratorType):
                    def final_callback(value):
                        future.set_result(value)
                    runner = Runner(result, final_callback)
                    runner.run()
                    return future
            future.set_result(result)
        return future
Example #15
File: server.py Project: AxTheB/OctoPrint
	def run(self):
		# Global as I can't work out a way to get it into PrinterStateConnection
		global printer
		global gcodeManager

		from tornado.wsgi import WSGIContainer
		from tornado.httpserver import HTTPServer
		from tornado.ioloop import IOLoop
		from tornado.web import Application, FallbackHandler

		# first initialize the settings singleton and make sure it uses given configfile and basedir if available
		self._initSettings(self._configfile, self._basedir)

		# then initialize logging
		self._initLogging(self._debug)

		gcodeManager = gcodefiles.GcodeManager()
		printer = Printer(gcodeManager)

		if self._host is None:
			self._host = settings().get(["server", "host"])
		if self._port is None:
			self._port = settings().getInt(["server", "port"])

		logging.getLogger(__name__).info("Listening on http://%s:%d" % (self._host, self._port))
		app.debug = self._debug

		self._router = tornadio2.TornadioRouter(PrinterStateConnection)

		self._tornado_app = Application(self._router.urls + [
			(".*", FallbackHandler, {"fallback": WSGIContainer(app)})
		])
		self._server = HTTPServer(self._tornado_app)
		self._server.listen(self._port, address=self._host)
		IOLoop.instance().start()
Example #16
 def wrapper(self, *args, **kwargs):
     callback = kwargs.pop("callback", None)
     future = thread_resolver.executor.submit(fn, self, *args, **kwargs)
     if callback:
         IOLoop.current().add_future(future,
                                 lambda future: callback(future.result()))
     return future
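
The same bridge works with a plain concurrent.futures executor: submit the blocking call to a thread and let add_future (which accepts concurrent futures as well as Tornado's) deliver the result back on the loop. A runnable sketch with an illustrative blocking_lookup:

from concurrent.futures import ThreadPoolExecutor

from tornado.ioloop import IOLoop

executor = ThreadPoolExecutor(max_workers=2)


def blocking_lookup(name):          # stands in for any blocking call
    return "127.0.0.1" if name == "localhost" else None


def on_done(f):
    print("resolved:", f.result())  # runs back on the IOLoop thread
    IOLoop.current().stop()


future = executor.submit(blocking_lookup, "localhost")
IOLoop.current().add_future(future, on_done)
IOLoop.current().start()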
Example #17
 def save(self, data):
     if data != '':
         self.fd.write(data)
     else:
         self.fd.close()
         print "File is saved to " + self.store_path
         IOLoop.instance().stop()
Example #18
    def test_close_file_object(self):
        """When a file object is used instead of a numeric file descriptor,
        the object should be closed (by IOLoop.close(all_fds=True),
        not just the fd.
        """
        # Use a socket since they are supported by IOLoop on all platforms.
        # Unfortunately, sockets don't support the .closed attribute for
        # inspecting their close status, so we must use a wrapper.
        class SocketWrapper(object):
            def __init__(self, sockobj):
                self.sockobj = sockobj
                self.closed = False

            def fileno(self):
                return self.sockobj.fileno()

            def close(self):
                self.closed = True
                self.sockobj.close()
        sockobj, port = bind_unused_port()
        socket_wrapper = SocketWrapper(sockobj)
        io_loop = IOLoop()
        io_loop.add_handler(socket_wrapper, lambda fd, events: None,
                            IOLoop.READ)
        io_loop.close(all_fds=True)
        self.assertTrue(socket_wrapper.closed)
Example #19
File: hermes.py Project: madytyoo/appscale
def main():
  """ Main. """

  logging_level = logging.INFO
  if hermes_constants.DEBUG:
    logging_level = logging.DEBUG
  logging.getLogger().setLevel(logging_level)

  signal.signal(signal.SIGTERM, signal_handler)
  signal.signal(signal.SIGINT, signal_handler)

  parse_command_line()

  app = tornado.web.Application([
    (MainHandler.PATH, MainHandler),
    (TaskHandler.PATH, TaskHandler),
  ], debug=False)

  try:
    app.listen(options.port)
  except socket.error:
    logging.error("ERROR on Hermes initialization: Port {0} already in use.".
      format(options.port))
    shutdown()
    return

  logging.info("Hermes is up and listening on port: {0}.".
    format(options.port))

  # Start loop for accepting http requests.
  IOLoop.instance().start()
Example #20
File: oauth.py Project: astore/pluss
	def on_refresh_complete(cls, response, id, callback):
		"""Callback for request to get a new access token based on refresh token."""

		if response.code in (400, 401):

			if 'invalid_grant' in response.body:
				# Our refresh token is invalid, which means that we don't have
				# permission to access this user's content anymore. Forget them.
				Cache.delete(cls.auth_cache_key_template % id)
				Cache.delete(cls.profile_cache_key_template % id)
				TokenIdMapping.remove_id(id)
				logging.error("Access was revoked for %s; cached data deleted.", id)

			logging.error("HTTP %s while trying to refresh access token for %s.", response.code, id)
			return IOLoop.instance().add_callback(lambda: callback(None))

		elif response.code != 200:
			logging.error("Non-200 response to refresh token request (%s, id=%s): %r" % (response.code, id, response.body))
			return IOLoop.instance().add_callback(lambda: callback(None))

		results = json.loads(response.body)

		# sanity check
		if results['token_type'] != "Bearer":
			logging.error('Unknown token type received: %s' % results['token_type'])
			return IOLoop.instance().add_callback(lambda: callback(None))

		token = results['access_token']
		Cache.set(cls.auth_cache_key_template % id, token, time=results['expires_in'])

		IOLoop.instance().add_callback(lambda: callback(token))
Example #21
def main():
    root_dir = os.path.abspath(os.path.split(__file__)[0])
    print(root_dir)
    app = Application([(r'/gfxtablet', GfxTabletHandler),
                       #(r'/(index.js|src/.*\.js|node_modules/.*\.js)', StaticFileHandler, {}),
                       (r'/', MainHandler)],
                      debug=config.get('DEBUG', False), static_path=root_dir, static_url_prefix='/static/')

    _logger.info("app.settings:\n%s" % '\n'.join(['%s: %s' % (k, str(v))
                                                  for k, v in sorted(app.settings.items(),
                                                                     key=itemgetter(0))]))

    port = config.get('PORT', 5000)

    app.listen(port)
    _logger.info("listening on port %d" % port)
    _logger.info("press CTRL-C to terminate the server")
    _logger.info("""
           -----------
        G f x T a b l e t
    *************************
*********************************
STARTING TORNADO APP!!!!!!!!!!!!!
*********************************
    *************************
        G f x T a b l e t
           -----------
""")
    IOLoop.instance().start()
Example #22
File: server.py Project: IIHE/pyglidein
def main():
    parser = OptionParser()
    parser.add_option('-p', '--port', type='int', default=11001,
                      help='Port to serve from (default: 11001)')
    parser.add_option('-u', '--user', type='string', default=None,
                      help='Only track a single user')
    parser.add_option('--constraint', type='string', default=None,
                      help='HTCondor constraint expression')
    parser.add_option('--delay', type='int', default=300,
                      help='delay between calls to condor_q (default: 300 seconds)')
    parser.add_option('--debug', action='store_true', default=False,
                      help='Enable debug logging')
    (options, args) = parser.parse_args()

    if options.debug:
        logging.basicConfig(level=logging.DEBUG)
    else:
        logging.basicConfig(level=logging.INFO)

    if options.delay < 0 or options.delay > 1000:
        raise Exception('delay out of range')

    cfg = {'options':options, 'condor_q':False, 'state':[], 'monitoring':{}}

    # load condor_q
    IOLoop.instance().call_later(5, partial(condor_q_helper, cfg))

    # setup server
    s = server(cfg)
    s.start()
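
IOLoop.call_later above schedules a single delayed call (condor_q_helper presumably re-arms itself); for a fixed cadence, PeriodicCallback is the usual alternative. A sketch:

from tornado.ioloop import IOLoop, PeriodicCallback


def poll_condor_q():
    print("polling condor_q...")   # stand-in for the real query


# PeriodicCallback takes milliseconds: re-run the poll every 300 seconds.
PeriodicCallback(poll_condor_q, 300 * 1000).start()
IOLoop.current().start()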
Example #23
File: le.py Project: jcsy521/ydws
 def _on_finish():
     try:
         zs_response = self.zsle_request(data['sim'])
         zlp = ZsLeParser(zs_response.replace('GBK', 'UTF-8'))
         if zlp.success == "0":
             ret.success = ErrorCode.SUCCESS
             ret.position = zlp.get_position()
             ret.info = ErrorCode.ERROR_MESSAGE[ret.success]
             logging.info("[LE] Zsle response position: %s, sim:%s", ret.position, data['sim'])
         else:
             if zlp.success == "9999228":
                 callback = partial(self.re_subscription, data['sim'])
                 IOLoop.instance().add_timeout(int(time.time()) + 5, callback)
             logging.info("[LE] Zsle request failed, errorcode: %s, info: %s, sim:%s",
                          zlp.success, zlp.info, data['sim'])
             # logging.info('[LE] Google request:\n %s', request)
             # response = self.send(ConfHelper.LBMP_CONF.le_host, 
             #                      ConfHelper.LBMP_CONF.le_url, 
             #                      request,
             #                      HTTP.METHOD.POST)
             # logging.info('[LE] Google response:\n %s', response.decode('utf8'))
             # json_data = json_decode(response)
             # if json_data.get("location"):
             #     ret.position.lat = int(json_data["location"]["latitude"] * 3600000)
             #     ret.position.lon = int(json_data["location"]["longitude"] * 3600000)
             #     ret.success = ErrorCode.SUCCESS 
             #     ret.info = ErrorCode.ERROR_MESSAGE[ret.success]
     except Exception as e:
         logging.exception("[LE] Get latlon failed. Exception: %s, sim:%s", e.args, data['sim'])
     self.write(ret)
     IOLoop.instance().add_callback(self.finish)
Example #24
def main():
    '''Create server, begin IOLoop 
    '''
    tornado.options.parse_command_line()
    http_server = HTTPServer(Application(), xheaders=True)
    http_server.listen(options.port)
    IOLoop.instance().start()
Example #25
    def test_stepdown_triggers_refresh(self, done):
        c_find_one = motor.MotorReplicaSetClient(
            self.seed, replicaSet=self.name).open_sync()

        # We've started the primary and one secondary
        primary = ha_tools.get_primary()
        secondary = ha_tools.get_secondaries()[0]
        self.assertEqual(
            one(c_find_one.secondaries), _partition_node(secondary))

        ha_tools.stepdown_primary()

        # Make sure the stepdown completes
        yield gen.Task(IOLoop.instance().add_timeout, time.time() + 1)

        # Trigger a refresh
        yield AssertRaises(AutoReconnect, c_find_one.test.test.find_one)

        # Wait for the immediate refresh to complete - we're not waiting for
        # the periodic refresh, which has been disabled
        yield gen.Task(IOLoop.instance().add_timeout, time.time() + 1)

        # We've detected the stepdown
        self.assertTrue(
            not c_find_one.primary
            or primary != _partition_node(c_find_one.primary))

        done()
Example #26
    def test_set_not_none_trace_equal_trace_id(self):
        new_trace = Trace(300, 20, 10)

        IOLoop.current().run_sync(partial(self.tx.dummy, trace=new_trace))
        # Keep old trace_id, keep old logger
        assert self.tx.trace_id is not new_trace.traceid
        assert self.tx.log is self.initial_log
Example #27
File: Server.py Project: irr/stock-labs
def main():
    global http_server

    try:
        signal(SIGTERM, on_signal)

        parse_command_line()
        if options.config != None:
            parse_config_file(options.config)

        path = join(dirname(__file__), "templates")

        application = Application(
            [(r"/", IndexHandler), (r"/stock", StockHandler)],
            template_path=path,
            static_path=join(dirname(__file__), "static"),
        )

        application.db = motor.MotorClient(options.db_host, options.db_port).open_sync()[options.db_name]

        http_server = HTTPServer(application)
        http_server.listen(options.port, options.address)
        log().info("server listening on port %s:%d" % (options.address, options.port))
        if log().isEnabledFor(DEBUG):
            log().debug("autoreload enabled")
            tornado.autoreload.start()
        IOLoop.instance().start()

    except KeyboardInterrupt:
        log().info("exiting...")

    except BaseException as ex:
        log().error("exiting due: [%s][%s]" % (str(ex), str(format_exc().splitlines())))
        exit(1)
Example #28
File: app.py Project: ugoano/sbchatter
def main():
	define('listen', metavar='IP', default='127.0.0.1', help='listen on IP address (default 127.0.0.1)')
	define('port', metavar='PORT', default=8888, type=int, help='listen on PORT (default 8888)')
	define('debug', metavar='True|False', default=False, type=bool, 
			help='enable Tornado debug mode: templates will not be cached '
			'and the app will watch for changes to its source files '
			'and reload itself when anything changes')

	options.parse_command_line()

	settings = dict(
			template_path=rel('templates'),
			static_path=rel('static'),
			debug=options.debug
			)

	application = Application([
		(r'/', MainHandler),
		(r'/ws', EchoWebSocket),
		(r'/websocket', SignallingHandler),
		(r'/webrtc', WebRTCHandler)
		], **settings)

	#application.listen(address=options.listen, port=options.port)
	application.listen(7080)
	IOLoop.instance().start()
Example #29
 def test_set_not_none_trace(self):
     new_trace_id = 100
     new_trace = Trace(new_trace_id, 2, 1)
     IOLoop.current().run_sync(partial(self.tx.dummy, trace=new_trace))
     # Set new trace_id, set new logger adapter
     assert self.tx.trace_id == new_trace_id
     assert self.tx.log.extra == {'trace_id': '{:016x}'.format(new_trace_id)}
Example #30
File: oauth.py Project: astore/pluss
	def access_token_for_id(cls, id, callback):
		"""Returns the access token for an id, acquiring a new one if necessary."""
		token = Cache.get(cls.auth_cache_key_template % id)
		if token:
			return IOLoop.instance().add_callback(lambda: callback(token))

		# If we don't have an access token cached, see if we have a refresh token
		token = TokenIdMapping.lookup_refresh_token(id)
		if token:
			post_body = urllib.urlencode({
				'client_id': Config.get('oauth', 'client-id'),
				'client_secret': Config.get('oauth', 'client-secret'),
				'refresh_token': token,
				'grant_type': 'refresh_token',
			})
			http_client = AsyncHTTPClient()
			return http_client.fetch(
				'https://accounts.google.com/o/oauth2/token',
				lambda response: cls.on_refresh_complete(response, id, callback),
				method='POST',
				body=post_body,
				request_timeout=20.0,
				connect_timeout=15.0,
			)
		else:
			logging.error("Unable to update access token for %s, no refresh token stored.", id)
			return IOLoop.instance().add_callback(lambda: callback(None))
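
Note how every branch returns through IOLoop.add_callback, even when the token is already cached: the callback is always delivered from the loop, never invoked inline from the caller's stack, so callers observe one consistent asynchronous ordering. Reduced to a sketch with a hypothetical cache:

from tornado.ioloop import IOLoop

CACHE = {"user-1": "cached-token"}   # hypothetical cache


def access_token(uid, callback):
    token = CACHE.get(uid)
    if token:
        # Defer even though the value is already known, so the callback
        # never fires synchronously inside the caller's frame.
        return IOLoop.current().add_callback(lambda: callback(token))
    # the slow path would fetch over the network; here we just fail
    return IOLoop.current().add_callback(lambda: callback(None))


def on_token(token):
    print("token:", token)
    IOLoop.current().stop()


access_token("user-1", on_token)
IOLoop.current().start()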
Example #31
 def f():
     self.current_io_loop = IOLoop.current()
     assert self.io_loop is not None
     self.io_loop.stop()
Example #32
    async def worker():
        async for url in q:
            if url is None:
                return
                return
            try:
                await fetch_url(url)
            except Exception as e:
                print("Exception: %s %s" % (e, url))
                dead.add(url)
            finally:
                q.task_done()  # for every item taken with get(), call task_done() right after to mark that task as finished

    await q.put(base_url)

    # Start workers, then wait for the work queue to be empty.
    workers = gen.multi([worker() for _ in range(concurrency)])  # multiple concurrent workers
    await q.join(
        timeout=timedelta(seconds=10)
    )  # Block until all items in the queue are processed with timeout
    assert fetching == (fetched | dead)
    print("Done in %d seconds, fetched %s URLs." %
          (time.time() - start, len(fetched)))
    print("Unable to fetch %s URLS." % len(dead))

    # Signal all the workers to exit.
    for _ in range(concurrency):
        await q.put(None)
    await workers


if __name__ == "__main__":
    io_loop = IOLoop.current()
    io_loop.run_sync(main)
Example #33
    async def get(self, provider_prefix, _unescaped_spec):
        """Get a built image for a given spec and repo provider.

        Different repo providers will require different spec information. This
        function relies on the functionality of the tornado `GET` request.

        Parameters
        ----------
            provider_prefix : str
                the nickname for a repo provider (i.e. 'gh')
            spec:
                specifies information needed by the repo provider (i.e. user,
                repo, ref, etc.)

        """
        prefix = '/build/' + provider_prefix
        spec = self.get_spec_from_request(prefix)

        # set up for sending event streams
        self.set_header('content-type', 'text/event-stream')
        self.set_header('cache-control', 'no-cache')

        # Verify if the provider is valid for EventSource.
        # EventSource cannot handle HTTP errors, so we must validate and send
        # error messages on the eventsource.
        if provider_prefix not in self.settings['repo_providers']:
            await self.fail("No provider found for prefix %s" % provider_prefix
                            )
            return

        # create a heartbeat
        IOLoop.current().spawn_callback(self.keep_alive)

        spec = spec.rstrip("/")
        key = '%s:%s' % (provider_prefix, spec)

        # get a provider object that encapsulates the provider and the spec
        try:
            provider = self.get_provider(provider_prefix, spec=spec)
        except Exception as e:
            app_log.exception("Failed to get provider for %s", key)
            await self.fail(str(e))
            return

        if provider.is_banned():
            await self.emit({
                'phase':
                'failed',
                'message':
                'Sorry, {} has been temporarily disabled from launching. Please contact admins for more info!'
                .format(spec)
            })
            return

        repo_url = self.repo_url = provider.get_repo_url()

        # labels to apply to build/launch metrics
        self.repo_metric_labels = {
            'provider': provider.name,
            'repo': repo_url,
        }

        try:
            ref = await provider.get_resolved_ref()
        except Exception as e:
            await self.fail("Error resolving ref for %s: %s" % (key, e))
            return
        if ref is None:
            await self.fail(
                "Could not resolve ref for %s. Double check your URL." % key)
            return

        self.ref_url = await provider.get_resolved_ref_url()
        resolved_spec = await provider.get_resolved_spec()

        badge_base_url = self.get_badge_base_url()
        self.binder_launch_host = badge_base_url or '{proto}://{host}{base_url}'.format(
            proto=self.request.protocol,
            host=self.request.host,
            base_url=self.settings['base_url'],
        )
        # These are relative URLs so do not have a leading /
        self.binder_request = 'v2/{provider}/{spec}'.format(
            provider=provider_prefix,
            spec=spec,
        )
        self.binder_persistent_request = 'v2/{provider}/{spec}'.format(
            provider=provider_prefix,
            spec=resolved_spec,
        )

        # generate a complete build name (for GitHub: `build-{user}-{repo}-{ref}`)

        image_prefix = self.settings['image_prefix']

        # Enforces max 255 characters before image
        safe_build_slug = self._safe_build_slug(provider.get_build_slug(),
                                                limit=255 - len(image_prefix))

        build_name = self._generate_build_name(provider.get_build_slug(),
                                               ref,
                                               prefix='build-')

        image_name = self.image_name = '{prefix}{build_slug}:{ref}'.format(
            prefix=image_prefix, build_slug=safe_build_slug,
            ref=ref).replace('_', '-').lower()

        if self.settings['use_registry']:
            for _ in range(3):
                try:
                    image_manifest = await self.registry.get_image_manifest(
                        *'/'.join(image_name.split('/')[-2:]).split(':', 1))
                    image_found = bool(image_manifest)
                    break
                except HTTPClientError:
                    app_log.exception(
                        "Tornado HTTP Timeout error: Failed to get image manifest for %s",
                        image_name)
                    image_found = False
        else:
            # Check if the image exists locally!
            # Assume we're running in single-node mode or all binder pods are assigned to the same node!
            docker_client = docker.from_env(version='auto')
            try:
                docker_client.images.get(image_name)
            except docker.errors.ImageNotFound:
                # image doesn't exist, so do a build!
                image_found = False
            else:
                image_found = True

        # Launch a notebook server if the image already is built
        kube = self.settings['kubernetes_client']

        if image_found:
            await self.emit({
                'phase': 'built',
                'imageName': image_name,
                'message': 'Found built image, launching...\n'
            })
            with LAUNCHES_INPROGRESS.track_inprogress():
                await self.launch(kube, provider)
            self.event_log.emit(
                'binderhub.jupyter.org/launch', 4, {
                    'provider':
                    provider.name,
                    'spec':
                    spec,
                    'ref':
                    ref,
                    'status':
                    'success',
                    'origin':
                    self.settings['normalized_origin'] if
                    self.settings['normalized_origin'] else self.request.host
                })
            return

        # Prepare to build
        q = Queue()

        if self.settings['use_registry']:
            push_secret = self.settings['push_secret']
        else:
            push_secret = None

        BuildClass = FakeBuild if self.settings.get('fake_build') else Build

        appendix = self.settings['appendix'].format(
            binder_url=self.binder_launch_host + self.binder_request,
            persistent_binder_url=self.binder_launch_host +
            self.binder_persistent_request,
            repo_url=repo_url,
            ref_url=self.ref_url,
        )

        self.build = build = BuildClass(
            q=q,
            api=kube,
            name=build_name,
            namespace=self.settings["build_namespace"],
            repo_url=repo_url,
            ref=ref,
            image_name=image_name,
            push_secret=push_secret,
            build_image=self.settings['build_image'],
            memory_limit=self.settings['build_memory_limit'],
            docker_host=self.settings['build_docker_host'],
            node_selector=self.settings['build_node_selector'],
            appendix=appendix,
            log_tail_lines=self.settings['log_tail_lines'],
            git_credentials=provider.git_credentials,
            sticky_builds=self.settings['sticky_builds'],
        )

        with BUILDS_INPROGRESS.track_inprogress():
            build_starttime = time.perf_counter()
            pool = self.settings['build_pool']
            # Start building
            submit_future = pool.submit(build.submit)
            # TODO: hook up actual error handling when this fails
            IOLoop.current().add_callback(lambda: submit_future)

            log_future = None

            # initial waiting event
            await self.emit({
                'phase': 'waiting',
                'message': 'Waiting for build to start...\n',
            })

            done = False
            failed = False
            while not done:
                progress = await q.get()

                # FIXME: If pod goes into an unrecoverable stage, such as ImagePullBackoff or
                # whatever, we should fail properly.
                if progress['kind'] == 'pod.phasechange':
                    if progress['payload'] == 'Pending':
                        # nothing to do, just waiting
                        continue
                    elif progress['payload'] == 'Deleted':
                        event = {
                            'phase': 'built',
                            'message': 'Built image, launching...\n',
                            'imageName': image_name,
                        }
                        done = True
                    elif progress['payload'] == 'Running':
                        # start capturing build logs once the pod is running
                        if log_future is None:
                            log_future = pool.submit(build.stream_logs)
                        continue
                    elif progress['payload'] == 'Succeeded':
                        # Do nothing, is ok!
                        continue
                    else:
                        # FIXME: message? debug?
                        event = {'phase': progress['payload']}
                elif progress['kind'] == 'log':
                    # We expect logs to be already JSON structured anyway
                    event = progress['payload']
                    payload = json.loads(event)
                    if payload.get('phase') in ('failure', 'failed'):
                        failed = True
                        BUILD_TIME.labels(
                            status='failure').observe(time.perf_counter() -
                                                      build_starttime)
                        BUILD_COUNT.labels(status='failure',
                                           **self.repo_metric_labels).inc()

                await self.emit(event)

        # Launch after building an image
        if not failed:
            BUILD_TIME.labels(status='success').observe(time.perf_counter() -
                                                        build_starttime)
            BUILD_COUNT.labels(status='success',
                               **self.repo_metric_labels).inc()
            with LAUNCHES_INPROGRESS.track_inprogress():
                await self.launch(kube, provider)
            self.event_log.emit(
                'binderhub.jupyter.org/launch', 4, {
                    'provider':
                    provider.name,
                    'spec':
                    spec,
                    'ref':
                    ref,
                    'status':
                    'success',
                    'origin':
                    self.settings['normalized_origin'] if
                    self.settings['normalized_origin'] else self.request.host
                })

        # Don't close the eventstream immediately.
        # (javascript) eventstream clients reconnect automatically on dropped connections,
        # so if the server closes the connection first,
        # the client will reconnect which starts a new build.
        # If we sleep here, that makes it more likely that a well-behaved
        # client will close its connection first.
        # The duration of this shouldn't matter because
        # well-behaved clients will close connections after they receive the launch event.
        await gen.sleep(60)
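
The event-stream mechanics of this handler (SSE headers, an emit helper, a spawn_callback heartbeat) can be separated from the BinderHub build logic. A bare-bones sketch under the assumptions visible above — the emit/keep_alive bodies are inferred from usage, and it leans on RequestHandler's private _finished flag for brevity:

import json

from tornado import gen, web
from tornado.ioloop import IOLoop
from tornado.iostream import StreamClosedError


class EventSourceHandler(web.RequestHandler):
    async def get(self):
        self.set_header("content-type", "text/event-stream")
        self.set_header("cache-control", "no-cache")
        IOLoop.current().spawn_callback(self.keep_alive)  # heartbeat
        for phase in ("waiting", "building", "built"):
            await self.emit({"phase": phase})
            await gen.sleep(1)

    async def emit(self, data):
        self.write("data: %s\n\n" % json.dumps(data))
        await self.flush()

    async def keep_alive(self):
        while not self._finished:
            await gen.sleep(8)
            try:
                self.write(":keepalive\n\n")
                await self.flush()
            except StreamClosedError:
                return


app = web.Application([(r"/build", EventSourceHandler)])
app.listen(8888)
IOLoop.current().start()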
Example #34
File: main.py Project: maq-622674/python
# coding=utf-8

from tornado.web import RequestHandler,Application,gen
from tornado.ioloop import IOLoop

class IndexHandler(RequestHandler):
    @gen.coroutine
    def get(self,*args,**kwargs):
        self.write('hello')

app=Application([
    (r'^/$',IndexHandler)
])

app.listen(9994)

IOLoop.instance().start()
Example #35
 def f():
     self.assertIs(IOLoop.current(), self.io_loop)
Example #36
 def setUp(self):
     self.io_loop = IOLoop(make_current=False)
Example #37
class TestIOLoopRunSync(unittest.TestCase):
    def setUp(self):
        self.io_loop = IOLoop(make_current=False)

    def tearDown(self):
        self.io_loop.close()

    def test_sync_result(self):
        with self.assertRaises(gen.BadYieldError):
            self.io_loop.run_sync(lambda: 42)

    def test_sync_exception(self):
        with self.assertRaises(ZeroDivisionError):
            self.io_loop.run_sync(lambda: 1 / 0)

    def test_async_result(self):
        @gen.coroutine
        def f():
            yield gen.moment
            raise gen.Return(42)

        self.assertEqual(self.io_loop.run_sync(f), 42)

    def test_async_exception(self):
        @gen.coroutine
        def f():
            yield gen.moment
            1 / 0

        with self.assertRaises(ZeroDivisionError):
            self.io_loop.run_sync(f)

    def test_current(self):
        def f():
            self.assertIs(IOLoop.current(), self.io_loop)

        self.io_loop.run_sync(f)

    def test_timeout(self):
        @gen.coroutine
        def f():
            yield gen.sleep(1)

        self.assertRaises(TimeoutError, self.io_loop.run_sync, f, timeout=0.01)

    def test_native_coroutine(self):
        @gen.coroutine
        def f1():
            yield gen.moment

        async def f2():
            await f1()

        self.io_loop.run_sync(f2)
Example #38
 def f():
     for i in range(10):
         loop = IOLoop(make_current=False)
         loop.close()
Example #39
def sig_handler(sig, frame):
    print('Caught signal: %s' % sig)
    IOLoop.instance().add_callback(shutdown)
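
The shutdown function is not shown above; a complete sketch of the pattern follows (Tornado also provides add_callback_from_signal for exactly this hand-off, but plain add_callback is what this snippet uses):

import signal

from tornado.ioloop import IOLoop


def shutdown():
    # runs on the IOLoop thread, so it may touch the loop directly
    print("stopping IOLoop")
    IOLoop.current().stop()


def sig_handler(sig, frame):
    # a signal handler interrupts arbitrary code, so it only hands
    # work back to the loop instead of manipulating it in place
    print("Caught signal: %s" % sig)
    IOLoop.current().add_callback(shutdown)


signal.signal(signal.SIGTERM, sig_handler)
signal.signal(signal.SIGINT, sig_handler)
IOLoop.current().start()   # Ctrl-C now stops the loop cleanly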
Example #40
 def setUp(self):
     setup_with_context_manager(self, ignore_deprecation())
     self.io_loop = None  # type: typing.Optional[IOLoop]
     IOLoop.clear_current()
Example #41
        def test_func():
            IOLoop.clear_instance()
            loop = IOLoop()
            loop.make_current()

            s, workers = loop.run_sync(
                lambda: start_cluster(ncores, Worker=Worker))
            args = [s] + workers

            if executor:
                e = Executor((s.ip, s.port), loop=loop, start=False)
                loop.run_sync(e._start)
                args = [e] + args
            try:
                loop.run_sync(lambda: cor(*args), timeout=timeout)
            finally:
                if executor:
                    loop.run_sync(e._shutdown)
                loop.run_sync(lambda: end_cluster(s, workers))
                loop.stop()
                loop.close(all_fds=True)
Example #42
File: __init__.py Project: bula87/AstroBox
class Server():
    def __init__(self,
                 configfile=None,
                 basedir=None,
                 host="0.0.0.0",
                 port=5000,
                 debug=False,
                 allowRoot=False,
                 logConf=None):
        self._configfile = configfile
        self._basedir = basedir
        self._host = host
        self._port = port
        self._debug = debug
        self._allowRoot = allowRoot
        self._logConf = logConf
        self._ioLoop = None

    def stop(self):
        if self._ioLoop:
            self._ioLoop.stop()
            self._ioLoop = None

    def run(self):
        if not self._allowRoot:
            self._checkForRoot()

        global userManager
        global eventManager
        global loginManager
        global debug
        global softwareManager
        global discoveryManager
        global VERSION
        global UI_API_KEY

        from tornado.wsgi import WSGIContainer
        from tornado.httpserver import HTTPServer
        from tornado.ioloop import IOLoop
        from tornado.web import Application, FallbackHandler

        from astroprint.printfiles.watchdogs import UploadCleanupWatchdogHandler

        debug = self._debug

        # first initialize the settings singleton and make sure it uses given configfile and basedir if available
        self._initSettings(self._configfile, self._basedir)
        s = settings()

        if not s.getBoolean(['api', 'regenerate']) and s.getString(
            ['api', 'key']):
            UI_API_KEY = s.getString(['api', 'key'])
        else:
            UI_API_KEY = ''.join('%02X' % ord(z) for z in uuid.uuid4().bytes)

        # then initialize logging
        self._initLogging(self._debug, self._logConf)
        logger = logging.getLogger(__name__)

        if s.getBoolean(["accessControl", "enabled"]):
            userManagerName = s.get(["accessControl", "userManager"])
            try:
                clazz = util.getClass(userManagerName)
                userManager = clazz()
            except AttributeError, e:
                logger.exception(
                    "Could not instantiate user manager %s, will run with accessControl disabled!"
                    % userManagerName)

        softwareManager = swManager()
        VERSION = softwareManager.versionString

        logger.info("Starting AstroBox (%s) - Commit (%s)" %
                    (VERSION, softwareManager.commit))

        from astroprint.migration import migrateSettings
        migrateSettings()

        pluginManager().loadPlugins()

        eventManager = events.eventManager()
        printer = printerManager(printerProfileManager().data['driver'])

        #Start some of the managers here to make sure there are no thread collisions
        from astroprint.network.manager import networkManager
        from astroprint.boxrouter import boxrouterManager

        networkManager()
        boxrouterManager()

        # configure timelapse
        #octoprint.timelapse.configureTimelapse()

        app.wsgi_app = ReverseProxied(app.wsgi_app)

        app.secret_key = boxrouterManager().boxId
        loginManager = LoginManager()
        loginManager.session_protection = "strong"
        loginManager.user_callback = load_user
        if userManager is None:
            loginManager.anonymous_user = users.DummyUser
            principals.identity_loaders.appendleft(users.dummy_identity_loader)
        loginManager.init_app(app)

        # setup command triggers
        events.CommandTrigger(printer)
        if self._debug:
            events.DebugEventListener()

        if networkManager().isOnline():
            softwareManager.checkForcedUpdate()

        if self._host is None:
            self._host = s.get(["server", "host"])
        if self._port is None:
            self._port = s.getInt(["server", "port"])

        app.debug = self._debug

        from octoprint.server.api import api

        app.register_blueprint(api, url_prefix="/api")

        boxrouterManager(
        )  # Makes sure the singleton is created here. It doesn't need to be stored
        self._router = SockJSRouter(self._createSocketConnection, "/sockjs")

        discoveryManager = DiscoveryManager()

        externalDriveManager()

        def access_validation_factory(validator):
            """
			Creates an access validation wrapper using the supplied validator.

			:param validator: the access validator to use inside the validation wrapper
			:return: an access validation wrapper taking a request as parameter and performing the request validation
			"""
            def f(request):
                """
				Creates a custom wsgi and Flask request context in order to be able to process user information
				stored in the current session.

				:param request: The Tornado request for which to create the environment and context
				"""
                wsgi_environ = tornado.wsgi.WSGIContainer.environ(request)
                with app.request_context(wsgi_environ):
                    app.session_interface.open_session(app, request)
                    loginManager.reload_user()
                    validator(request)

            return f

        self._tornado_app = Application(self._router.urls + [
            #(r"/downloads/timelapse/([^/]*\.mpg)", LargeResponseHandler, {"path": s.getBaseFolder("timelapse"), "as_attachment": True}),
            (r"/downloads/files/local/([^/]*\.(gco|gcode))",
             LargeResponseHandler, {
                 "path": s.getBaseFolder("uploads"),
                 "as_attachment": True
             }),
            (r"/downloads/logs/([^/]*)", LargeResponseHandler, {
                "path": s.getBaseFolder("logs"),
                "as_attachment": True,
                "access_validation": access_validation_factory(admin_validator)
            }),
            #(r"/downloads/camera/current", UrlForwardHandler, {"url": s.get(["webcam", "snapshot"]), "as_attachment": True, "access_validation": access_validation_factory(user_validator)}),
            (r".*", FallbackHandler, {
                "fallback": WSGIContainer(app.wsgi_app)
            })
        ])
        self._server = HTTPServer(self._tornado_app,
                                  max_buffer_size=1048576 *
                                  s.getInt(['server', 'maxUploadSize']))
        self._server.listen(self._port, address=self._host)

        logger.info("Listening on http://%s:%d" % (self._host, self._port))

        eventManager.fire(events.Events.STARTUP)
        if s.getBoolean(["serial", "autoconnect"]):
            (port, baudrate) = s.get(["serial", "port"
                                      ]), s.getInt(["serial", "baudrate"])
            connectionOptions = printer.getConnectionOptions()
            if port in connectionOptions["ports"]:
                t = threading.Thread(target=printer.connect,
                                     args=(port, baudrate))
                t.daemon = True
                t.start()

        # start up watchdogs
        observer = Observer()
        observer.daemon = True
        observer.schedule(UploadCleanupWatchdogHandler(),
                          s.getBaseFolder("uploads"))
        observer.start()

        #Load additional Tasks
        additionalTasksManager()

        #Load maintenance menu
        maintenanceMenuManager()

        try:
            self._ioLoop = IOLoop.instance()
            self._ioLoop.start()

        except SystemExit:
            pass

        except:
            logger.fatal(
                "Please report this including the stacktrace below in AstroPrint's bugtracker. Thanks!"
            )
            logger.exception("Stacktrace follows:")

        finally:
            observer.stop()
            self.cleanup()
            logger.info('Cleanup complete')

        observer.join(1.0)
        logger.info('Good Bye!')
Example #43
def cluster(nworkers=2, nanny=False):
    if nanny:
        _run_worker = run_nanny
    else:
        _run_worker = run_worker
    scheduler_q = Queue()
    scheduler = Process(target=run_scheduler, args=(scheduler_q, ))
    scheduler.daemon = True
    scheduler.start()
    sport = scheduler_q.get()

    workers = []
    for i in range(nworkers):
        q = Queue()
        fn = '_test_worker-%s' % uuid.uuid1()
        proc = Process(target=_run_worker,
                       args=(q, sport),
                       kwargs={
                           'ncores': 1,
                           'local_dir': fn
                       })
        workers.append({'proc': proc, 'queue': q, 'dir': fn})

    for worker in workers:
        worker['proc'].start()

    for worker in workers:
        worker['port'] = worker['queue'].get()

    loop = IOLoop()
    s = rpc(ip='127.0.0.1', port=sport)
    start = time()
    try:
        while True:
            ncores = loop.run_sync(s.ncores)
            if len(ncores) == nworkers:
                break
            if time() - start > 5:
                raise Exception("Timeout on cluster creation")

        yield {'proc': scheduler, 'port': sport}, workers
    finally:
        logger.debug("Closing out test cluster")
        with ignoring(socket.error, TimeoutError, StreamClosedError):
            loop.run_sync(lambda: disconnect('127.0.0.1', sport), timeout=0.5)
        scheduler.terminate()
        scheduler.join(timeout=2)

        for port in [w['port'] for w in workers]:
            with ignoring(socket.error, TimeoutError, StreamClosedError):
                loop.run_sync(lambda: disconnect('127.0.0.1', port),
                              timeout=0.5)
        for proc in [w['proc'] for w in workers]:
            with ignoring(Exception):
                proc.terminate()
                proc.join(timeout=2)
        for q in [w['queue'] for w in workers]:
            q.close()
        for fn in glob('_test_worker-*'):
            shutil.rmtree(fn)
        loop.close(all_fds=True)
Example #44
def run_scheduler(q, center_port=None, **kwargs):
    from distributed import Scheduler
    from tornado.ioloop import IOLoop, PeriodicCallback
    import logging
    IOLoop.clear_instance()
    loop = IOLoop()
    loop.make_current()
    PeriodicCallback(lambda: None, 500).start()
    logging.getLogger("tornado").setLevel(logging.CRITICAL)

    center = ('127.0.0.1', center_port) if center_port else None
    scheduler = Scheduler(center=center, **kwargs)
    scheduler.listen(0)

    if center_port:
        loop.run_sync(scheduler.sync_center)
    done = scheduler.start(0)

    q.put(scheduler.port)
    try:
        loop.start()
    finally:
        loop.close(all_fds=True)
Example #45
 def daemonize(self):
     """daemonize is probably not the best name because there is no daemon behind
     but we must keep it to the same interface of the DISET refresher"""
     _IOLoop.current().spawn_callback(self.__refreshLoop)
Example #46
        def test_func():
            IOLoop.clear_instance()
            loop = IOLoop()
            loop.make_current()

            cor = gen.coroutine(func)
            try:
                loop.run_sync(cor, timeout=timeout)
            finally:
                loop.stop()
                loop.close(all_fds=True)
Example #47
File: test.py Project: zhaoxin34/tl
def main():
    app = make_app()
    app.listen(8899)
    print('listen start aha')
    IOLoop.current().start()
Example #48
def run_nanny(q, center_port, **kwargs):
    from distributed import Nanny
    from tornado.ioloop import IOLoop, PeriodicCallback
    import logging
    with log_errors():
        IOLoop.clear_instance()
        loop = IOLoop()
        loop.make_current()
        PeriodicCallback(lambda: None, 500).start()
        logging.getLogger("tornado").setLevel(logging.CRITICAL)
        worker = Nanny('127.0.0.1', center_port, ip='127.0.0.1', **kwargs)
        loop.run_sync(lambda: worker._start(0))
        q.put(worker.port)
        try:
            loop.start()
        finally:
            loop.close(all_fds=True)
Example #49
File: aucote.py Project: rydzykje/aucote
async def main():
    """
    Main function of aucote project
    Returns:

    """
    print("%s, version: %s.%s.%s" % ((APP_NAME,) + VERSION))

    # parse arguments
    parser = argparse.ArgumentParser(description='Tests compliance of devices.')
    parser.add_argument("--cfg", help="config file path")
    parser.add_argument('cmd', help="aucote command", type=str, default='service',
                        choices=['scan', 'service'],
                        nargs='?')
    parser.add_argument("--syncdb", action="store_true", help="Synchronize database")
    args = parser.parse_args()

    # read configuration
    await cfg_load(args.cfg)

    log.info("%s, version: %s.%s.%s", APP_NAME, *VERSION)

    try:
        lock = open(cfg['pid_file'], 'w')
        fcntl.lockf(lock, fcntl.LOCK_EX | fcntl.LOCK_NB)
    except IOError:
        log.error("There is another Aucote instance running already")
        sys.exit(1)

    exploit_filename = cfg['fixtures.exploits.filename']
    try:
        exploits = Exploits.read(file_name=exploit_filename)
    except NmapUnsupported:
        log.exception("Cofiguration seems to be invalid. Check ports and services or contact with collective-sense")
        exit(1)

    def get_kuduworker():
        """
        Get kudu worker if enable, mock otherwise

        Returns:
            KuduQueue|MagicMock

        """
        if cfg['kuduworker.enable']:
            return KuduQueue(cfg['kuduworker.queue.address'])
        return MagicMock()  # used for local testing

    with get_kuduworker() as kudu_queue:
        aucote = Aucote(exploits=exploits, kudu_queue=kudu_queue, tools_config=EXECUTOR_CONFIG)

        if args.syncdb:
            log.info('Pushing %s exploits to the database', len(exploits))
            aucote.run_syncdb()

        if args.cmd == 'scan':
            await aucote.run_scan(as_service=False)
            IOLoop.current().stop()
        elif args.cmd == 'service':
            while True:
                await aucote.run_scan()
                cfg.reload(cfg['config_filename'])
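The single-instance guard above relies on a non-blocking exclusive lock on the pid file; a self-contained sketch of the same pattern (the file path is an assumption):

import fcntl
import os
import sys

lock = open('/tmp/myapp.pid', 'w')
try:
    fcntl.lockf(lock, fcntl.LOCK_EX | fcntl.LOCK_NB)
except IOError:
    print('another instance is already running')
    sys.exit(1)
lock.write(str(os.getpid()))
lock.flush()
# keep `lock` open for the lifetime of the process; the lock is released on exit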
Example #50
File: base.py  Project: fernandog/Medusa
    def __init__(self, *args, **kwargs):
        super(WebHandler, self).__init__(*args, **kwargs)
        self.io_loop = IOLoop.current()
Example #51
    def _run(
        cls,
        worker_args,
        worker_kwargs,
        worker_start_args,
        silence_logs,
        init_result_q,
        child_stop_q,
        uid,
        env,
        Worker,
    ):  # pragma: no cover
        os.environ.update(env)
        try:
            from dask.multiprocessing import initialize_worker_process
        except ImportError:  # old Dask version
            pass
        else:
            initialize_worker_process()

        if silence_logs:
            logger.setLevel(silence_logs)

        IOLoop.clear_instance()
        loop = IOLoop()
        loop.make_current()
        worker = Worker(*worker_args, **worker_kwargs)

        @gen.coroutine
        def do_stop(timeout=5, executor_wait=True):
            try:
                yield worker.close(
                    report=False,
                    nanny=False,
                    executor_wait=executor_wait,
                    timeout=timeout,
                )
            finally:
                loop.stop()

        def watch_stop_q():
            """
            Wait for an incoming stop message and then stop the
            worker cleanly.
            """
            while True:
                try:
                    msg = child_stop_q.get(timeout=1000)
                except Empty:
                    pass
                else:
                    child_stop_q.close()
                    assert msg.pop("op") == "stop"
                    loop.add_callback(do_stop, **msg)
                    break

        t = threading.Thread(target=watch_stop_q, name="Nanny stop queue watch")
        t.daemon = True
        t.start()

        @gen.coroutine
        def run():
            """
            Try to start worker and inform parent of outcome.
            """
            try:
                yield worker._start(*worker_start_args)
            except Exception as e:
                logger.exception("Failed to start worker")
                init_result_q.put({"uid": uid, "exception": e})
                init_result_q.close()
            else:
                assert worker.address
                init_result_q.put(
                    {"address": worker.address, "dir": worker.local_dir, "uid": uid}
                )
                init_result_q.close()
                yield worker.wait_until_closed()
                logger.info("Worker closed")

        try:
            loop.run_sync(run)
        except TimeoutError:
            # Loop was stopped before wait_until_closed() returned, ignore
            pass
        except KeyboardInterrupt:
            pass
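The watch_stop_q/do_stop pair above is an instance of a general rule: only IOLoop.add_callback is safe to call from another thread, so the watcher thread hands the stop request over to the loop instead of closing the worker itself. A minimal self-contained sketch:

import threading
from tornado.ioloop import IOLoop

loop = IOLoop()
loop.make_current()

def from_other_thread():
    # add_callback is the one IOLoop entry point that is thread-safe
    loop.add_callback(loop.stop)

threading.Timer(1.0, from_other_thread).start()
loop.start()  # returns about one second later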
Example #52
File: test.py  Project: zhaoxin34/tl
try:
    @gen.coroutine
    def visitServer(url, **kwargs):
        request = None
        if kwargs:
            request = HTTPRequest(url, **kwargs)
        else:
            request = HTTPRequest(url)

        print('=' * 50, kwargs and kwargs['method'])
        response = yield httpclient.fetch(request)
        cprint('response.headers:{0}'.format(response.headers), 'blue')
        cprint('response.body:{0}'.format(response.body), 'blue')

    # visitServer('http://localhost:8888/')
    # visitServer('http://localhost:8888/?test=113')
    post_data = {'arg1': '赵鑫test'}
    body = parse.urlencode(post_data)
    visitServer(
        'http://localhost:8899/?test=113',
        method="POST",
        headers={
            'header1': 'header1 value',
            'Content-Type': 'application/x-www-form-urlencoded'
        },
        # body='arg1=' + tornado.escape.url_escape('赵鑫'))
        body=body)
    time.sleep(2)
finally:
    IOLoop.current().stop()
    pass
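Note that visitServer is a coroutine, so calling it bare (as above) only returns a Future that still needs a running IOLoop to make progress; a corrected, hedged sketch of a one-shot fetch:

from tornado import gen
from tornado.httpclient import AsyncHTTPClient
from tornado.ioloop import IOLoop

@gen.coroutine
def fetch_once(url):
    response = yield AsyncHTTPClient().fetch(url)
    raise gen.Return(response.body)

body = IOLoop.current().run_sync(lambda: fetch_once('http://localhost:8899/'))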
Example #53
class LoopRunner(object):
    """
    A helper to start and stop an IO loop in a controlled way.
    Several loop runners can associate safely to the same IO loop.

    Parameters
    ----------
    loop: IOLoop (optional)
        If given, this loop will be re-used, otherwise an appropriate one
        will be looked up or created.
    asynchronous: boolean (optional, default False)
        If false (the default), the loop is meant to run in a separate
        thread and will be started if necessary.
        If true, the loop is meant to run in the thread this
        object is instantiated from, and will not be started automatically.
    """
    # All loops currently associated to loop runners
    _all_loops = weakref.WeakKeyDictionary()
    _lock = threading.Lock()

    def __init__(self, loop=None, asynchronous=False):
        current = IOLoop.current()
        if loop is None:
            if asynchronous:
                self._loop = current
            else:
                # We're expecting the loop to run in another thread,
                # avoid re-using this thread's assigned loop
                self._loop = IOLoop()
            self._should_close_loop = True
        else:
            self._loop = loop
            self._should_close_loop = False
        self._asynchronous = asynchronous
        self._loop_thread = None
        self._started = False
        with self._lock:
            self._all_loops.setdefault(self._loop, (0, None))

    def start(self):
        """
        Start the IO loop if required.  The loop is run in a dedicated
        thread.

        If the loop is already running, this method does nothing.
        """
        with self._lock:
            self._start_unlocked()

    def _start_unlocked(self):
        assert not self._started

        count, real_runner = self._all_loops[self._loop]
        if (self._asynchronous or real_runner is not None or count > 0):
            self._all_loops[self._loop] = count + 1, real_runner
            self._started = True
            return

        assert self._loop_thread is None
        assert count == 0

        loop_evt = threading.Event()
        done_evt = threading.Event()
        in_thread = [None]
        start_exc = [None]

        def loop_cb():
            in_thread[0] = threading.current_thread()
            loop_evt.set()

        def run_loop(loop=self._loop):
            loop.add_callback(loop_cb)
            try:
                loop.start()
            except Exception as e:
                start_exc[0] = e
            finally:
                done_evt.set()

        thread = threading.Thread(target=run_loop, name="IO loop")
        thread.daemon = True
        thread.start()

        loop_evt.wait(timeout=10)
        self._started = True

        actual_thread = in_thread[0]
        if actual_thread is not thread:
            # Loop already running in other thread (user-launched)
            done_evt.wait(5)
            if not isinstance(start_exc[0], RuntimeError):
                if not isinstance(start_exc[0],
                                  Exception):  # track down infrequent error
                    raise TypeError("not an exception", start_exc[0])
                raise start_exc[0]
            self._all_loops[self._loop] = count + 1, None
        else:
            assert start_exc[0] is None, start_exc
            self._loop_thread = thread
            self._all_loops[self._loop] = count + 1, self

    def stop(self, timeout=10):
        """
        Stop and close the loop if it was created by us.
        Otherwise, just mark this object "stopped".
        """
        with self._lock:
            self._stop_unlocked(timeout)

    def _stop_unlocked(self, timeout):
        if not self._started:
            return

        self._started = False

        count, real_runner = self._all_loops[self._loop]
        if count > 1:
            self._all_loops[self._loop] = count - 1, real_runner
        else:
            assert count == 1
            del self._all_loops[self._loop]
            if real_runner is not None:
                real_runner._real_stop(timeout)

    def _real_stop(self, timeout):
        if self._loop_thread is not None:
            try:
                self._loop.add_callback(self._loop.stop)
                self._loop_thread.join(timeout=timeout)
                with ignoring(KeyError):  # IOLoop can be missing
                    self._loop.close()
            finally:
                self._loop_thread = None

    def is_started(self):
        """
        Return True between start() and stop() calls, False otherwise.
        """
        return self._started

    def run_sync(self, func, *args, **kwargs):
        """
        Convenience helper: start the loop if needed,
        run sync(func, *args, **kwargs), then stop the loop again.
        """
        if self._started:
            return sync(self.loop, func, *args, **kwargs)
        else:
            self.start()
            try:
                return sync(self.loop, func, *args, **kwargs)
            finally:
                self.stop()

    @property
    def loop(self):
        return self._loop
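A hedged usage sketch for LoopRunner:

from tornado import gen

runner = LoopRunner()
runner.start()  # spins the loop up in a dedicated thread if needed
try:
    @gen.coroutine
    def hello():
        yield gen.sleep(0.1)
        raise gen.Return('done')

    print(runner.run_sync(hello))  # -> 'done'
finally:
    runner.stop()  # closes the loop only if this runner created it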
Example #54
File: aucote.py  Project: rydzykje/aucote
    def stop_scan(self, key, value, scan_name=None):
        """
        Stop scan with given name (scan_name)

        """
        scan_name = self._get_scan_name(self.SCAN_CONTROL_STOP, key) if scan_name is None else scan_name

        if value is True:
            log.debug('Stopping %s based on Toucan request', scan_name)
            cfg.toucan.put(key, False)
            self.ioloop.add_callback(self.async_task_managers[TaskManagerType.SCANNER].cron_task(scan_name).func.stop)

    def resume_scan(self, key, value, scan_name: Optional[str] = None):
        scan_name = self._get_scan_name(self.SCAN_CONTROL_RESUME, key) if scan_name is None else scan_name

        if value is True:
            log.debug('Resuming %s based on Toucan request', scan_name)
            cfg.toucan.put(key, False)
            self.ioloop.add_callback(partial(
                self.async_task_managers[TaskManagerType.SCANNER].cron_task(scan_name), skip_cron=True, resume=True))


# =================== start app =================

if __name__ == "__main__":  # pragma: no cover
    chdir(dirname(realpath(__file__)))
    AsyncIOMainLoop().install()
    IOLoop.current().add_callback(main)
    asyncio.get_event_loop().run_forever()
Example #55
def status_file_timeout():
    write_dgl_status_file()
    IOLoop.current().add_timeout(time.time() + config.status_file_update_rate,
                                 status_file_timeout)
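The self-rescheduling add_timeout chain above is the hand-rolled equivalent of Tornado's PeriodicCallback; a sketch (the interval is an assumption; PeriodicCallback takes milliseconds):

from tornado.ioloop import PeriodicCallback

pc = PeriodicCallback(write_dgl_status_file, 60 * 1000)  # every 60 seconds
pc.start()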
Example #56
    def __init__(
        self,
        scheduler_ip=None,
        scheduler_port=None,
        scheduler_file=None,
        worker_port=0,
        ncores=None,
        loop=None,
        local_dir="dask-worker-space",
        services=None,
        name=None,
        memory_limit="auto",
        reconnect=True,
        validate=False,
        quiet=False,
        resources=None,
        silence_logs=None,
        death_timeout=None,
        preload=(),
        preload_argv=[],
        security=None,
        contact_address=None,
        listen_address=None,
        worker_class=None,
        env=None,
        interface=None,
        host=None,
        port=None,
        protocol=None,
        **worker_kwargs
    ):
        self.loop = loop or IOLoop.current()
        self.security = security or Security()
        assert isinstance(self.security, Security)
        self.connection_args = self.security.get_connection_args("worker")
        self.listen_args = self.security.get_listen_args("worker")

        if scheduler_file:
            cfg = json_load_robust(scheduler_file)
            self.scheduler_addr = cfg["address"]
        elif scheduler_ip is None and dask.config.get("scheduler-address"):
            self.scheduler_addr = dask.config.get("scheduler-address")
        elif scheduler_port is None:
            self.scheduler_addr = coerce_to_address(scheduler_ip)
        else:
            self.scheduler_addr = coerce_to_address((scheduler_ip, scheduler_port))

        self._given_worker_port = worker_port
        self.ncores = ncores or _ncores
        self.reconnect = reconnect
        self.validate = validate
        self.resources = resources
        self.death_timeout = parse_timedelta(death_timeout)
        self.preload = preload
        self.preload_argv = preload_argv
        self.Worker = Worker if worker_class is None else worker_class
        self.env = env or {}
        self.worker_kwargs = worker_kwargs

        self.contact_address = contact_address
        self.memory_terminate_fraction = dask.config.get(
            "distributed.worker.memory.terminate"
        )

        self.local_dir = local_dir

        self.services = services
        self.name = name
        self.quiet = quiet
        self.auto_restart = True

        self.memory_limit = parse_memory_limit(memory_limit, self.ncores)

        if silence_logs:
            silence_logging(level=silence_logs)
        self.silence_logs = silence_logs

        handlers = {
            "instantiate": self.instantiate,
            "kill": self.kill,
            "restart": self.restart,
            # cannot call it 'close' on the rpc side due to a naming conflict
            "terminate": self.close,
            "run": self.run,
        }

        super(Nanny, self).__init__(
            handlers=handlers, io_loop=self.loop, connection_args=self.connection_args
        )

        self.scheduler = self.rpc(self.scheduler_addr)

        if self.memory_limit:
            pc = PeriodicCallback(self.memory_monitor, 100, io_loop=self.loop)
            self.periodic_callbacks["memory"] = pc

        self._start_address = address_from_user_args(
            host=host,
            port=port,
            interface=interface,
            protocol=protocol,
            security=security,
        )

        self._listen_address = listen_address
        Nanny._instances.add(self)
        self.status = "init"
Example #57
def main(scheduler, host, worker_port, listen_address, contact_address,
         nanny_port, nthreads, nprocs, nanny, name, pid_file, resources,
         dashboard, bokeh, bokeh_port, scheduler_file, dashboard_prefix,
         tls_ca_file, tls_cert, tls_key, dashboard_address, worker_class,
         **kwargs):
    g0, g1, g2 = gc.get_threshold()  # https://github.com/dask/distributed/issues/1653
    gc.set_threshold(g0 * 3, g1 * 3, g2 * 3)

    enable_proctitle_on_current()
    enable_proctitle_on_children()

    if bokeh_port is not None:
        warnings.warn(
            "The --bokeh-port flag has been renamed to --dashboard-address. "
            "Consider adding ``--dashboard-address :%d`` " % bokeh_port)
        dashboard_address = bokeh_port
    if bokeh is not None:
        warnings.warn(
            "The --bokeh/--no-bokeh flag has been renamed to --dashboard/--no-dashboard. "
        )
        dashboard = bokeh

    sec = Security(
        **{
            k: v
            for k, v in [
                ("tls_ca_file", tls_ca_file),
                ("tls_worker_cert", tls_cert),
                ("tls_worker_key", tls_key),
            ] if v is not None
        })

    if nprocs > 1 and worker_port != 0:
        logger.error(
            "Failed to launch worker.  You cannot use the --port argument when nprocs > 1."
        )
        sys.exit(1)

    if nprocs > 1 and not nanny:
        logger.error(
            "Failed to launch worker.  You cannot use the --no-nanny argument when nprocs > 1."
        )
        sys.exit(1)

    if contact_address and not listen_address:
        logger.error(
            "Failed to launch worker. "
            "Must specify --listen-address when --contact-address is given")
        sys.exit(1)

    if nprocs > 1 and listen_address:
        logger.error("Failed to launch worker. "
                     "You cannot specify --listen-address when nprocs > 1.")
        sys.exit(1)

    if (worker_port or host) and listen_address:
        logger.error(
            "Failed to launch worker. "
            "You cannot specify --listen-address when --worker-port or --host is given."
        )
        sys.exit(1)

    try:
        if listen_address:
            (host, worker_port) = get_address_host_port(listen_address,
                                                        strict=True)

        if contact_address:
            # we only need this to verify it is getting parsed
            (_, _) = get_address_host_port(contact_address, strict=True)
        else:
            # if contact address is not present we use the listen_address for contact
            contact_address = listen_address
    except ValueError as e:
        logger.error("Failed to launch worker. " + str(e))
        sys.exit(1)

    if nanny:
        port = nanny_port
    else:
        port = worker_port

    if not nthreads:
        nthreads = CPU_COUNT // nprocs

    if pid_file:
        with open(pid_file, "w") as f:
            f.write(str(os.getpid()))

        def del_pid_file():
            if os.path.exists(pid_file):
                os.remove(pid_file)

        atexit.register(del_pid_file)

    if resources:
        resources = resources.replace(",", " ").split()
        resources = dict(pair.split("=") for pair in resources)
        resources = valmap(float, resources)
    else:
        resources = None

    loop = IOLoop.current()

    worker_class = import_term(worker_class)
    if nanny:
        kwargs["worker_class"] = worker_class
        kwargs.update({
            "worker_port": worker_port,
            "listen_address": listen_address
        })
        t = Nanny
    else:
        if nanny_port:
            kwargs["service_ports"] = {"nanny": nanny_port}
        t = worker_class

    if (not scheduler and not scheduler_file
            and dask.config.get("scheduler-address", None) is None):
        raise ValueError("Need to provide scheduler address like\n"
                         "dask-worker SCHEDULER_ADDRESS:8786")

    with ignoring(TypeError, ValueError):
        name = int(name)

    if "DASK_INTERNAL_INHERIT_CONFIG" in os.environ:
        config = deserialize_for_cli(
            os.environ["DASK_INTERNAL_INHERIT_CONFIG"])
        # Update the global config given priority to the existing global config
        dask.config.update(dask.config.global_config, config, priority="old")

    nannies = [
        t(scheduler,
          scheduler_file=scheduler_file,
          nthreads=nthreads,
          loop=loop,
          resources=resources,
          security=sec,
          contact_address=contact_address,
          host=host,
          port=port,
          dashboard_address=dashboard_address if dashboard else None,
          service_kwargs={"dashboard": {
              "prefix": dashboard_prefix
          }},
          name=name if nprocs == 1 or name is None or name == "" else
          str(name) + "-" + str(i),
          **kwargs) for i in range(nprocs)
    ]

    async def close_all():
        # Unregister all workers from scheduler
        if nanny:
            await asyncio.gather(*[n.close(timeout=2) for n in nannies])

    signal_fired = False

    def on_signal(signum):
        nonlocal signal_fired
        signal_fired = True
        if signum != signal.SIGINT:
            logger.info("Exiting on signal %d", signum)
        return asyncio.ensure_future(close_all())

    async def run():
        await asyncio.gather(*nannies)
        await asyncio.gather(*[n.finished() for n in nannies])

    install_signal_handlers(loop, cleanup=on_signal)

    try:
        loop.run_sync(run)
    except TimeoutError:
        # We already log the exception in nanny / worker. Don't do it again.
        if not signal_fired:
            logger.info("Timed out starting worker")
        sys.exit(1)
    except KeyboardInterrupt:
        pass
    finally:
        logger.info("End worker")
Example #58
def run_pylinkjs_app(**kwargs):
    # exit on Ctrl-C
    signal.signal(signal.SIGINT, signal_handler)

    kwargs.setdefault('port', 8300)
    kwargs.setdefault('default_html', 'index.html')
    kwargs.setdefault('html_dir', '.')
    kwargs.setdefault('login_html_page',
                      os.path.join(os.path.dirname(__file__), 'login.html'))
    if 'cookie_secret' not in kwargs:
        logging.warning('COOKIE SECRET IS INSECURE!  PLEASE CHANGE')
        kwargs['cookie_secret'] = 'GENERIC COOKIE SECRET'
    kwargs.setdefault('heartbeat_callback', None)
    kwargs.setdefault('heartbeat_interval', None)
    kwargs.setdefault('login_handler', LoginHandler)
    kwargs.setdefault('logout_handler', LogoutHandler)
    kwargs.setdefault('extra_settings', {})

    asyncio.set_event_loop_policy(AnyThreadEventLoopPolicy())

    request_handlers = [
        (r"/websocket/.*", PyLinkJSWebSocketHandler, {
            'all_jsclients': ALL_JSCLIENTS
        }),
        (r"/login", kwargs['login_handler']),
        (r"/logout", kwargs['logout_handler']),
        (r"/.*", MainHandler),
    ]

    app = tornado.web.Application(request_handlers,
                                  default_html=kwargs['default_html'],
                                  html_dir=kwargs['html_dir'],
                                  login_html_page=kwargs['login_html_page'],
                                  cookie_secret=kwargs['cookie_secret'],
                                  on_404=kwargs.get('on_404', None),
                                  **kwargs['extra_settings'])

    caller_globals = inspect.stack()[1][0].f_globals

    # start additional ioloops on new threads
    threading.Thread(target=start_pycallback_handler_ioloop,
                     args=(caller_globals, ),
                     daemon=True).start()
    threading.Thread(target=start_retval_handler_ioloop, args=(),
                     daemon=True).start()
    threading.Thread(target=start_execjs_handler_ioloop, args=(),
                     daemon=True).start()
    if kwargs['heartbeat_interval']:
        threading.Thread(target=heartbeat_threadworker,
                         args=(kwargs['heartbeat_callback'],
                               kwargs['heartbeat_interval']),
                         daemon=True).start()

    # start the tornado server
    app.listen(kwargs['port'])
    app.settings['on_context_close'] = kwargs.get('onContextClose', None)
    app.settings['on_context_open'] = kwargs.get('onContextOpen', None)
    logging.info('**** Starting app on port %d' % kwargs['port'])
    IOLoop.current().start()
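A hedged usage sketch; anything not passed explicitly falls back to the defaults filled in above (the page name and interval are assumptions):

def on_heartbeat():
    print('heartbeat')

run_pylinkjs_app(default_html='main.html', port=8300,
                 heartbeat_callback=on_heartbeat, heartbeat_interval=10)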
Example #59
from tools import Env, Init
from controllers.api.HelloController import HelloController
from controllers.api.UserController import UserController
from controllers.api.ClassController import ClassController
from controllers.api.SemesterController import SemesterController
from controllers.api.YearController import YearController
from controllers.api.AuthController import AuthController
from controllers.api.FileUploadController import FileUploadController


ROUTES = [
    (r"/api/hello/", HelloController),
    (r"/api/user/", UserController),
    (r"/api/class/", ClassController),
    (r"/api/semester/", SemesterController),
    (r"/api/year/", YearController),
    (r"/api/auth/", AuthController),
    (r"/api/fileupload/", FileUploadController),
]

MONGO_URI = "mongodb://{0}:{1}@database:27017/admin".format("root", "root")
MONGO_CLIENT = MotorClient(MONGO_URI)
MONGO_DB = MONGO_CLIENT["viewgrade"]

if __name__ == "__main__":
    loop = asyncio.get_event_loop()
    app = Application(ROUTES, xheaders=True, debug=Env.DEBUG, db=MONGO_DB)
    loop.run_until_complete(Init.init(app))
    app.listen(8000)
    IOLoop.current().start()
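On Tornado 5+, IOLoop.current() wraps the running asyncio loop, which is why mixing run_until_complete for startup with IOLoop.start() works here; the equivalent pure-asyncio form (a sketch, reusing the names above) would be:

import asyncio

loop = asyncio.get_event_loop()
app = Application(ROUTES, xheaders=True, debug=Env.DEBUG, db=MONGO_DB)
loop.run_until_complete(Init.init(app))
app.listen(8000)
loop.run_forever()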
Example #60
def start_graphite_listener(port):
    echo_server = GraphiteServer(None, get_hostname(None))
    echo_server.listen(port)
    IOLoop.instance().start()