Code example #1
File: pool.py Project: furaoing/TorMySQL
    def release_connection(self, connection):
        if self._closed:
            return connection.do_close()

        if not connection.open:
            future = Future()
            future.set_result(None)
            return future

        if self.continue_next_wait(connection):
            while self._wait_connections and self._connections:
                connection = self._connections.pop()
                if connection.open:
                    if self.continue_next_wait(connection):
                        self._used_connections[id(connection)] = connection
                    else:
                        self._connections.append(connection)
                        break
        else:
            try:
                del self._used_connections[id(connection)]
                self._connections.append(connection)
                connection.idle_time = time.time()
            except KeyError:
                if connection not in self._connections:
                    IOLoop.current().add_callback(connection.do_close)
                    raise ConnectionNotFoundError("Connection not found.")
                else:
                    raise ConnectionNotUsedError("Connection is not used, you maybe close wrong connection.")

        future = Future()
        future.set_result(None)
        return future
Code example #2
File: base.py Project: JeanLescut/jupyterhub
    @gen.coroutine
    def stop_single_user(self, user):
        if user.stop_pending:
            raise RuntimeError("Stop already pending for: %s" % user.name)
        tic = IOLoop.current().time()
        yield self.proxy.delete_user(user)
        f = user.stop()
        @gen.coroutine
        def finish_stop(f=None):
            """Finish the stop action by noticing that the user is stopped.

            If the spawner is slow to stop, this is passed as an async callback,
            otherwise it is called immediately.
            """
            if f and f.exception() is not None:
                # failed, don't do anything
                return
            toc = IOLoop.current().time()
            self.log.info("User %s server took %.3f seconds to stop", user.name, toc-tic)

        try:
            yield gen.with_timeout(timedelta(seconds=self.slow_stop_timeout), f)
        except gen.TimeoutError:
            if user.stop_pending:
                # hit timeout, but stop is still pending
                self.log.warning("User %s server is slow to stop", user.name)
                # schedule finish for when the server finishes stopping
                IOLoop.current().add_future(f, finish_stop)
            else:
                raise
        else:
            yield finish_stop()
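
The pattern above (wait briefly with gen.with_timeout, then fall back to IOLoop.add_future when the operation is slow) reduces to a small sketch. The slow_task coroutine and the timing values below are illustrative assumptions, not JupyterHub code:

from datetime import timedelta

from tornado import gen
from tornado.ioloop import IOLoop


@gen.coroutine
def slow_task():
    yield gen.sleep(5)  # hypothetical long-running operation
    raise gen.Return("done")


@gen.coroutine
def wait_with_fallback():
    f = slow_task()

    def finish(future=None):
        if future and future.exception() is not None:
            return  # failed, nothing to finalize
        print("task finished")

    try:
        # Give the task a short foreground budget.
        yield gen.with_timeout(timedelta(seconds=3), f)
    except gen.TimeoutError:
        # Too slow: let it finish in the background and run finish() then.
        IOLoop.current().add_future(f, finish)
    else:
        finish(f)


if __name__ == "__main__":
    loop = IOLoop.current()
    loop.spawn_callback(wait_with_fallback)
    loop.call_later(7, loop.stop)  # keep the loop alive long enough for the demo
    loop.start()
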
Code example #3
File: code_server.py Project: FOSSEE/online_test
 def run(self):
     """Run server which returns an available server port where code
     can be executed.
     """
     # We start the code servers here to ensure they are run as nobody.
     self._start_code_servers()
     IOLoop.current().start()
Code example #4
File: pool.py Project: furaoing/TorMySQL
    def check_idle_connections(self):
        now = time.time()

        while self._wait_connections:
            wait_future, create_time = self._wait_connections[0]
            wait_time = now - create_time
            if wait_time < self._wait_connection_timeout:
                break
            self._wait_connections.popleft()
            IOLoop.current().add_callback(wait_future.set_exception, WaitConnectionTimeoutError("Wait connection timeout, used time %.2fs." % wait_time))

        next_check_time = now + self._idle_seconds
        for connection in tuple(self._connections):
            if now - connection.idle_time > self._idle_seconds:
                self.close_connection(connection)
            elif connection.idle_time + self._idle_seconds < next_check_time:
                next_check_time = connection.idle_time + self._idle_seconds

        for connection in self._used_connections.values():
            if now - connection.used_time > (self._wait_connection_timeout * 4) ** 2:
                connection.do_close()
                logging.error("Connection used timeout close, used time %.2fs %s %s.", now - connection.used_time, connection, self)
            elif now - connection.used_time > self._wait_connection_timeout ** 2 * 2:
                logging.warning("Connection maybe not release, used time %.2fs %s %s.", now - connection.used_time, connection, self)

        if not self._closed and (self._connections or self._used_connections):
            IOLoop.current().add_timeout(min(next_check_time, now + 60), self.check_idle_connections)
        else:
            self._check_idle_callback = False
Code example #5
    def send_request(self, request):
        """Send the given request and response is required.

        Use this for messages which have a response message.

        :param request:
            request to send
        :returns:
            A Future containing the response for the request
        """
        assert self._loop_running, "Perform a handshake first."

        assert request.id not in self._outstanding, (
            "Message ID '%d' already being used" % request.id
        )

        future = tornado.gen.Future()
        self._outstanding[request.id] = future
        self.stream_request(request)

        if request.ttl:
            self._add_timeout(request, future)

        # the actual future that caller will yield
        response_future = tornado.gen.Future()
        # TODO: fire before_receive_response

        IOLoop.current().add_future(
            future,
            lambda f: self.adapt_result(f, request, response_future),
        )
        return response_future
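
The bookkeeping above (keep one future per outstanding request id, then adapt it into the future the caller yields) can be sketched independently of the transport. The PendingRequests class, the Request tuple, and the ("adapted", ...) wrapping below are illustrative assumptions rather than this library's API:

import collections

from tornado import gen
from tornado.concurrent import Future
from tornado.ioloop import IOLoop

Request = collections.namedtuple("Request", ["id", "payload"])


class PendingRequests(object):
    """Track in-flight requests and resolve their futures when responses arrive."""

    def __init__(self):
        self._outstanding = {}

    def send(self, request):
        assert request.id not in self._outstanding, "duplicate request id"
        internal = Future()
        self._outstanding[request.id] = internal

        response_future = Future()

        def adapt(f):
            # Copy failure or (possibly transformed) success onto the caller's future.
            if f.exception() is not None:
                response_future.set_exception(f.exception())
            else:
                response_future.set_result(("adapted", f.result()))

        IOLoop.current().add_future(internal, adapt)
        return response_future

    def on_response(self, request_id, body):
        # Called by the transport layer when a response frame arrives.
        self._outstanding.pop(request_id).set_result(body)


@gen.coroutine
def demo():
    pending = PendingRequests()
    fut = pending.send(Request(id=1, payload=b"ping"))
    IOLoop.current().add_callback(pending.on_response, 1, b"pong")
    result = yield fut
    print(result)  # ('adapted', b'pong')


IOLoop.current().run_sync(demo)
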
Code example #6
File: starter.py Project: iaoshili/Search_Project
def main():
    numProcs = inventory.NUM_INDEX_SHARDS + inventory.NUM_DOC_SHARDS + 1
    taskID = process.fork_processes(numProcs, max_restarts=0)
    port = inventory.BASE_PORT + taskID
    if taskID == 0:
        app = httpserver.HTTPServer(tornado.web.Application([
                (r"/search", Web),
                (r"/upload", UploadHandler),
                (r"/(.*)", IndexDotHTMLAwareStaticFileHandler, dict(path=SETTINGS['static_path']))
            ], **SETTINGS))
        logging.info("Front end is listening on " + str(port))
    else:       
        if taskID <= inventory.NUM_INDEX_SHARDS:
            shardIx = taskID - 1
            #data = pickle.load(open("data/index%d.pkl" % (shardIx), "r"))
            inverted_path = os.path.join(os.getcwd(),"../assignment5/df_jobs/%d.out"  % (shardIx))
            logging.info("Inverted file path: %s" % inverted_path)
            data = pickle.load(open(inverted_path ,'r'))
            idf_path = os.path.join(os.getcwd(), "../assignment5/idf_jobs/0.out")
            logIDF = pickle.load(open(idf_path,'r'))
            app = httpserver.HTTPServer(web.Application([(r"/index", index.Index, dict(data=data, logIDF=logIDF))]))
            logging.info("Index shard %d listening on %d" % (shardIx, port))
        else:
            shardIx = taskID - inventory.NUM_INDEX_SHARDS - 1
            #data = pickle.load(open("data/doc%d.pkl" % (shardIx), "r"))
            doc_path = os.path.join(os.getcwd(),"../assignment5/i_df_jobs/%d.out" % (shardIx))
            logging.info("Doc Server path %s" % doc_path)
            data = pickle.load(open(doc_path, "r"))
            app = httpserver.HTTPServer(web.Application([(r"/doc", doc.Doc, dict(data=data))]))
            logging.info("Doc shard %d listening on %d" % (shardIx, port))
    app.add_sockets(netutil.bind_sockets(port))
    IOLoop.current().start()
Code example #7
    def on_pong(self, data):
        """Clear the timeout, sleep, and send a new ping.

        .. todo::
            *   Document the times used in this method.
                The calculations are in my black notebook
                XD.
        """
        try:
            if self.ping_timeout_handle is not None:
                IOLoop.current().remove_timeout(
                    self.ping_timeout_handle)

            yield sleep(conf.ping_sleep)

            self.ping(b'1')
            self.ping_timeout_handle = \
                IOLoop.current().call_later(
                    conf.ping_timeout, self.close)

        except WebSocketClosedError:
            pass

        except:
            raise
Code example #8
File: async_execute.py Project: EddieZhao/torngas
 def wrapper(self, *args, **kwargs):
     callback = kwargs.pop("callback", None)
     future = thread_resolver.executor.submit(fn, self, *args, **kwargs)
     if callback:
         IOLoop.current().add_future(future,
                                 lambda future: callback(future.result()))
     return future
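
A minimal sketch of how a wrapper like this is typically applied as a decorator, assuming a module-level ThreadPoolExecutor; the run_on_executor_with_callback and BlockingClient names are hypothetical, not torngas API:

import functools
import time
from concurrent.futures import ThreadPoolExecutor

from tornado.ioloop import IOLoop

_executor = ThreadPoolExecutor(max_workers=4)


def run_on_executor_with_callback(fn):
    """Run fn on a thread pool; optionally invoke callback(result) on the IOLoop."""
    @functools.wraps(fn)
    def wrapper(self, *args, **kwargs):
        callback = kwargs.pop("callback", None)
        future = _executor.submit(fn, self, *args, **kwargs)
        if callback:
            IOLoop.current().add_future(
                future, lambda f: callback(f.result()))
        return future
    return wrapper


class BlockingClient(object):
    @run_on_executor_with_callback
    def fetch(self, n):
        time.sleep(0.1)  # stand-in for blocking I/O
        return n * 2


if __name__ == "__main__":
    client = BlockingClient()
    loop = IOLoop.current()

    def on_result(result):
        print(result)  # 42
        loop.stop()

    client.fetch(21, callback=on_result)
    loop.start()
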
Code example #9
File: gen.py Project: AlwinHummels/CouchPotatoServer
    def wrapper(*args, **kwargs):
        runner = None
        future = TracebackFuture()

        if 'callback' in kwargs:
            callback = kwargs.pop('callback')
            IOLoop.current().add_future(
                future, lambda future: callback(future.result()))

        def handle_exception(typ, value, tb):
            try:
                if runner is not None and runner.handle_exception(typ, value, tb):
                    return True
            except Exception:
                typ, value, tb = sys.exc_info()
            future.set_exc_info((typ, value, tb))
            return True
        with ExceptionStackContext(handle_exception):
            try:
                result = func(*args, **kwargs)
            except (Return, StopIteration) as e:
                result = getattr(e, 'value', None)
            except Exception:
                future.set_exc_info(sys.exc_info())
                return future
            else:
                if isinstance(result, types.GeneratorType):
                    def final_callback(value):
                        future.set_result(value)
                    runner = Runner(result, final_callback)
                    runner.run()
                    return future
            future.set_result(result)
        return future
Code example #10
 def test_set_not_none_trace(self):
     new_trace_id = 100
     new_trace = Trace(new_trace_id, 2, 1)
     IOLoop.current().run_sync(partial(self.tx.dummy, trace=new_trace))
     # Set new trace_id, set new logger adapter
     assert self.tx.trace_id == new_trace_id
     assert self.tx.log.extra == {'trace_id': '{:016x}'.format(new_trace_id)}
Code example #11
    def test_set_not_none_trace_equal_trace_id(self):
        new_trace = Trace(300, 20, 10)

        IOLoop.current().run_sync(partial(self.tx.dummy, trace=new_trace))
        # Keep old trace_id, keep old logger
        assert self.tx.trace_id is not new_trace.traceid
        assert self.tx.log is self.initial_log
Code example #12
    def f(c, a, b):
        e = Executor((c.ip, c.port), start=False)
        IOLoop.current().spawn_callback(e._go)

        L = e.map(inc, range(5), workers={a.ip})
        yield _wait(L)

        assert set(a.data) == {x.key for x in L}
        assert not b.data
        for x in L:
            assert e.restrictions[x.key] == {a.ip}

        L = e.map(inc, [10, 11, 12], workers=[{a.ip},
                                              {a.ip, b.ip},
                                              {b.ip}])
        yield _wait(L)

        assert e.restrictions[L[0].key] == {a.ip}
        assert e.restrictions[L[1].key] == {a.ip, b.ip}
        assert e.restrictions[L[2].key] == {b.ip}

        with pytest.raises(ValueError):
            e.map(inc, [10, 11, 12], workers=[{a.ip}])

        yield e._shutdown()
Code example #13
def test_errors_dont_block():
    c = Center('127.0.0.1', 8017)
    w = Worker('127.0.0.2', 8018, c.ip, c.port, ncores=1)
    e = Executor((c.ip, c.port), start=False)
    @gen.coroutine
    def f():
        c.listen(c.port)
        yield w._start()
        IOLoop.current().spawn_callback(e._go)

        L = [e.submit(inc, 1),
             e.submit(throws, 1),
             e.submit(inc, 2),
             e.submit(throws, 2)]

        i = 0
        while not (L[0].status == L[2].status == 'finished'):
            i += 1
            if i == 1000:
                assert False
            yield gen.sleep(0.01)
        result = yield e._gather([L[0], L[2]])
        assert result == [2, 3]

        yield w._close()
        c.stop()

    IOLoop.current().run_sync(f)
Code example #14
File: diagnostics.py Project: Krylon360/elastickube
def run_server():
    level = 'WARNING' if not os.getenv('DEBUG') else 'DEBUG'
    logging.basicConfig(level=level)

    logger.info('Starting server')
    tornado.netutil.Resolver.configure('tornado.netutil.ThreadedResolver', num_threads=10)

    settings = {}
    settings_from_env(settings, os.environ)

    replication_controllers = (
        ('kube-system', 'elastickube-server'),
        ('kube-system', 'elastickube-mongo'),
    )
    logger.debug('Loaded settings')

    system_status = SystemStatus(replication_controllers)

    start_background_checks(settings, system_status, replication_controllers)

    statics_path = os.path.join(os.path.dirname(__file__), 'assets')
    application = create_application(system_status, statics_path, bool(os.getenv('DEBUG')))
    server = tornado.httpserver.HTTPServer(application)

    socket = tornado.netutil.bind_unix_socket("/var/run/elastickube-diagnostics.sock", mode=0777)
    server.add_socket(socket)

    if os.getenv('DEBUG'):
        IOLoop.current().set_blocking_log_threshold(0.5)

    IOLoop.current().start()
Code example #15
File: base.py Project: paul918/jupyterhub
    @gen.coroutine
    def spawn_single_user(self, user, options=None):
        if user.spawn_pending:
            raise RuntimeError("Spawn already pending for: %s" % user.name)
        tic = IOLoop.current().time()

        f = user.spawn(options)

        @gen.coroutine
        def finish_user_spawn(f=None):
            """Finish the user spawn by registering listeners and notifying the proxy.
            
            If the spawner is slow to start, this is passed as an async callback,
            otherwise it is called immediately.
            """
            if f and f.exception() is not None:
                # failed, don't add to the proxy
                return
            toc = IOLoop.current().time()
            self.log.info("User %s server took %.3f seconds to start", user.name, toc-tic)
            yield self.proxy.add_user(user)
            user.spawner.add_poll_callback(self.user_stopped, user)
        
        try:
            yield gen.with_timeout(timedelta(seconds=self.slow_spawn_timeout), f)
        except gen.TimeoutError:
            if user.spawn_pending:
                # hit timeout, but spawn is still pending
                self.log.warn("User %s server is slow to start", user.name)
                # schedule finish for when the user finishes spawning
                IOLoop.current().add_future(f, finish_user_spawn)
            else:
                raise
        else:
            yield finish_user_spawn()
Code example #16
File: chat.py Project: hulingfeng211/mywork
    def get_data(self):
        if self.request.connection.stream.closed():
            return
        self.subscribe()
        num = 90  # set the timeout, in seconds

        IOLoop.current().add_timeout(time.time() + num, lambda: self.on_timeout(num))
Code example #17
File: concurrent.py Project: leeclemens/tornado
def chain_future(a: 'Future[_T]', b: 'Future[_T]') -> None:
    """Chain two futures together so that when one completes, so does the other.

    The result (success or failure) of ``a`` will be copied to ``b``, unless
    ``b`` has already been completed or cancelled by the time ``a`` finishes.

    .. versionchanged:: 5.0

       Now accepts both Tornado/asyncio `Future` objects and
       `concurrent.futures.Future`.

    """
    def copy(future: 'Future[_T]') -> None:
        assert future is a
        if b.done():
            return
        if (hasattr(a, 'exc_info') and
                a.exc_info() is not None):  # type: ignore
            future_set_exc_info(b, a.exc_info())  # type: ignore
        elif a.exception() is not None:
            b.set_exception(a.exception())
        else:
            b.set_result(a.result())
    if isinstance(a, Future):
        future_add_done_callback(a, copy)
    else:
        # concurrent.futures.Future
        from tornado.ioloop import IOLoop
        IOLoop.current().add_future(a, copy)
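
A small usage sketch for chain_future, assuming Tornado 5+ where both futures are Tornado/asyncio futures; the 0.1 s delay and the value 42 are arbitrary:

from tornado import gen
from tornado.concurrent import Future, chain_future
from tornado.ioloop import IOLoop


@gen.coroutine
def main():
    a = Future()
    b = Future()
    chain_future(a, b)  # b will mirror whatever happens to a

    IOLoop.current().call_later(0.1, a.set_result, 42)
    result = yield b    # resolves once a does
    print(result)       # 42


IOLoop.current().run_sync(main)
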
Code example #18
File: server.py Project: carolling/elastickube
def setup_server():
    # Config tornado.curl_httpclient to use NullHandler
    tornado_logger = logging.getLogger('tornado.curl_httpclient')
    tornado_logger.addHandler(logging.NullHandler())
    tornado_logger.propagate = False

    settings = dict(
        autoreload=True,
        secret="ElasticKube",
    )

    configure(settings)

    handlers = [
        (r"/api/v1/auth/providers", AuthProvidersHandler),
        (r"/api/v1/auth/signup", SignupHandler),
        (r"/api/v1/auth/login", PasswordHandler),
        (r"/api/v1/auth/google", GoogleOAuth2LoginHandler),
        (r"/api/v1/ws", MainWebSocketHandler),
        (r"/icons/(?P<entity_id>[^\/]+)\/(?P<chart_id>[^\/]+)", IconGenerator)
    ]

    application = Application(handlers, **settings)

    server = HTTPServer(application)
    socket = bind_unix_socket("/var/run/elastickube-api.sock", mode=0777)
    server.add_socket(socket)

    IOLoop.current().add_callback(initialize, settings)
Code example #19
def getMoreMovieName():
  global url
  url = 'http://www.imdb.com/calendar/?ref_=nv_mv_cal_5'
  IOLoop.current().run_sync(readHtml)
  # print body
  match = re.findall('.*href=".[^>]*>(.[^<]*)</a>', body)
  return match
Code example #20
File: index_manager.py Project: AppScale/appscale
  def _handle_connection_change(self, state):
    """ Notifies the admin lock holder when the connection changes.

    Args:
      state: The new connection state.
    """
    IOLoop.current().add_callback(self._wake_event.set)
Code example #21
File: index_manager.py Project: AppScale/appscale
  def apply_definitions(self):
    """ Populate composite indexes that are not marked as ready yet. """
    try:
      yield self.update_event.wait()
      self.update_event.clear()
      if not self._index_manager.admin_lock.is_acquired or not self.active:
        return

      logger.info(
        'Applying composite index definitions for {}'.format(self.project_id))

      for index in self.indexes:
        if index.ready:
          continue

        # Wait until all clients have either timed out or received the new index
        # definition. This prevents entities from being added without entries
        # while the index is being rebuilt.
        creation_time = self._creation_times.get(index.id, time.time())
        consensus = creation_time + (self._zk_client._session_timeout / 1000.0)
        yield gen.sleep(max(consensus - time.time(), 0))

        yield self._ds_access.update_composite_index(
          self.project_id, index.to_pb())
        logger.info('Index {} is now ready'.format(index.id))
        self._mark_index_ready(index.id)

      logging.info(
        'All composite indexes for {} are ready'.format(self.project_id))
    finally:
      IOLoop.current().spawn_callback(self.apply_definitions)
Code example #22
File: datastore.py Project: AppScale/appscale
  def update_index_request(self, app_id, http_request_data):
    """ High level function for updating a composite index.

    Args:
      app_id: A string containing the application ID.
      http_request_data: A string containing the protocol buffer request
        from the AppServer.
    Returns:
       A tuple containing an encoded response, error code, and error details.
    """
    global datastore_access
    index = entity_pb.CompositeIndex(http_request_data)
    response = api_base_pb.VoidProto()

    if READ_ONLY:
      logger.warning('Unable to update in read-only mode: {}'.format(index))
      return ('', datastore_pb.Error.CAPABILITY_DISABLED,
              'Datastore is in read-only mode.')

    state = index.state()
    if state not in [index.READ_WRITE, index.WRITE_ONLY]:
      state_name = entity_pb.CompositeIndex.State_Name(state)
      error_message = 'Unable to update index because state is {}. '\
        'Index: {}'.format(state_name, index)
      logger.error(error_message)
      return '', datastore_pb.Error.PERMISSION_DENIED, error_message
    else:
      # Updating index in background so we can return a response quickly.
      IOLoop.current().spawn_callback(
        datastore_access.update_composite_index, app_id, index)

    return response.Encode(), 0, ''
Code example #23
def main():
  """ Starts the groomer. """
  logging.basicConfig(format=LOG_FORMAT, level=logging.INFO)

  parser = argparse.ArgumentParser()
  parser.add_argument('-v', '--verbose', action='store_true',
                      help='Output debug-level logging')
  args = parser.parse_args()

  if args.verbose:
    logger.setLevel(logging.DEBUG)

  zk_hosts = appscale_info.get_zk_node_ips()
  zk_client = KazooClient(hosts=','.join(zk_hosts),
                          connection_retry=ZK_PERSISTENT_RECONNECTS,
                          command_retry=KazooRetry(max_tries=-1))
  zk_client.start()

  db_access = DatastoreProxy()

  thread_pool = ThreadPoolExecutor(4)

  TransactionGroomer(zk_client, db_access, thread_pool)
  logger.info('Starting transaction groomer')

  IOLoop.current().start()
Code example #24
def future_fold(future, result_mapper=None, exception_mapper=None):
    """
    Creates a new future with result or exception processed by result_mapper and exception_mapper.

    If result_mapper or exception_mapper raises an exception, it will be set as an exception for the resulting future.
    Any of the mappers can be None — then the result or exception is left as is.
    """

    res_future = Future()

    def _process(func, value):
        try:
            processed = func(value) if func is not None else value
        except Exception as e:
            res_future.set_exception(e)
            return
        res_future.set_result(processed)

    def _on_ready(wrapped_future):
        exception = wrapped_future.exception()
        if exception is not None:
            if not callable(exception_mapper):
                def default_exception_func(error):
                    raise error
                _process(default_exception_func, exception)
            else:
                _process(exception_mapper, exception)
        else:
            _process(result_mapper, future.result())

    IOLoop.current().add_future(future, callback=_on_ready)
    return res_future
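
A usage sketch for future_fold, assuming the helper above is in scope; the doubling mapper is illustrative:

from tornado.concurrent import Future
from tornado.ioloop import IOLoop


def demo():
    source = Future()
    folded = future_fold(source, result_mapper=lambda x: x * 2)

    # Resolve the source once the loop is running; the folded future follows.
    IOLoop.current().add_callback(source.set_result, 21)
    return folded


print(IOLoop.current().run_sync(demo))  # prints 42
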
Code example #25
 def request_initialise(self, sock, program, qdr_cal, require_epoch,
                        monitor_vacc):
     """
     Initialise self.instrument
     :param sock:
     :param program: program the FPGA boards if True
     :param qdr_cal: perform QDR cal if True
     :param require_epoch: the synch epoch MUST be set before init if True
     :param monitor_vacc: start the VACC monitoring ioloop
     :return:
     """
     try:
         self.instrument.initialise(program=program,
                                    qdr_cal=qdr_cal,
                                    require_epoch=require_epoch)
         #sensor_manager = sensors.SensorManager(self, self.instrument)
         #self.instrument.sensor_manager = sensor_manager
         #sensor_manager.sensors_clear()
         #sensors.setup_mainloop_sensors(sensor_manager)
         IOLoop.current().add_callback(self.periodic_send_metadata)
         if monitor_vacc:
             self.instrument.xops.vacc_check_timer_start()
         return 'ok',
     except Exception as e:
         return self._log_excep('Failed to initialise %s: %s' % (
             self.instrument.descriptor, e.message))
Code example #26
File: test_core.py Project: aterrel/distributed
 def f():
     from distributed.core import Server
     from tornado.ioloop import IOLoop
     server = Server({'ping': pingpong})
     server.listen(8887)
     IOLoop.current().start()
     IOLoop.current().stop()
Code example #27
File: main.py Project: Adelscott/persomov
    def frontend(self, type="notification", data=None, message=None):
        if not data:
            data = {}

        log.debug("Notifying frontend")

        self.m_lock.acquire()
        notification = {
            "message_id": str(uuid.uuid4()),
            "time": time.time(),
            "type": type,
            "data": data,
            "message": message,
        }
        self.messages.append(notification)

        while len(self.listeners) > 0 and not self.shuttingDown():
            try:
                listener, last_id = self.listeners.pop()
                IOLoop.current().add_callback(listener, {"success": True, "result": [notification]})
            except:
                log.debug("Failed sending to listener: %s", traceback.format_exc())

        self.listeners = []
        self.m_lock.release()

        log.debug("Done notifying frontend")
Code example #28
def run_auth_server():
    client_store = ClientStore()
    client_store.add_client(client_id="abc", client_secret="xyz", redirect_uris=["http://localhost:8081/callback"])

    token_store = TokenStore()

    provider = Provider(
        access_token_store=token_store, auth_code_store=token_store, client_store=client_store, token_generator=Uuid4()
    )
    provider.add_grant(AuthorizationCodeGrant(site_adapter=TestSiteAdapter()))

    try:
        app = Application(
            [
                url(provider.authorize_path, OAuth2Handler, dict(provider=provider)),
                url(provider.token_path, OAuth2Handler, dict(provider=provider)),
            ]
        )

        app.listen(8080)
        print("Starting OAuth2 server on http://localhost:8080/...")
        IOLoop.current().start()

    except KeyboardInterrupt:
        IOLoop.close()
Code example #29
File: Server.py Project: rwth-i6/returnn
  def __init__(self, config_file):
    self.lock = locks.Lock()
    self.classification_queue = Queue()

    print('loading config %s' % config_file, file=log.v5)
    # Load and setup config
    try:
      self.config = Config.Config()
      self.config.load_file(config_file)
      self.pause_after_first_seq = self.config.float('pause_after_first_seq', 0.2)
      self.batch_size = self.config.int('batch_size', 5000)
      self.max_seqs = self.config.int('max_seqs', -1)
    except Exception:
      print('Error: loading config %s failed' % config_file, file=log.v1)
      raise

    try:
      self.devices = self._init_devices()
    except Exception:
      print('Error: Loading devices for config %s failed' % config_file, file=log.v1)
      raise

    print('Starting engine for config %s' % config_file, file=log.v5)
    self.engine = Engine.Engine(self.devices)
    try:
      self.engine.init_network_from_config(config=self.config)
    except Exception:
      print('Error: Loading network for config %s failed' % config_file, file=log.v1)
      raise

    IOLoop.current().spawn_callback(self.classify_in_background)

    self.last_used = datetime.datetime.now()
Code example #30
File: index_manager.py Project: AppScale/appscale
  def __init__(self, zk_client, datastore_access, perform_admin=False):
    """ Creates a new IndexManager.

    Args:
      zk_client: A kazoo.client.KazooClient object.
      datastore_access: A DatastoreDistributed object.
      perform_admin: A boolean specifying whether or not to perform admin
        operations.
    """
    self.projects = {}
    self._wake_event = AsyncEvent()
    self._zk_client = zk_client
    self.admin_lock = AsyncKazooLock(self._zk_client, self.ADMIN_LOCK_NODE)

    # TODO: Refactor so that this dependency is not needed.
    self._ds_access = datastore_access

    self._zk_client.ensure_path('/appscale/projects')
    self._zk_client.ChildrenWatch('/appscale/projects', self._update_projects)

    # Since this manager can be used synchronously, ensure that the projects
    # are populated for this IOLoop iteration.
    project_ids = self._zk_client.get_children('/appscale/projects')
    self._update_projects_sync(project_ids)

    if perform_admin:
      IOLoop.current().spawn_callback(self._contend_for_admin_lock)
Code example #31
File: nanny.py Project: tjb900/distributed
    def __init__(self, scheduler_ip=None, scheduler_port=None,
                 scheduler_file=None, worker_port=0,
                 ncores=None, loop=None, local_dir=None, services=None,
                 name=None, memory_limit='auto', reconnect=True,
                 validate=False, quiet=False, resources=None, silence_logs=None,
                 death_timeout=None, preload=(), preload_argv=[], security=None,
                 contact_address=None, listen_address=None, **kwargs):
        if scheduler_file:
            cfg = json_load_robust(scheduler_file)
            self.scheduler_addr = cfg['address']
        elif scheduler_ip is None and dask.config.get('scheduler-address'):
            self.scheduler_addr = dask.config.get('scheduler-address')
        elif scheduler_port is None:
            self.scheduler_addr = coerce_to_address(scheduler_ip)
        else:
            self.scheduler_addr = coerce_to_address((scheduler_ip, scheduler_port))
        self._given_worker_port = worker_port
        self.ncores = ncores or _ncores
        self.reconnect = reconnect
        self.validate = validate
        self.resources = resources
        self.death_timeout = death_timeout
        self.preload = preload
        self.preload_argv = preload_argv

        self.contact_address = contact_address
        self.memory_terminate_fraction = dask.config.get('distributed.worker.memory.terminate')

        self.security = security or Security()
        assert isinstance(self.security, Security)
        self.connection_args = self.security.get_connection_args('worker')
        self.listen_args = self.security.get_listen_args('worker')

        self.local_dir = local_dir

        self.loop = loop or IOLoop.current()
        self.scheduler = rpc(self.scheduler_addr, connection_args=self.connection_args)
        self.services = services
        self.name = name
        self.quiet = quiet
        self.auto_restart = True

        self.memory_limit = parse_memory_limit(memory_limit, self.ncores)

        if silence_logs:
            silence_logging(level=silence_logs)
        self.silence_logs = silence_logs

        handlers = {'instantiate': self.instantiate,
                    'kill': self.kill,
                    'restart': self.restart,
                    # cannot call it 'close' on the rpc side for naming conflict
                    'terminate': self._close,
                    'run': self.run}

        super(Nanny, self).__init__(handlers, io_loop=self.loop,
                                    connection_args=self.connection_args,
                                    **kwargs)

        if self.memory_limit:
            pc = PeriodicCallback(self.memory_monitor, 100, io_loop=self.loop)
            self.periodic_callbacks['memory'] = pc

        self._listen_address = listen_address
        self.status = 'init'
Code example #32
File: base.py Project: whoget/nbviewer
 def initialize(self, **kwargs):
     super(RenderingHandler, self).initialize(**kwargs)
     loop = IOLoop.current()
     if self.render_timeout:
         self.slow_timeout = loop.add_timeout(
             loop.time() + self.render_timeout, self.finish_early)
Code example #33
File: power.py Project: panik988/moonraker
 def run_power_changed_action(self):
     if self.state == "on" and self.klipper_restart:
         ioloop = IOLoop.current()
         klippy_apis = self.server.lookup_component("klippy_apis")
         ioloop.call_later(self.restart_delay, klippy_apis.do_restart,
                           "FIRMWARE_RESTART")
Code example #34
File: netutil.py Project: CzaOrz/sourceCodeLearning
def add_accept_handler(
        sock: socket.socket, callback: Callable[[socket.socket, Any],
                                                None]) -> Callable[[], None]:
    """Adds an `.IOLoop` event handler to accept new connections on ``sock``.

    When a connection is accepted, ``callback(connection, address)`` will
    be run (``connection`` is a socket object, and ``address`` is the
    address of the other end of the connection).  Note that this signature
    is different from the ``callback(fd, events)`` signature used for
    `.IOLoop` handlers.

    A callable is returned which, when called, will remove the `.IOLoop`
    event handler and stop processing further incoming connections.

    .. versionchanged:: 5.0
       The ``io_loop`` argument (deprecated since version 4.1) has been removed.

    .. versionchanged:: 5.0
       A callable is returned (``None`` was returned before).
    """
    io_loop = IOLoop.current()
    removed = [False]

    def accept_handler(fd: socket.socket, events: int) -> None:
        # More connections may come in while we're handling callbacks;
        # to prevent starvation of other tasks we must limit the number
        # of connections we accept at a time.  Ideally we would accept
        # up to the number of connections that were waiting when we
        # entered this method, but this information is not available
        # (and rearranging this method to call accept() as many times
        # as possible before running any callbacks would have adverse
        # effects on load balancing in multiprocess configurations).
        # Instead, we use the (default) listen backlog as a rough
        # heuristic for the number of connections we can reasonably
        # accept at once.
        for i in range(_DEFAULT_BACKLOG):
            if removed[0]:
                # The socket was probably closed
                return
            try:
                connection, address = sock.accept()
            except socket.error as e:
                # _ERRNO_WOULDBLOCK indicate we have accepted every
                # connection that is available.
                if errno_from_exception(e) in _ERRNO_WOULDBLOCK:
                    return
                # ECONNABORTED indicates that there was a connection
                # but it was closed while still in the accept queue.
                # (observed on FreeBSD).
                if errno_from_exception(e) == errno.ECONNABORTED:
                    continue
                raise
            set_close_exec(connection.fileno())
            callback(connection, address)

    def remove_handler() -> None:
        io_loop.remove_handler(sock)
        removed[0] = True

    """
    BaseAsyncIOLoop里面的方法
    实际调用的是asyncio里面, selector_events -> BaseSelectorEventLoop -> add_reader 方法
    将accept_handler与后面的参数, 打包为一个events.Handle(callback, args, self, None) 对象
    然后以fd注册, 也就是selectors.register方法
    self._selector.register(fd, selectors.EVENT_READ,
                                    (handle, None))
    
    所以此处就算服务正式挂起了. 每当有一个conn连接建立, 都会调用accept_handler, 用于sock.accept接受新连接
    然后调用回调函数执行新连接
    """
    io_loop.add_handler(sock, accept_handler, IOLoop.READ)
    return remove_handler
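
A minimal usage sketch for add_accept_handler, assuming Tornado 5+ public netutil and iostream APIs; port 8888 and the greeting are arbitrary:

from tornado import netutil
from tornado.ioloop import IOLoop
from tornado.iostream import IOStream


def handle_connection(connection, address):
    stream = IOStream(connection)
    # Send a greeting and close once the write has been flushed.
    write_future = stream.write(b"hello\n")
    write_future.add_done_callback(lambda _: stream.close())


if __name__ == "__main__":
    sock = netutil.bind_sockets(8888)[0]
    remove_handler = netutil.add_accept_handler(sock, handle_connection)
    # remove_handler() would unregister the accept handler again.
    IOLoop.current().start()
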
Code example #35
            scheduler = IOLoopScheduler(IOLoop.current())
            self.messages = Subject()
            only_messages = self.messages.filter(lambda msg: msg[
                0] == 'message').map(lambda msg: msg[1]).publish()
            only_messages.subscribe(lambda msg: print(msg))
            only_messages.connect()
            self._app = Application([
                (r'/exchange', ExchangeHandler),
                (r'/', MainHandler),
            ])

    def start(self):
        self._app.listen(8888)

    instance = None

    def __init__(self):
        if Server.instance is None:
            Server.instance = Server.__Server()

    def __getattr__(self, item):
        return getattr(self.instance, item)


if __name__ == '__main__':
    Server().messages.subscribe(lambda msg: print('Received: {}'.format(msg)))
    Server().messages.filter(lambda msg: msg == 'opened').subscribe(
        lambda msg: print('Connection has been opened'))
    Server().start()
    IOLoop.current().start()
Code example #36
 def get(self):
     self.stream = self.detach()
     IOLoop.current().spawn_callback(self.write_response)
Code example #37
File: data_mds.py Project: srsuper/star_yuuki_bot
class PythonMDS:
    switch_data = {}

    # Main
    app = Application([('/', IndexHandler)])
    server = HTTPServer(app)
    async_lock = IOLoop.current()

    def __init__(self):
        _work["UPT"] = self._update
        _work["DEL"] = self._delete
        _work["GET"] = self._query
        _work["SYC"] = self._sync
        _work["YLD"] = self._yuuki_limit_decrease
        _work["EXT"] = self._shutdown

    def _query(self, data):
        query_data = data["path"]
        if type(self.switch_data) is dict and type(query_data) is list:
            result = self.switch_data
            query_len = len(query_data) - 1
            for count, key in enumerate(query_data):
                if key in result:
                    if count < query_len:
                        if type(result.get(key)) is not dict:
                            result = 1  # "unknown_type" + type(source_data.get(key))
                            break
                    result = result.get(key)
                else:
                    result = 2  # "unknown_key"
                    break

            return {"status": 200, "data": result}
        return {"status": 400}

    def _update(self, data):
        if type(data["path"]) is list:
            over = self._query({"path": data["path"]})
            over.get("data").update(data["data"])
            return {"status": 200}
        return {"status": 400}

    def _delete(self, data):
        if type(data["path"]) is list:
            over = self._query({"path": data["path"]})
            over.get("data").pop(data["data"])
            return {"status": 200}
        return {"status": 400}

    def _sync(self, data):
        self.switch_data = data["path"]
        return {"status": 200}

    def _yuuki_limit_decrease(self, data):
        self.switch_data["LimitInfo"][data["path"]][data["data"]] -= 1
        return {"status": 200}

    def _shutdown(self, data):
        if data:
            pass
        self.server.stop()
        yield True
        self.async_lock.stop()
        self.async_lock.close()

    def mds_listen(self, code):
        global auth_code
        auth_code = code
        self.server.listen(2019)
        self.async_lock.start()
Code example #38
File: gen_test.py Project: xingleilee/tornado
 def prepare(self):
     yield gen.Task(IOLoop.current().add_callback)
     raise HTTPError(403)
Code example #39
File: gen_test.py Project: xingleilee/tornado
 def prepare(self):
     self.chunks = []
     yield gen.Task(IOLoop.current().add_callback)
     self.chunks.append('1')
Code example #40
import json
import logging

from tornado import gen

import async_requests


# == URL-shortening service ==
@gen.coroutine
def tiny_url(long_url):
    shorten_service_url = "http://dwz.cn/create.php"
    my_link = {"url": long_url}
    resp_obj = yield async_requests.session("POST",
                                            shorten_service_url,
                                            callback=None,
                                            data=my_link)
    resp = json.loads(resp_obj.body)
    if resp['status'] == 0:
        raise gen.Return(resp["tinyurl"])
    else:
        logging.warning('Shorten url=[%s] error, msg=[%s]' %
                        (long_url, resp["err_msg"]))
        raise gen.Return(None)


if __name__ == '__main__':
    from tornado.ioloop import IOLoop
    from functools import partial

    f = partial(
        tiny_url,
        'http://cha.123feng.com/sllsdjalsldlakdfaksflkasjfas;fklasjf;alskjfl')
    ret = IOLoop.current().run_sync(f)
    print("tiny_url: %s" % (ret))
Code example #41
File: tornado.py Project: zuohd/TorMySQL
def current_ioloop():
    return IOLoop.current()
Code example #42
File: gen_test.py Project: xingleilee/tornado
 def get(self):
     self.chunks.append('2')
     yield gen.Task(IOLoop.current().add_callback)
     self.chunks.append('3')
     yield gen.Task(IOLoop.current().add_callback)
     self.write(''.join(self.chunks))
Code example #43
File: __init__.py Project: xshiyun/forsun
 def stop(self):
     IOLoop.current().add_callback(lambda: IOLoop.current().stop())
     logging.info("server stoping")
Code example #44
def fwrap(gf, ioloop=None):
    f = Future()
    if ioloop is None:
        ioloop = IOLoop.current()
    gf.add_done_callback(lambda _: ioloop.add_callback(_fwrap, f, gf))
    return f
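
The _fwrap helper is not shown in this excerpt; assuming it copies the result or exception from gf onto f, the wrapper can be used to await a concurrent.futures future from a coroutine. The thread pool and the work function below are illustrative:

from concurrent.futures import ThreadPoolExecutor

from tornado import gen
from tornado.ioloop import IOLoop

pool = ThreadPoolExecutor(max_workers=2)


def work(x):
    return x + 1  # stand-in for blocking work


@gen.coroutine
def main():
    result = yield fwrap(pool.submit(work, 41))
    print(result)  # 42


IOLoop.current().run_sync(main)
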
Code example #45
File: sentry_error.py Project: caowenbin08/frontik
 def finish(self, chunk=None):
     # delay page finish to make sure that sentry mock got the exception
     self.add_timeout(IOLoop.current().time() + 1.0,
                      partial(super(Page, self).finish, chunk))
Code example #46
File: server.py Project: zero88/social-crawler
 def start(self):
     http_server = HTTPServer(WSGIContainer(self.app))
     http_server.listen(self.port)
     ioloop = IOLoop.current()
     self.__setPing__(ioloop, timedelta(seconds=2))
     ioloop.start()
Code example #47
File: runner.py Project: Tubbz-alt/couchpotato-1
def runCouchPotato(options, base_path, args, data_dir = None, log_dir = None, Env = None, desktop = None):

    try:
        locale.setlocale(locale.LC_ALL, "")
        encoding = locale.getpreferredencoding()
    except (locale.Error, IOError):
        encoding = None

    # for OSes that are poorly configured I'll just force UTF-8
    if not encoding or encoding in ('ANSI_X3.4-1968', 'US-ASCII', 'ASCII'):
        encoding = 'UTF-8'

    Env.set('encoding', encoding)

    # Do db stuff
    db_path = sp(os.path.join(data_dir, 'database'))

    # Check if database exists
    db = SuperThreadSafeDatabase(db_path)
    db_exists = db.exists()
    if db_exists:

        # Backup before start and cleanup old backups
        backup_path = sp(os.path.join(data_dir, 'db_backup'))
        backup_count = 5
        existing_backups = []
        if not os.path.isdir(backup_path): os.makedirs(backup_path)

        for root, dirs, files in os.walk(backup_path):
            for backup_file in sorted(files):
                ints = re.findall('\d+', backup_file)

                # Delete non zip files
                if len(ints) != 1:
                    os.remove(os.path.join(backup_path, backup_file))
                else:
                    existing_backups.append((int(ints[0]), backup_file))

        # Remove all but the last 5
        for eb in existing_backups[:-backup_count]:
            os.remove(os.path.join(backup_path, eb[1]))

        # Create new backup
        new_backup = sp(os.path.join(backup_path, '%s.tar.gz' % int(time.time())))
        zipf = tarfile.open(new_backup, 'w:gz')
        for root, dirs, files in os.walk(db_path):
            for zfilename in files:
                zipf.add(os.path.join(root, zfilename), arcname = 'database/%s' % os.path.join(root[len(db_path) + 1:], zfilename))
        zipf.close()

        # Open last
        db.open()

    else:
        db.create()

    # Force creation of cachedir
    log_dir = sp(log_dir)
    cache_dir = sp(os.path.join(data_dir, 'cache'))
    python_cache = sp(os.path.join(cache_dir, 'python'))

    if not os.path.exists(cache_dir):
        os.mkdir(cache_dir)
    if not os.path.exists(python_cache):
        os.mkdir(python_cache)

    # Register environment settings
    Env.set('app_dir', sp(base_path))
    Env.set('data_dir', sp(data_dir))
    Env.set('log_path', sp(os.path.join(log_dir, 'CouchPotato.log')))
    Env.set('db', db)
    Env.set('http_opener', requests.Session())
    Env.set('cache_dir', cache_dir)
    Env.set('cache', FileSystemCache(python_cache))
    Env.set('console_log', options.console_log)
    Env.set('quiet', options.quiet)
    Env.set('desktop', desktop)
    Env.set('daemonized', options.daemon)
    Env.set('args', args)
    Env.set('options', options)

    # Determine debug
    debug = options.debug or Env.setting('debug', default = False, type = 'bool')
    Env.set('debug', debug)

    # Development
    development = Env.setting('development', default = False, type = 'bool')
    Env.set('dev', development)

    # Disable logging for some modules
    for logger_name in ['enzyme', 'guessit', 'subliminal', 'apscheduler', 'tornado', 'requests']:
        logging.getLogger(logger_name).setLevel(logging.ERROR)

    for logger_name in ['gntp']:
        logging.getLogger(logger_name).setLevel(logging.WARNING)

    # Use reloader
    reloader = debug is True and development and not Env.get('desktop') and not options.daemon

    # Logger
    logger = logging.getLogger()
    formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s', '%m-%d %H:%M:%S')
    level = logging.DEBUG if debug else logging.INFO
    logger.setLevel(level)
    logging.addLevelName(19, 'INFO')

    # To screen
    if (debug or options.console_log) and not options.quiet and not options.daemon:
        hdlr = logging.StreamHandler(sys.stderr)
        hdlr.setFormatter(formatter)
        logger.addHandler(hdlr)

    # To file
    hdlr2 = handlers.RotatingFileHandler(Env.get('log_path'), 'a', 500000, 10, encoding = Env.get('encoding'))
    hdlr2.setFormatter(formatter)
    logger.addHandler(hdlr2)

    # Start logging & enable colors
    # noinspection PyUnresolvedReferences
    import color_logs
    from couchpotato.core.logger import CPLog
    log = CPLog(__name__)
    log.debug('Started with options %s', options)

    def customwarn(message, category, filename, lineno, file = None, line = None):
        log.warning('%s %s %s line:%s', (category, message, filename, lineno))
    warnings.showwarning = customwarn

    # Create app
    from couchpotato import WebHandler
    web_base = ('/' + Env.setting('url_base').lstrip('/') + '/') if Env.setting('url_base') else '/'
    Env.set('web_base', web_base)

    api_key = Env.setting('api_key')
    if not api_key:
        api_key = uuid4().hex
        Env.setting('api_key', value = api_key)

    api_base = r'%sapi/%s/' % (web_base, api_key)
    Env.set('api_base', api_base)

    # Basic config
    host = Env.setting('host', default = '0.0.0.0')
    # app.debug = development
    config = {
        'use_reloader': reloader,
        'port': tryInt(Env.setting('port', default = 5050)),
        'host': host if host and len(host) > 0 else '0.0.0.0',
        'ssl_cert': Env.setting('ssl_cert', default = None),
        'ssl_key': Env.setting('ssl_key', default = None),
    }

    # Load the app
    application = Application(
        [],
        log_function = lambda x: None,
        debug = config['use_reloader'],
        gzip = True,
        cookie_secret = api_key,
        login_url = '%slogin/' % web_base,
    )
    Env.set('app', application)

    # Request handlers
    application.add_handlers(".*$", [
        (r'%snonblock/(.*)(/?)' % api_base, NonBlockHandler),

        # API handlers
        (r'%s(.*)(/?)' % api_base, ApiHandler),  # Main API handler
        (r'%sgetkey(/?)' % web_base, KeyHandler),  # Get API key
        (r'%s' % api_base, RedirectHandler, {"url": web_base + 'docs/'}),  # API docs

        # Login handlers
        (r'%slogin(/?)' % web_base, LoginHandler),
        (r'%slogout(/?)' % web_base, LogoutHandler),

        # Catch all webhandlers
        (r'%s(.*)(/?)' % web_base, WebHandler),
        (r'(.*)', WebHandler),
    ])

    # Static paths
    static_path = '%sstatic/' % web_base
    for dir_name in ['fonts', 'images', 'scripts', 'style']:
        application.add_handlers(".*$", [
            ('%s%s/(.*)' % (static_path, dir_name), StaticFileHandler, {'path': sp(os.path.join(base_path, 'couchpotato', 'static', dir_name))})
        ])
    Env.set('static_path', static_path)

    # Load configs & plugins
    loader = Env.get('loader')
    loader.preload(root = sp(base_path))
    loader.run()

    # Fill database with needed stuff
    fireEvent('database.setup')
    if not db_exists:
        fireEvent('app.initialize', in_order = True)
    fireEvent('app.migrate')

    # Go go go!
    from tornado.ioloop import IOLoop
    from tornado.autoreload import add_reload_hook
    loop = IOLoop.current()

    # Reload hook
    def test():
        fireEvent('app.shutdown')
    add_reload_hook(test)

    # Some logging and fire load event
    try: log.info('Starting server on port %(port)s', config)
    except: pass
    fireEventAsync('app.load')

    if config['ssl_cert'] and config['ssl_key']:
        server = HTTPServer(application, no_keep_alive = True, ssl_options = {
            'certfile': config['ssl_cert'],
            'keyfile': config['ssl_key'],
        })
    else:
        server = HTTPServer(application, no_keep_alive = True)

    try_restart = True
    restart_tries = 5

    while try_restart:
        try:
            server.listen(config['port'], config['host'])
            loop.start()
        except Exception as e:
            log.error('Failed starting: %s', traceback.format_exc())
            try:
                nr, msg = e
                if nr == 48:
                    log.info('Port (%s) needed for CouchPotato is already in use, try %s more time after few seconds', (config.get('port'), restart_tries))
                    time.sleep(1)
                    restart_tries -= 1

                    if restart_tries > 0:
                        continue
                    else:
                        return
            except:
                pass

            raise

        try_restart = False
Code example #48
File: worker.py Project: x140y40/anger6Spider
         "anger6Spider.spiders.jd_spiders.Jd_BaseSpider"),
        (r"^http://item\.jd\.com.*",
         "anger6Spider.spiders.jd_spiders.Jd_Item_Spider"),
    ], **app_settings)

    cocurrency = 20

    from anger6Spider.spiderQueue.redisqueue import RedisQueue
    queue = RedisQueue(**settings)
    queue._create_redis_cli()
    #yield queue.put("http://www.jianshu.com")
    #yield queue.put("http://www.jd.com")
    #yield queue.put("http://www.ivsky.com")
    #yield queue.put("http://www.jd.com")

    workers = []
    for _ in range(cocurrency):
        workers.append(Worker(app, queue))

    for worker in workers:
        Log4Spider.debugLog("worker begin:", worker)
        worker.run()

    Log4Spider.debugLog("waitiing for spiderQueue empty:")
    yield queue.join(None)
    Log4Spider.debugLog("main done!")


if __name__ == "__main__":
    IOLoop.current().instance().run_sync(main)
Code example #49
File: handlers.py Project: fred521/jupyter_server
 def on_open(self, kernel_id, message_callback, **kwargs):
     """Web socket connection open against gateway server."""
     self._connect(kernel_id)
     loop = IOLoop.current()
     loop.add_future(self.ws_future,
                     lambda future: self._read_messages(message_callback))
Code example #50
File: main.py Project: newguangzhou/now-hello
            if end is not None:
                end_time = utils.str2datetime(end, "%Y-%m-%d %H:%M:%S")
        except Exception as e:
            self.write("arg error ")
            return

        op_ret = yield op_log_dao.get_log_info(start_time, end_time, imei,
                                               ("imei", "content", "log_time"))
        ret = "<html>"
        for item in op_ret:
            ret += " 【log_time】:%s 【imei】:%s 【content】:%s <br><br>" % (
                utils.date2str(
                    item["log_time"]), item["imei"], item["content"])
            #ret
        ret += "</html>"
        self.write(ret)


if __name__ == '__main__':
    tornado.options.options.logging = "debug"
    tornado.options.parse_command_line()
    IOLoop.current().run_sync(_async_init)
    webapp = Application(
        [
            (r"/op_log", GetOpLogHandler),
        ],
        op_log_dao=OPLogDAO.new(mongo_meta=mongo_conf.op_log_mongo_meta),
    )
    webapp.listen(listen_port)
    IOLoop.current().start()
Code example #51
File: fds.py Project: edwardjkim/stress-proxy
def start_echo(host, name, n):
    loop = IOLoop.current()
    url = 'ws://%s:8000/%s/ws' % (host, name)
    echo(url, n)
    loop.add_timeout(loop.time() + 0.25, start_echo, host, name, n + 1)
Code example #52
File: netutil.py Project: yufi113/tornado
 def resolve(self, host, port, family=socket.AF_UNSPEC):
     result = yield IOLoop.current().run_in_executor(
         None, _resolve_addr, host, port, family)
     raise gen.Return(result)
Code example #53
File: app.py Project: kinow/tornado-sandbox
 def open(self, *args, **kwargs):
     IOLoop.current().spawn_callback(self.subscription_server.handle, self)
Code example #54
File: fds.py Project: edwardjkim/stress-proxy
def sleep(t):
    loop = IOLoop.current()
    return Task(loop.add_timeout, loop.time() + t)
Code example #55
File: main.py Project: JinkelaCrops/tornado-learning
            item = yield q.get()
            print('Doing work on %s' % item)
            yield gen.sleep(0.5)


@gen.coroutine
def producer():
    for item in range(5):
        yield q.put(item)
        print('Put %s' % item)
    for item in range(5):
        yield q.put(item)
        print('Put %s' % item)


for item in range(5):
    q.put(item)


@gen.coroutine
def main():
    # Start consumer without waiting (since it never finishes).
    IOLoop.current().spawn_callback(consumer)
    # yield producer()  # Wait for producer to put all tasks.
    print("abc")
    yield q.join()  # Wait for consumer to finish all tasks.
    print('Done')


IOLoop.current().run_sync(main)
Code example #56
File: core.py Project: yadav-avneesh/distributed
    def __init__(
        self,
        handlers,
        blocked_handlers=None,
        stream_handlers=None,
        connection_limit=512,
        deserialize=True,
        io_loop=None,
    ):
        self.handlers = {
            "identity": self.identity,
            "connection_stream": self.handle_stream,
        }
        self.handlers.update(handlers)
        if blocked_handlers is None:
            blocked_handlers = dask.config.get(
                "distributed.%s.blocked-handlers" %
                type(self).__name__.lower(), [])
        self.blocked_handlers = blocked_handlers
        self.stream_handlers = {}
        self.stream_handlers.update(stream_handlers or {})

        self.id = type(self).__name__ + "-" + str(uuid.uuid4())
        self._address = None
        self._listen_address = None
        self._port = None
        self._comms = {}
        self.deserialize = deserialize
        self.monitor = SystemMonitor()
        self.counters = None
        self.digests = None
        self.events = None
        self.event_counts = None
        self._ongoing_coroutines = weakref.WeakSet()
        self._event_finished = Event()

        self.listener = None
        self.io_loop = io_loop or IOLoop.current()
        self.loop = self.io_loop

        if not hasattr(self.io_loop, "profile"):
            ref = weakref.ref(self.io_loop)

            if hasattr(self.io_loop, "asyncio_loop"):

                def stop():
                    loop = ref()
                    return loop is None or loop.asyncio_loop.is_closed()

            else:

                def stop():
                    loop = ref()
                    return loop is None or loop._closing

            self.io_loop.profile = profile.watch(
                omit=("profile.py", "selectors.py"),
                interval=dask.config.get(
                    "distributed.worker.profile.interval"),
                cycle=dask.config.get("distributed.worker.profile.cycle"),
                stop=stop,
            )

        # Statistics counters for various events
        with ignoring(ImportError):
            from .counter import Digest

            self.digests = defaultdict(partial(Digest, loop=self.io_loop))

        from .counter import Counter

        self.counters = defaultdict(partial(Counter, loop=self.io_loop))
        self.events = defaultdict(lambda: deque(maxlen=10000))
        self.event_counts = defaultdict(lambda: 0)

        self.periodic_callbacks = dict()

        pc = PeriodicCallback(self.monitor.update, 500, io_loop=self.io_loop)
        self.periodic_callbacks["monitor"] = pc

        self._last_tick = time()
        pc = PeriodicCallback(
            self._measure_tick,
            parse_timedelta(dask.config.get("distributed.admin.tick.interval"),
                            default="ms") * 1000,
            io_loop=self.io_loop,
        )
        self.periodic_callbacks["tick"] = pc

        self.thread_id = 0

        def set_thread_ident():
            self.thread_id = threading.get_ident()

        self.io_loop.add_callback(set_thread_ident)

        self.__stopped = False
Code example #57
def main(
    scheduler,
    host,
    nthreads,
    name,
    memory_limit,
    device_memory_limit,
    pid_file,
    resources,
    dashboard,
    dashboard_address,
    local_directory,
    scheduler_file,
    interface,
    death_timeout,
    preload,
    dashboard_prefix,
    tls_ca_file,
    tls_cert,
    tls_key,
    **kwargs,
):
    enable_proctitle_on_current()
    enable_proctitle_on_children()

    sec = Security(tls_ca_file=tls_ca_file,
                   tls_worker_cert=tls_cert,
                   tls_worker_key=tls_key)

    try:
        nprocs = len(os.environ["CUDA_VISIBLE_DEVICES"].split(","))
    except KeyError:
        nprocs = get_n_gpus()

    if not nthreads:
        nthreads = min(1, multiprocessing.cpu_count() // nprocs)

    if pid_file:
        with open(pid_file, "w") as f:
            f.write(str(os.getpid()))

        def del_pid_file():
            if os.path.exists(pid_file):
                os.remove(pid_file)

        atexit.register(del_pid_file)

    services = {}

    if dashboard:
        try:
            from distributed.dashboard import BokehWorker
        except ImportError:
            pass
        else:
            if dashboard_prefix:
                result = (BokehWorker, {"prefix": dashboard_prefix})
            else:
                result = BokehWorker
            services[("dashboard", dashboard_address)] = result

    if resources:
        resources = resources.replace(",", " ").split()
        resources = dict(pair.split("=") for pair in resources)
        resources = valmap(float, resources)
    else:
        resources = None

    loop = IOLoop.current()

    preload_argv = kwargs.get("preload_argv", [])
    kwargs = {"worker_port": None, "listen_address": None}
    t = Nanny

    if not scheduler and not scheduler_file and "scheduler-address" not in config:
        raise ValueError("Need to provide scheduler address like\n"
                         "dask-worker SCHEDULER_ADDRESS:8786")

    if interface:
        if host:
            raise ValueError("Can not specify both interface and host")
        else:
            host = get_ip_interface(interface)

    nannies = [
        t(
            scheduler,
            scheduler_file=scheduler_file,
            nthreads=nthreads,
            services=services,
            loop=loop,
            resources=resources,
            memory_limit=memory_limit,
            host=host,
            preload=(list(preload) or []) + ["dask_cuda.initialize"],
            preload_argv=(list(preload_argv) or []) +
            ["--create-cuda-context"],
            security=sec,
            env={"CUDA_VISIBLE_DEVICES": cuda_visible_devices(i)},
            plugins={CPUAffinity(get_cpu_affinity(i))},
            name=name if nprocs == 1 or not name else name + "-" + str(i),
            local_directory=local_directory,
            data=(
                DeviceHostFile,
                {
                    "device_memory_limit":
                    get_device_total_memory(index=i) if
                    (device_memory_limit == "auto" or device_memory_limit
                     == int(0)) else parse_bytes(device_memory_limit),
                    "memory_limit":
                    parse_memory_limit(memory_limit,
                                       nthreads,
                                       total_cores=nprocs),
                    "local_directory":
                    local_directory,
                },
            ),
            **kwargs,
        ) for i in range(nprocs)
    ]

    @gen.coroutine
    def close_all():
        # Unregister all workers from scheduler
        yield [n._close(timeout=2) for n in nannies]

    def on_signal(signum):
        logger.info("Exiting on signal %d", signum)
        close_all()

    @gen.coroutine
    def run():
        yield nannies
        yield [n.finished() for n in nannies]

    install_signal_handlers(loop, cleanup=on_signal)

    try:
        loop.run_sync(run)
    except (KeyboardInterrupt, TimeoutError):
        pass
    finally:
        logger.info("End worker")
Code example #58
 def start_rest_app(self):
     app = self.make_rest_app()
     app.listen(REST_PORT)
     log.info(
         "REST worker started and listening to port {}".format(REST_PORT))
     IOLoop.current().start()
Code example #59
 def _do_heartbeat(self):
     """ 服务器心跳
     """
     from tbag.core.heartbeat import heartbeat
     logger.info('Heartbeat started...')
     IOLoop.current().call_later(3, heartbeat.start)
Code example #60
File: hardware.py Project: M0WUT/wspr-1
 def poll_serial(self):
     while True:
         data = self.serial.readline()
         IO.current().add_callback(self.route_command, data)