def test_unused_connection(self):
    """Open a connection, leave it unused, and wait for it to be closed."""
    closed = Event()
    conn = yield self.connect()
    conn.set_close_callback(closed.set)
    yield closed.wait()
def __init__(self):
    # Event other coroutines can wait on; when/why it is set depends on
    # the owner class (not visible here) — confirm against callers.
    self.event = Event()
def get(self):
    """Flush the response, then block forever on an Event that is never set."""
    self.flush()
    # This Event has no setter, so the coroutine never completes.
    yield Event().wait()
def __init__(self, rpc_method, params):
    """Record an outgoing RPC call and prepare to wait for its reply."""
    self.id = id(self)        # per-object request identifier
    self.rpc_method = rpc_method
    self.params = params
    self.response = None      # filled in when the reply arrives
    self._event = Event()     # set once a response is available
def __init__(self):
    """Initialize build-state bookkeeping and synchronization primitives."""
    self._building_log = []   # accumulated log entries
    self._exception = None    # exception captured during the build, if any
    self._blocked = count()   # counter of blocked waiters — confirm semantics
    self._mutex = Event()     # NOTE(review): named mutex but is an Event
def disconnect_tcp_socket(self):
    """Disconnect the debugpy socket and reset attach/handshake state."""
    self.debugpy_stream.socket.disconnect(self._get_endpoint())
    # Reset the handshake state so a future attach starts from scratch.
    self.wait_for_attach = True
    self.init_event_seq = -1
    self.init_event = Event()
    self.routing_id = None
def __init__(
    self,
    handlers,
    blocked_handlers=None,
    stream_handlers=None,
    connection_limit=512,
    deserialize=True,
    io_loop=None,
):
    """Set up RPC handlers, identity, monitoring and periodic callbacks.

    Parameters
    ----------
    handlers : dict
        Operation name -> handler; merged over the built-in
        ``identity`` / ``connection_stream`` handlers (so user handlers win).
    blocked_handlers : list, optional
        Handler names to refuse; defaults to the
        ``distributed.<classname>.blocked-handlers`` config value.
    stream_handlers : dict, optional
        Handlers for messages arriving over streams.
    connection_limit : int
        Not used in this constructor body — presumably consumed by the
        listening machinery; confirm against subclass/``listen`` usage.
    deserialize : bool
        Whether incoming messages are deserialized before dispatch.
    io_loop : IOLoop, optional
        Event loop to use; defaults to the current loop.
    """
    # Built-in handlers first so the caller-supplied dict can override them.
    self.handlers = {
        "identity": self.identity,
        "connection_stream": self.handle_stream,
    }
    self.handlers.update(handlers)
    if blocked_handlers is None:
        blocked_handlers = dask.config.get(
            "distributed.%s.blocked-handlers" % type(self).__name__.lower(), []
        )
    self.blocked_handlers = blocked_handlers
    self.stream_handlers = {}
    self.stream_handlers.update(stream_handlers or {})

    # Unique identity string, e.g. "Server-<uuid4>".
    self.id = type(self).__name__ + "-" + str(uuid.uuid4())
    self._address = None
    self._listen_address = None
    self._port = None
    self._comms = {}
    self.deserialize = deserialize
    self.monitor = SystemMonitor()
    self.counters = None
    self.digests = None
    self.events = None
    self.event_counts = None
    self._ongoing_coroutines = weakref.WeakSet()
    self._event_finished = Event()  # set when this server has finished
    self.listener = None
    self.io_loop = io_loop or IOLoop.current()
    self.loop = self.io_loop

    # Install a profile watcher once per loop.  The weakref avoids keeping
    # the loop alive; ``stop`` reports when the loop is gone or closed.
    if not hasattr(self.io_loop, "profile"):
        ref = weakref.ref(self.io_loop)

        if hasattr(self.io_loop, "asyncio_loop"):
            # Loop wraps an asyncio loop (tornado 5+).
            def stop():
                loop = ref()
                return loop is None or loop.asyncio_loop.is_closed()

        else:
            # Older tornado loops expose ``_closing`` instead.
            def stop():
                loop = ref()
                return loop is None or loop._closing

        self.io_loop.profile = profile.watch(
            omit=("profile.py", "selectors.py"),
            interval=dask.config.get("distributed.worker.profile.interval"),
            cycle=dask.config.get("distributed.worker.profile.cycle"),
            stop=stop,
        )

    # Statistics counters for various events
    with ignoring(ImportError):
        from .counter import Digest

        self.digests = defaultdict(partial(Digest, loop=self.io_loop))

    from .counter import Counter

    self.counters = defaultdict(partial(Counter, loop=self.io_loop))
    self.events = defaultdict(lambda: deque(maxlen=10000))
    self.event_counts = defaultdict(lambda: 0)

    self.periodic_callbacks = dict()

    # System monitor sampled every 500 ms.
    pc = PeriodicCallback(self.monitor.update, 500, io_loop=self.io_loop)
    self.periodic_callbacks["monitor"] = pc

    self._last_tick = time()
    # Tick measurement; config interval is parsed to seconds, hence * 1000
    # to get the millisecond period PeriodicCallback expects.
    pc = PeriodicCallback(
        self._measure_tick,
        parse_timedelta(
            dask.config.get("distributed.admin.tick.interval"), default="ms"
        )
        * 1000,
        io_loop=self.io_loop,
    )
    self.periodic_callbacks["tick"] = pc

    self.thread_id = 0

    # Record which thread runs the loop, for safe cross-thread dispatch.
    def set_thread_ident():
        self.thread_id = threading.get_ident()

    self.io_loop.add_callback(set_thread_ident)
    self.__stopped = False
def __init__(self, pubnub):
    # Event used to interrupt a pending reconnect wait — presumably set
    # when reconnection is cancelled; confirm against base-class usage.
    self._cancelled_event = Event()
    # Explicit two-argument super kept (likely for Python 2 compatibility).
    super(TornadoReconnectionManager, self).__init__(pubnub)
def _restart(self):
    """Ask the scheduler to restart, then block until it signals completion."""
    self._send_to_scheduler({'op': 'restart'})
    done = Event()
    self._restart_event = done  # scheduler-side handler sets this when done
    yield done.wait()
    raise gen.Return(self)
def __init__(self, scheduler=None, name=None):
    """Initialize base address/status/synchronization state.

    ``scheduler`` and ``name`` are accepted but not stored here —
    presumably consumed by subclasses; confirm against callers.
    """
    # Keep an address a subclass may have set before delegating here.
    self.address = getattr(self, "address", None)
    self.external_address = None
    self.status = "created"
    self.lock = asyncio.Lock()
    self._event_finished = Event()
def map(self, func, *iterables, **kwargs):
    """ Map a function on a sequence of arguments

    Arguments can be normal objects or Futures

    Parameters
    ----------
    func: callable
    iterables: Iterables
    pure: bool (defaults to True)
        Whether or not the function is pure.  Set ``pure=False`` for
        impure functions like ``np.random.random``.
    workers: set, iterable of sets
        A set of worker hostnames on which computations may be performed.
        Leave empty to default to all workers (common case)

    Examples
    --------
    >>> L = executor.map(func, sequence)  # doctest: +SKIP

    Returns
    -------
    list of futures

    See also
    --------
    distributed.executor.Executor.submit
    """
    pure = kwargs.pop('pure', True)
    workers = kwargs.pop('workers', None)
    if not callable(func):
        raise TypeError("First input to map must be a callable function")
    # Materialize the iterables: they are zipped/measured more than once.
    iterables = [list(it) for it in iterables]
    if pure:
        # Deterministic keys: same (func, args, kwargs) -> same key,
        # so repeated submissions deduplicate.
        keys = [
            funcname(func) + '-' + tokenize(func, kwargs, *args)
            for args in zip(*iterables)
        ]
    else:
        # Impure: fresh uid + counter makes every call unique; the key
        # count matches zip's length (the shortest iterable).
        uid = str(uuid.uuid4())
        keys = [
            funcname(func) + '-' + uid + '-' + next(tokens)
            for i in range(min(map(len, iterables)))
        ]
    if not kwargs:
        dsk = {
            key: (func, ) + tuple(map(quote, args))
            for key, args in zip(keys, zip(*iterables))
        }
    else:
        # Keyword arguments force the ``apply`` task form.
        dsk = {
            key: (apply, func, args, kwargs)
            for key, args in zip(keys, zip(*iterables))
        }
    # Track any brand-new keys as waiting futures.
    for key in dsk:
        if key not in self.futures:
            self.futures[key] = {'event': Event(), 'status': 'waiting'}
    if isinstance(workers, (list, set)):
        if workers and isinstance(first(workers), (list, set)):
            # One restriction set per task: lengths must line up.
            if len(workers) != len(keys):
                raise ValueError("You only provided %d worker restrictions"
                                 " for a sequence of length %d"
                                 % (len(workers), len(keys)))
            restrictions = dict(zip(keys, workers))
        else:
            # Same restriction set applied to every task.
            restrictions = {key: workers for key in keys}
    elif workers is None:
        restrictions = {}
    else:
        raise TypeError("Workers must be a list or set of workers or None")
    logger.debug("map(%s, ...)", funcname(func))
    # Hand the graph to the scheduler coroutine via its queue.
    self.scheduler_queue.put_nowait({
        'op': 'update-graph',
        'dsk': dsk,
        'keys': keys,
        'restrictions': restrictions
    })
    return [Future(key, self) for key in keys]
def submit(self, func, *args, **kwargs):
    """ Submit a function application to the scheduler

    Parameters
    ----------
    func: callable
    *args:
    **kwargs:
    pure: bool (defaults to True)
        Whether or not the function is pure.  Set ``pure=False`` for
        impure functions like ``np.random.random``.
    workers: set, iterable of sets
        A set of worker hostnames on which computations may be performed.
        Leave empty to default to all workers (common case)

    Examples
    --------
    >>> c = executor.submit(add, a, b)  # doctest: +SKIP

    Returns
    -------
    Future

    See Also
    --------
    distributed.executor.Executor.map
    """
    if not callable(func):
        raise TypeError(
            "First input to submit must be a callable function")
    key = kwargs.pop('key', None)
    pure = kwargs.pop('pure', True)
    workers = kwargs.pop('workers', None)
    if key is None:
        if pure:
            # Deterministic key: same inputs -> same key (deduplicates).
            key = funcname(func) + '-' + tokenize(func, kwargs, *args)
        else:
            # Impure: counter-based key makes every call unique.
            key = funcname(func) + '-' + next(tokens)
    # Already known to the scheduler: just hand back a future for it.
    if key in self.futures:
        return Future(key, self)
    # Quote args so the scheduler does not interpret them as tasks.
    args = quote(args)
    if kwargs:
        task = (apply, func, args, kwargs)
    else:
        task = (func, ) + args
    if workers is not None:
        restrictions = {key: workers}
    else:
        restrictions = {}
    if key not in self.futures:
        self.futures[key] = {'event': Event(), 'status': 'waiting'}
    logger.debug("Submit %s(...), %s", funcname(func), key)
    self.scheduler_queue.put_nowait({
        'op': 'update-graph',
        'dsk': {
            key: task
        },
        'keys': [key],
        'restrictions': restrictions
    })
    return Future(key, self)
#!/usr/bin/env python
# coding:utf-8
# Event: an event that blocks coroutines until its internal flag is set to
# True.  Similar to threading.Event: coroutines can wait for the event to be
# set, and once it is set, calling ``yield event.wait()`` will not block
# unless the event has been cleared again.
from tornado import gen
from tornado.ioloop import IOLoop
from tornado.locks import Event

event = Event()


@gen.coroutine
def waiter():
    """Wait for the shared event twice; the second wait returns immediately
    because the event is still set."""
    print('Waiting for event')
    yield event.wait()
    print('Not waiting this time')
    yield event.wait()
    print('Done')


@gen.coroutine
def setter():
    """Set the shared event, releasing any waiters."""
    print('About to set the event')
    event.set()


@gen.coroutine
def runner():
    """Run the waiter and setter concurrently."""
    yield [waiter(), setter()]
def __init__(self, idl):
    """Wrap a new OVSDB transaction plus an event signalling its outcome."""
    self.txn = ovs.db.idl.Transaction(idl)
    self.event = Event()   # set when the transaction result is known
    self.status = None     # transaction status, filled in on completion