def abort(self):
    """Close the running coroutine.

    This is the last resort for releasing resources acquired by the
    coroutine, not a part of normal task cleanup.  One good place to
    call ``abort`` is when kernel is closing.
    """
    if self._completed:
        return
    LOG.warning('abort task: %r', self)
    # ``close`` returns None on success, and raises RuntimeError
    # when the coroutine cannot be aborted.
    ASSERT.none(self._tick(self._coroutine.close))
    if not self._completed:
        # ``close`` did not complete the task; force-complete it as
        # cancelled and notify callbacks.
        self._completed = True
        self._exception = errors.Cancelled('task abort')
        self._call_callbacks()
        return
    refused = (
        isinstance(self._exception, RuntimeError)
        and str(self._exception) == 'coroutine ignored GeneratorExit'
    )
    if refused:
        # The coroutine swallowed GeneratorExit and keeps running;
        # resurrect the task so its state reflects that.
        LOG.warning('task cannot be aborted: %r', self)
        self._completed = False
        self._exception = None
    else:
        self._call_callbacks()
def html(self, encoding=None, errors=None):
    """Parse response content as an HTML document.

    ``encoding`` and ``errors`` instruct us how to decode the response
    content before parsing.  This is useful because lxml's default is
    to **silently** skip the rest of the document when there is any
    encoding error in the middle.  lxml's strict-but-silent policy is
    counterproductive because web is full of malformed documents, and
    it should either be lenient about the error, or raise it to the
    caller, not a mix of both as it is right now.
    """
    if encoding and errors:
        # Decode ourselves so the caller's ``errors`` policy (e.g.
        # 'replace') is honored; the parser then receives text.
        document = self.content.decode(encoding=encoding, errors=errors)
        parser = _get_html_parser(None)
    else:
        ASSERT.none(errors)
        document = self.content
        parser = _get_html_parser(
            encoding or ASSERT.not_none(self.encoding)
        )
    # HTMLParser is more lenient than XMLParser and may cause
    # ``fromstring`` to return None on some malformed HTML input;
    # fail loudly in that case rather than returning None.
    return ASSERT.not_none(lxml.etree.fromstring(document, parser))
def __post_init__(self):
    """Validate field combinations after dataclass initialization."""
    validate_app_name(self.name)
    if self.service_section is not None:
        # A full service section supersedes the shorthand fields;
        # they must all be unset.
        ASSERT.empty(self.exec)
        ASSERT.none(self.type)
        ASSERT.none(self.kill_mode)
    else:
        # Without a service section, the shorthand fields are
        # required and must hold known values.
        ASSERT.not_empty(self.exec)
        ASSERT.in_(self.type, _SERVICE_TYPES)
        ASSERT.in_(self.kill_mode, _KILL_MODES)
def __enter__(self):
    """Install a wakeup socket pair for receiving signal numbers."""
    # ``set_wakeup_fd`` can only be called from the main thread.
    ASSERT.is_(threading.current_thread(), threading.main_thread())
    # ``SignalSource`` is a singleton and is intended to be used as
    # such; disallow nested use.
    ASSERT.none(self._wakeup_fd)
    receiver, sender = socket.socketpair()
    self._sock_r = adapters.SocketAdapter(receiver)
    self._sock_w = sender
    sender.setblocking(False)
    self._wakeup_fd = signal.set_wakeup_fd(sender.fileno())
    return self
def __init__(
    self,
    cluster_stubs,
    executor=None,
    num_pools=0,
    num_connections_per_pool=0,
):
    """Create one shared base session and attach it to every stub.

    Each cluster stub must not already have a base session attached.
    """
    ASSERT.not_empty(cluster_stubs)
    session = bases.BaseSession(
        executor=executor,
        num_pools=num_pools,
        num_connections_per_pool=num_connections_per_pool,
    )
    self._base_session = session
    self._cluster_stubs = cluster_stubs
    for stub in self._cluster_stubs:
        # Stubs share the single base session created above.
        ASSERT.none(stub._base_session)
        stub._base_session = session
def run(self, awaitable=None, timeout=None):
    """Run spawned tasks through completion.

    If ``awaitable`` is not ``None``, a task is spawned for it, and
    when the task completes, ``run`` returns its result.

    If ``timeout`` is non-positive, ``run`` is guaranteed to iterate
    exactly once.

    Raises ``errors.KernelTimeout`` when ``timeout`` expires before
    all tasks complete.
    """
    ASSERT.false(self._closed)
    self._assert_owner()
    ASSERT.none(self._current_task)  # Disallow recursive calls.
    main_task = self.spawn(awaitable) if awaitable else None
    run_timer = timers.make(timeout)
    while self._num_tasks > 0:
        # Do sanity check every ``_sanity_check_frequency`` ticks.
        if self._num_ticks % self._sanity_check_frequency == 0:
            self._sanity_check()
        self._num_ticks += 1
        # Fire callbacks posted by other threads.  Swap out the
        # queue under the lock, then invoke the callbacks outside it.
        with self._callbacks_lock:
            callbacks, self._callbacks = \
                self._callbacks, collections.deque()
        for callback in callbacks:
            callback()
        del callbacks
        # Run all ready tasks.
        with self._managing_async_generators():
            while self._ready_tasks:
                completed_task = self._run_one_ready_task()
                if completed_task and completed_task is main_task:
                    # Return the result eagerly.  If you want to run
                    # all remaining tasks through completion, just
                    # call ``run`` again with no arguments.
                    return completed_task.get_result_nonblocking()
        if self._num_tasks > 0:
            # Poll I/O.  The poll timeout is the earliest of the run
            # deadline and the nearest sleep/timeout-after deadline.
            now = time.monotonic()
            poll_timeout = min(
                run_timer.get_timeout(),
                self._sleep_blocker.get_min_timeout(now),
                self._timeout_after_blocker.get_min_timeout(now),
                key=timers.timeout_to_key,
            )
            can_read, can_write = self._poller.poll(poll_timeout)
            for fd in can_read:
                # The nudger's fd is serviced internally; only real
                # reader fds are handed back to blocked tasks.
                if self._nudger.is_nudged(fd):
                    self._nudger.ack()
                else:
                    self._trap_return(self._read_blocker, fd)
            for fd in can_write:
                self._trap_return(self._write_blocker, fd)
            # Handle any task timeout.
            now = time.monotonic()
            self._trap_return(self._sleep_blocker, now)
            self._timeout_after_on_completion(now)
        # Break if ``run`` times out.
        if run_timer.is_expired():
            raise errors.KernelTimeout
def __enter__(self):
    """Spawn the subprocess and hand its stdout pipe to the caller."""
    # Disallow re-entering while a process is still tracked.
    ASSERT.none(self._proc)
    proc = subprocess.Popen(self._cmd, stdout=subprocess.PIPE)
    self._proc = proc
    return proc.stdout
def one_or_none(connectable, statement):
    """Execute ``statement`` and return its sole row (or None).

    Asserts that the result set contains at most one row.
    """
    with executing(connectable, statement) as result:
        first = result.fetchone()
        # A second fetch must come up empty.
        ASSERT.none(result.fetchone())
        return first