Example #1
    def decode(self, pieces):
        ASSERT.false(self.eof)
        output = []
        for data in pieces:
            if data:
                self._decode(data, output)
        return output
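
All of these examples lean on the same ``ASSERT.false`` precondition helper. Below is a minimal sketch of the semantics the call sites appear to rely on (raise on a truthy value, otherwise return the value so checks compose inline); the class name ``_Assertions`` is a hypothetical stand-in for the real implementation, which clearly offers many more checks (``true``, ``none``, ``not_none``, ``equal``, ``isinstance``, and an optional ``message=`` keyword, as in Example #21).

    class _Assertions:
        """Hypothetical stand-in for the ASSERT helper in these examples."""

        def false(self, value, *, message='expect false-ish value: {!r}'):
            # Raise on truthy input; return the value so that checks can
            # be chained inline (compare the ASSERT.not_none(...) usage
            # in later examples).
            if value:
                raise AssertionError(message.format(value))
            return value

    ASSERT = _Assertions()

    ASSERT.false([])    # Passes and returns [].
    ASSERT.false(True)  # Raises AssertionError.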
Example #2
    def poll(self, timeout):
        ASSERT.false(self._epoll.closed)

        with self._lock:
            if self._closed_fds:
                closed_fds, self._closed_fds = self._closed_fds, set()
                return closed_fds, closed_fds

        if timeout is None:
            pass
        elif timeout <= 0:
            timeout = 0
        else:
            # epoll_wait() has a resolution of 1 millisecond.
            timeout = math.ceil(timeout * 1e3) * 1e-3

        can_read = []
        can_write = []
        # Since Python 3.5, poll retries with a re-computed timeout
        # rather than raising InterruptedError (see PEP 475).
        for fd, events in self._epoll.poll(timeout=timeout):
            if events & self._EVENT_IN:
                can_read.append(fd)
            if events & self._EVENT_OUT:
                can_write.append(fd)

        return can_read, can_write
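
The rounding step above exists because ``epoll_wait()`` takes its timeout in whole milliseconds; rounding up rather than down avoids repeated zero-length waits when a sub-millisecond timeout remains. A quick illustration of the arithmetic:

    import math

    for timeout in (0.0004, 0.001, 0.0036):
        # Round up to the next millisecond, as poll() does above.
        print(timeout, '->', math.ceil(timeout * 1e3) * 1e-3)
    # 0.0004 -> 0.001
    # 0.001  -> 0.001
    # 0.0036 -> 0.004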
Example #3
    def _decode(self, data, output):

        def move(n):
            """Move ``n`` bytes from ``data`` to ``output``."""
            nonlocal data
            ASSERT.greater_or_equal(self._chunk_remaining, n)
            output.append(data[:n])
            data = data[n:]
            self._chunk_remaining -= n

        def expect(pattern):
            """Drop ``pattern`` prefix from ``data``."""
            nonlocal data
            n = min(len(pattern), len(data))
            ASSERT.equal(pattern[:n], data[:n])
            data = data[n:]
            return n

        while data:
            if self._chunk_remaining > 0:
                move(min(self._chunk_remaining, len(data)))
                continue

            if self._chunk_remaining == 0:
                self._chunk_remaining -= expect(b'\r\n')
                continue

            if self._chunk_remaining == -1:
                self._chunk_remaining -= expect(b'\n')
                continue

            match = self._CRLF_PATTERN.search(data)
            if not match:
                self._append(data)
                match = self._CRLF_PATTERN.search(self._buffer[:self._pos])
                if not match:
                    break
                data = self._reset()

            chunk_size = data[:match.start()]
            if self._pos > 0:
                self._append(chunk_size)
                chunk_size = self._reset()
            # TODO: Handle parameters (stuff after ';').
            chunk_size = int(
                bytes(chunk_size).split(b';', maxsplit=1)[0],
                base=16,
            )
            if chunk_size == 0:
                # TODO: Handle trailers.
                self.eof = True
            else:
                ASSERT.false(self.eof)

            data = data[match.end():]
            self._chunk_remaining = chunk_size

        if self.eof:
            ASSERT.empty(data)
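
For reference, the wire format that ``_decode`` parses incrementally is HTTP/1.1 chunked transfer encoding: a hexadecimal size line (possibly with ';'-separated parameters), CRLF, the chunk payload, CRLF, repeated until a zero-size chunk. Here is a self-contained toy decoder for a complete, well-formed body; unlike the incremental version above, it assumes everything is already in memory and, like the original, it ignores chunk parameters and trailers.

    def parse_chunked(data):
        """Decode a complete, well-formed chunked body (toy sketch)."""
        output = []
        while True:
            size_line, _, data = data.partition(b'\r\n')
            chunk_size = int(size_line.split(b';', maxsplit=1)[0], base=16)
            if chunk_size == 0:
                return b''.join(output)  # Trailers, if any, are ignored.
            output.append(data[:chunk_size])
            data = data[chunk_size + 2:]  # Skip payload plus its CRLF.

    assert parse_chunked(b'5\r\nhello\r\n6\r\n world\r\n0\r\n\r\n') == b'hello world'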
Example #4
    def notify_close(self, fd):
        ASSERT.false(self._epoll.closed)
        with self._lock:
            self._closed_fds.add(fd)
        try:
            self._epoll.unregister(fd)
        except OSError as exc:
            if exc.errno != errno.EBADF:
                raise
Example #5
def _parse_version(ps):
    ASSERT.false(ps['//bases:build-xar-image'])
    return Version(
        *map(
            int,
            ASSERT.not_none(
                _VERSION_PATTERN.search(ps['archive'].url)
            ).groups(),
        )
    )
Example #6
    def cancel(self, task):
        """Cancel the task.

        This is a no-op if the task has been completed.
        """
        ASSERT.false(self._closed)
        self._assert_owner()
        ASSERT.is_(task._kernel, self)
        if not task.is_completed():
            self._disrupt(task, errors.TaskCancellation)
Example #7
    def timeout_after(self, task, duration):
        ASSERT.false(self._closed)
        self._assert_owner()
        ASSERT.is_(task._kernel, self)
        if duration is None:
            return lambda: None
        # Even if duration <= 0, the kernel should raise ``Timeout`` at
        # the next blocking trap for consistency (so, don't raise here).
        self._timeout_after_blocker.block(time.monotonic() + duration, task)
        return functools.partial(self._timeout_after_blocker.cancel, task)
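
Because ``timeout_after`` hands back a cancellation callable (a no-op lambda when ``duration`` is ``None``), the caller can disarm the timeout once the guarded work finishes. A hedged sketch of the call pattern, with ``kernel``, ``task``, and ``do_work`` assumed from the surrounding code:

    cancel = kernel.timeout_after(task, duration=5.0)
    try:
        do_work()  # Hypothetical work that Timeout may interrupt.
    finally:
        cancel()  # Disarm the timeout if the work finished in time.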
Example #8
    def spawn(self, awaitable):
        """Spawn a new task onto the kernel."""
        ASSERT.false(self._closed)
        self._assert_owner()
        if tasks.Task.is_coroutine(awaitable):
            coroutine = awaitable
        elif inspect.isawaitable(awaitable):
            coroutine = awaitable.__await__()
        else:
            coroutine = awaitable()
        task = tasks.Task(self, coroutine)
        self._ready_tasks.append(TaskReady(task, None, None))
        self._num_tasks += 1
        return task
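
As the branches above show, ``spawn`` accepts a coroutine object, any awaitable, or a callable that produces one. A hedged usage sketch (``kernel`` is assumed from context):

    async def child():
        return 42

    task = kernel.spawn(child())  # A coroutine object...
    task = kernel.spawn(child)    # ...or a callable returning one,
    # ...or any object for which inspect.isawaitable() is true.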
Example #9
    async def begin(self, status, headers):
        ASSERT.false(self._has_begun)
        self._has_begun = True

        buffer = io.BytesIO()
        buffer.write(
            b'HTTP/1.1 %d %s\r\n' % (status, self._ENCODED_REASONS[status])
        )
        for key, value in headers:
            buffer.write(b'%s: %s\r\n' % (key, value))
        buffer.write(b'\r\n')

        await self._send_all(buffer.getvalue())
        self._headers_sent.set()
Example #10
    def err_after_commit(self, exc):
        """Record exception raised after commit.

        This first closes the response, dropping remaining body data,
        and then calls start_response with HTTP 5xx and exc_info.  If
        the WSGI server has not yet started sending response, it resets
        the response to HTTP 500; otherwise it re-raises the exception.
        """
        ASSERT.false(self.is_uncommitted())
        self._body.close(graceful=False)
        self._start_response(
            self._format_status(consts.Statuses.INTERNAL_SERVER_ERROR),
            [],
            (exc.__class__, exc, exc.__traceback__),
        )
Example #11
def build(parameters):
    src_path = _find_project(parameters, foreman.get_relpath())
    root_path = _find_root_project(src_path)
    ASSERT.false(src_path.samefile(root_path))
    output_path = src_path / ('build/libs/%s-all.jar' % src_path.name)
    task = ':'.join(src_path.relative_to(root_path).parts)
    task = ':%s:shadowJar' % task
    target_dir_path = parameters[root_project + ':packages']
    if (target_dir_path / output_path.name).exists():
        LOG.info('skip: run task %s', task)
        return
    LOG.info('run task %s', task)
    with scripts.using_cwd(root_path):
        scripts.run(['./gradlew', task])
    with scripts.using_sudo():
        scripts.mkdir(target_dir_path)
        scripts.cp(output_path, target_dir_path)
Example #12
    def __init__(self, data=b'', *, msg_p=None):

        # In case ``__init__`` raises.
        self._msg_p = None

        ASSERT.isinstance(data, bytes)

        if msg_p is None:
            msg_p = _nng.nng_msg_p()
            errors.check(_nng.F.nng_msg_alloc(ctypes.byref(msg_p), len(data)))
            if data:
                ctypes.memmove(_nng.F.nng_msg_body(msg_p), data, len(data))

        else:
            # We are taking ownership of ``msg_p`` and should not take
            # any initial data.
            ASSERT.false(data)

        self._msg_p = msg_p
        self.header = Header(self._get)
        self.body = Body(self._get)
Example #13
    def tick(self, trap_result, trap_exception):
        """Run coroutine through the next trap point.

        NOTE: ``tick`` catches ``BaseException`` raised from the
        coroutine.  As a result, ``SystemExit`` does not bubble up to
        the kernel event loop.  I believe this behavior is similar to
        the Python threading library and thus more expected
        (``SystemExit`` raised in a non-main thread does not cause the
        CPython process to exit).  If you want raising ``SystemExit``
        in a task to be effective, you have to call
        ``Task.get_result_nonblocking`` in the main thread (or
        implicitly through ``Kernel.run``).
        """
        ASSERT.false(self._completed)
        if trap_exception:
            trap = self._tick(self._coroutine.throw, trap_exception)
        else:
            trap = self._tick(self._coroutine.send, trap_result)
        if trap is not None:
            return trap
        ASSERT.true(self._completed)
        self._call_callbacks()
        return None
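
The docstring's analogy to the threading library is easy to verify: ``SystemExit`` raised in a non-main thread is swallowed by the thread bootstrap and does not terminate CPython.

    import threading

    def worker():
        raise SystemExit  # Ends only this thread, silently.

    t = threading.Thread(target=worker)
    t.start()
    t.join()
    print('process still running')  # This line is reached.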
Example #14
    def start_response(self, status, response_headers, exc_info=None):
        if exc_info:
            try:
                if self._is_committed:
                    exc = exc_info[1]
                    if exc is None:
                        exc = exc_info[0]()
                    if exc.__traceback__ is not exc_info[2]:
                        exc.with_traceback(exc_info[2])
                    raise exc
            finally:
                exc_info = None  # Avoid dangling cyclic ref.
        else:
            ASSERT.false(self._is_committed)

        # Get the status code from status line like "200 OK".
        self._status = http.HTTPStatus(int(status.split(maxsplit=1)[0]))
        self._headers = [
            (name.encode('iso-8859-1'), value.encode('iso-8859-1'))
            for name, value in response_headers
        ]

        return self.write
Example #15
    def _run_one_ready_task(self):

        task, trap_result, trap_exception = self._ready_tasks.popleft()

        override = self._to_raise.pop(task, None)
        if override is not None:
            trap_result = None
            trap_exception = override

        self._current_task = task
        try:
            trap = task.tick(trap_result, trap_exception)
        finally:
            self._current_task = None

        if trap is None:
            ASSERT.true(task.is_completed())
            self._trap_return(self._task_completion_blocker, task)
            # Clear disrupter.
            self._to_raise.pop(task, None)
            self._timeout_after_blocker.cancel(task)
            self._num_tasks -= 1
            return task

        ASSERT.false(task.is_completed())
        override = self._to_raise.pop(task, None)
        if override:
            self._ready_tasks.append(TaskReady(task, None, override))
        else:
            handler = self._blocking_trap_handlers[trap.kind]
            try:
                handler(task, trap)
            except Exception as exc:
                self._ready_tasks.append(TaskReady(task, None, exc))

        return None
Example #16
    def notify_open(self, fd):
        ASSERT.false(self._epoll.closed)
        try:
            self._epoll.register(fd, self._EVENT_MASK)
        except FileExistsError:
            pass
Example #17
            parser = _get_html_parser(encoding
                                      or ASSERT.not_none(self.encoding))
        # Check whether fromstring returns None because apparently
        # HTMLParser is more lenient than XMLParser and may cause
        # fromstring to return None on some malformed HTML input.
        return ASSERT.not_none(lxml.etree.fromstring(string, parser))

    def xml(self):
        """Parse response as an XML document."""
        return lxml.etree.fromstring(self.content, _XML_PARSER)


@functools.lru_cache(maxsize=8)
def _get_html_parser(encoding):
    return lxml.etree.HTMLParser(encoding=encoding)


_XML_PARSER = lxml.etree.XMLParser()

#
# Monkey-patch ``requests.Response``.
#

# Just to make sure we do not accidentally override them.
ASSERT.false(hasattr(requests.Response, 'recvfile'))
requests.Response.recvfile = recvfiles.recvfile
ASSERT.false(hasattr(requests.Response, 'html'))
requests.Response.html = Response.html
ASSERT.false(hasattr(requests.Response, 'xml'))
requests.Response.xml = Response.xml
Example #18
    def block(self, source, task):
        """Record that ``task`` is joining on ``source`` task."""
        ASSERT.isinstance(source, tasks.Task)
        ASSERT.is_not(source, task)  # A task can't join on itself.
        ASSERT.false(source.is_completed())
        return super().block(source, task)
Example #19
    def write_nonblocking(self, data):
        ASSERT.false(self._closed)
        self._gate.unblock()
        return self._buffer.write(data)
Example #20
    def assert_keyless(self):
        ASSERT.false(self.is_keyed(), message='expect keyless schema')
Example #21
    def set(self, value):
        ASSERT.false(self._have_been_read)
        self._value = self._validate(value)
Example #22
    def run(self, awaitable=None, timeout=None):
        """Run spawned tasks through completion.

        If ``awaitable`` is not ``None``, a task is spawned for it, and
        when the task completes, ``run`` returns its result.

        If ``timeout`` is non-positive, ``run`` is guaranteed to
        iterate exactly once.
        """
        ASSERT.false(self._closed)
        self._assert_owner()
        ASSERT.none(self._current_task)  # Disallow recursive calls.

        main_task = self.spawn(awaitable) if awaitable else None
        run_timer = timers.make(timeout)

        while self._num_tasks > 0:

            # Do sanity check every ``_sanity_check_frequency`` ticks.
            if self._num_ticks % self._sanity_check_frequency == 0:
                self._sanity_check()
            self._num_ticks += 1

            # Fire callbacks posted by other threads.
            with self._callbacks_lock:
                callbacks, self._callbacks = \
                    self._callbacks, collections.deque()
            for callback in callbacks:
                callback()
            del callbacks

            # Run all ready tasks.
            with self._managing_async_generators():
                while self._ready_tasks:
                    completed_task = self._run_one_ready_task()
                    if completed_task and completed_task is main_task:
                        # Return the result eagerly.  If you want to run
                        # all remaining tasks through completion, just
                        # call ``run`` again with no arguments.
                        return completed_task.get_result_nonblocking()

            if self._num_tasks > 0:
                # Poll I/O.
                now = time.monotonic()
                poll_timeout = min(
                    run_timer.get_timeout(),
                    self._sleep_blocker.get_min_timeout(now),
                    self._timeout_after_blocker.get_min_timeout(now),
                    key=timers.timeout_to_key,
                )
                can_read, can_write = self._poller.poll(poll_timeout)
                for fd in can_read:
                    if self._nudger.is_nudged(fd):
                        self._nudger.ack()
                    else:
                        self._trap_return(self._read_blocker, fd)
                for fd in can_write:
                    self._trap_return(self._write_blocker, fd)

                # Handle any task timeout.
                now = time.monotonic()
                self._trap_return(self._sleep_blocker, now)
                self._timeout_after_on_completion(now)

            # Break if ``run`` times out.
            if run_timer.is_expired():
                raise errors.KernelTimeout
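
A hedged sketch of driving the kernel per the docstring above; the ``Kernel`` constructor and the ``main`` coroutine function are assumptions, and ``errors.KernelTimeout`` is the exception raised at the end of ``run``:

    async def main():
        return 'done'

    kernel = Kernel()  # Assumed constructor from the surrounding module.
    try:
        result = kernel.run(main, timeout=10)  # Spawns main, waits for it.
    except errors.KernelTimeout:
        result = None  # Spawned tasks did not finish within 10 seconds.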
Example #23
    def notify_open(self, fd):
        ASSERT.false(self._closed)
        self._assert_owner()
        self._poller.notify_open(fd)
Example #24
    def image_path(self):
        ASSERT.false(self.deploy_instruction.is_zipapp())
        return self.path / models.XAR_BUNDLE_IMAGE_FILENAME
Example #25
    def unblock(self, source):
        """Unblock tasks blocked by ``source``."""
        ASSERT.false(self._closed)
        self._assert_owner()
        self._trap_return(self._generic_blocker, source)
Example #26
async def recvfile(response, file):
    """Receive response body into a file.

    The caller must set ``stream`` to true when making the request.

    DANGER! This breaks the multiple levels of encapsulation, from
    requests.Response all the way down to http.client.HTTPResponse.
    As a result, the response object is most likely unusable after a
    recvfile call, and you should probably close it immediately.
    """
    # requests sets _content to False initially.
    ASSERT.is_(response._content, False)
    ASSERT.false(response._content_consumed)

    urllib3_response = ASSERT.not_none(response.raw)
    chunked = urllib3_response.chunked

    httplib_response = ASSERT.isinstance(
        urllib3_response._fp, http.client.HTTPResponse
    )
    ASSERT.false(httplib_response.closed)
    sock = ASSERT.isinstance(httplib_response.fp.raw._sock, socket.socket)

    output = DecoderChain(file)

    if chunked:
        chunk_decoder = ChunkDecoder()
        output.add(chunk_decoder)
        num_to_read = 0
        eof = lambda: chunk_decoder.eof
    else:
        num_to_read = ASSERT.greater(
            ASSERT.not_none(httplib_response.length), 0
        )
        eof = lambda: num_to_read <= 0

    # Use urllib3's decoder code.
    urllib3_response._init_decoder()
    if urllib3_response._decoder is not None:
        output.add(ContentDecoder(urllib3_response._decoder))

    with contextlib.ExitStack() as stack:
        src = adapters.FileAdapter(httplib_response.fp)
        stack.callback(src.disown)

        sock.setblocking(False)
        stack.callback(sock.setblocking, True)

        buffer = memoryview(stack.enter_context(_BUFFER_POOL.using()))
        while not eof():
            if chunked:
                # TODO: If the server sends more data at the end, such
                # as the response to the next request, recvfile might
                # currently read it and then err out.  Maybe recvfile
                # should check this and not read more than it should?
                num_read = await src.readinto1(buffer)
            else:
                num_read = await src.readinto1(
                    buffer[:min(num_to_read, _CHUNK_SIZE)]
                )
            if num_read == 0:
                break
            output.write(buffer[:num_read])
            num_to_read -= num_read

        output.flush()

    # Sanity check.
    if not chunked:
        ASSERT.equal(num_to_read, 0)

    # Trick requests to release the connection back to the connection
    # pool, rather than closing/discarding it.
    response._content_consumed = True
    # http.client.HTTPConnection tracks the last response, so you have
    # to close it to make the connection object usable again.
    httplib_response.close()

    # Close the response for the caller, since the response is not
    # usable after recvfile.
    response.close()

    loggings.ONCE_PER(
        1000, LOG.info, 'buffer pool stats: %r', _BUFFER_POOL.get_stats()
    )
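
Per the docstring, the response must be requested with ``stream=True``, and it should not be reused afterwards. A hedged sketch of a call site (the ``session``, URL, and output path are hypothetical, and the call must run inside an async task):

    response = session.get('https://example.com/big-file', stream=True)
    with open('/tmp/big-file', 'wb') as file:
        await recvfile(response, file)
    # recvfile() has closed the response; do not touch it afterwards.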
Example #27
def popen(args):
    LOG.debug('popen: args=%s, context=%s', args, _CONTEXT)
    # It does not seem like we can return a fake Popen object.
    ASSERT.false(_get(_DRY_RUN))
    return subprocess.Popen(_prepare_args(args), **_prepare_kwargs())