Example #1
def apply_target(target,
                 args=(),
                 kwargs={},
                 callback=None,
                 accept_callback=None,
                 pid=None,
                 getpid=os.getpid,
                 propagate=(),
                 monotonic=monotonic,
                 **_):
    if accept_callback:
        accept_callback(pid or getpid(), monotonic())
    try:
        ret = target(*args, **kwargs)
    except propagate:
        raise
    except Exception:
        raise
    except (WorkerShutdown, WorkerTerminate):
        raise
    except BaseException as exc:
        try:
            reraise(WorkerLostError, WorkerLostError(repr(exc)),
                    sys.exc_info()[2])
        except WorkerLostError:
            callback(ExceptionInfo())
    else:
        callback(ret)
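The flow above is: accept_callback fires first with the worker pid and a timestamp, the target then runs, and callback receives either the return value or an ExceptionInfo. A minimal sketch of calling it, assuming the imports apply_target's own module already has (os, monotonic, the celery exception types); the print callbacks are stand-ins, not celery API:

    def accept(pid, started):
        print('accepted by pid %r at %r' % (pid, started))

    def done(result):
        print('result: %r' % (result, ))

    # accept_callback runs before the target; callback gets the return value
    apply_target(lambda x, y: x + y, args=(2, 3),
                 callback=done, accept_callback=accept)
    # -> accepted by pid ... at ...
    # -> result: 5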
Example #2
    def on_poll_init(self, w, hub):
        """Initialize async pool using the eventloop hub."""
        pool = self._pool
        pool._active_writers = self._active_writers

        self._create_timelimit_handlers(hub)
        self._create_process_handlers(hub)
        self._create_write_handlers(hub)

        # did_start_ok will verify that pool processes were able to start,
        # but this will only work the first time we start, as
        # maxtasksperchild will mess up metrics.
        if not w.consumer.restart_count and not pool.did_start_ok():
            raise WorkerLostError('Could not start worker processes')

        # Maintain_pool is called whenever a process exits.
        hub.add(pool.process_sentinels, self.maintain_pool, READ | ERR)
        # Handle_result_event is called whenever one of the
        # result queues are readable.
        hub.add(pool._fileno_to_outq, self.handle_result_event, READ | ERR)

        # Timers include calling maintain_pool at a regular interval
        # to be certain processes are restarted.
        for handler, interval in items(self.timers):
            hub.timer.apply_interval(interval * 1000.0, handler)
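The hub registrations above reduce to the classic poller pattern: map a file descriptor to a callback and dispatch when the fd becomes readable. A rough stand-alone analogue using the stdlib selectors module (illustration only, not celery's Hub API):

    import selectors
    import socket

    sel = selectors.DefaultSelector()
    server, client = socket.socketpair()

    def handle_result_event(conn):
        # a "result queue" became readable; drain it
        print('read: %r' % (conn.recv(4096), ))

    # analogous to hub.add(fd, self.handle_result_event, READ | ERR)
    sel.register(server, selectors.EVENT_READ, handle_result_event)

    client.send(b'task result')
    # one poll() iteration: run the callback for every ready fd
    for key, _events in sel.select(timeout=1.0):
        key.data(key.fileobj)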
Example #3
    def on_poll_init(self, pool, w, hub):
        apply_after = hub.timer.apply_after
        apply_at = hub.timer.apply_at
        on_soft_timeout = pool.on_soft_timeout
        on_hard_timeout = pool.on_hard_timeout
        maintain_pool = pool.maintain_pool
        add_reader = hub.add_reader
        remove = hub.remove
        now = time.time

        # did_start_ok will verify that pool processes were able to start,
        # but this will only work the first time we start, as
        # maxtasksperchild will mess up metrics.
        if not w.consumer.restart_count and not pool.did_start_ok():
            raise WorkerLostError('Could not start worker processes')

        # need to handle pool results before every task
        # since multiple tasks can be received in a single poll()
        hub.on_task.append(pool.maybe_handle_result)

        hub.update_readers(pool.readers)
        for handler, interval in pool.timers.iteritems():
            hub.timer.apply_interval(interval * 1000.0, handler)

        def on_timeout_set(R, soft, hard):
            def _on_soft_timeout():
                if hard:
                    R._tref = apply_at(now() + (hard - soft), on_hard_timeout,
                                       (R, ))
                on_soft_timeout(R)

            if soft:
                R._tref = apply_after(soft * 1000.0, _on_soft_timeout)
            elif hard:
                R._tref = apply_after(hard * 1000.0, on_hard_timeout, (R, ))

        def on_timeout_cancel(result):
            try:
                result._tref.cancel()
                delattr(result, '_tref')
            except AttributeError:
                pass

        pool.init_callbacks(
            on_process_up=lambda w: add_reader(w.sentinel, maintain_pool),
            on_process_down=lambda w: remove(w.sentinel),
            on_timeout_set=on_timeout_set,
            on_timeout_cancel=on_timeout_cancel,
        )
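Note how on_timeout_set chains the two limits: the soft timer fires first, and only when it fires does it arm the hard timer for the remaining hard - soft seconds, so the hard callback never runs early. A toy equivalent using threading.Timer as a stand-in for the hub timer that the snippet drives through apply_after/apply_at:

    import threading

    def set_timeouts(result, soft, hard, on_soft, on_hard):
        def _soft_fired():
            if hard:
                # the hard timer only needs to cover the remaining window
                result._tref = threading.Timer(hard - soft, on_hard, (result, ))
                result._tref.start()
            on_soft(result)

        if soft:
            result._tref = threading.Timer(soft, _soft_fired)
            result._tref.start()
        elif hard:
            result._tref = threading.Timer(hard, on_hard, (result, ))
            result._tref.start()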
Example #4
    def on_poll_init(self, pool, hub):
        apply_after = hub.timer.apply_after
        apply_at = hub.timer.apply_at
        on_soft_timeout = pool.on_soft_timeout
        on_hard_timeout = pool.on_hard_timeout
        maintain_pool = pool.maintain_pool
        add_reader = hub.add_reader
        remove = hub.remove
        now = time.time

        if not pool.did_start_ok():
            raise WorkerLostError("Could not start worker processes")

        hub.update_readers(pool.readers)
        for handler, interval in pool.timers.iteritems():
            hub.timer.apply_interval(interval * 1000.0, handler)

        def on_timeout_set(R, soft, hard):
            def _on_soft_timeout():
                if hard:
                    R._tref = apply_at(now() + (hard - soft), on_hard_timeout,
                                       (R, ))
                on_soft_timeout(R)

            if soft:
                R._tref = apply_after(soft * 1000.0, _on_soft_timeout)
            elif hard:
                R._tref = apply_after(hard * 1000.0, on_hard_timeout, (R, ))

        def on_timeout_cancel(result):
            try:
                result._tref.cancel()
                delattr(result, "_tref")
            except AttributeError:
                pass

        pool.init_callbacks(
            on_process_up=lambda w: add_reader(w.sentinel, maintain_pool),
            on_process_down=lambda w: remove(w.sentinel),
            on_timeout_set=on_timeout_set,
            on_timeout_cancel=on_timeout_cancel,
        )
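The on_process_up/on_process_down callbacks hang off each worker's sentinel: a file descriptor that becomes readable when the process dies, which is what lets maintain_pool react promptly instead of polling for dead children. A small demonstration with the stdlib (multiprocessing.Process.sentinel is real; the selector stands in for the hub):

    import multiprocessing
    import selectors
    import time

    def worker():
        time.sleep(0.1)

    if __name__ == '__main__':
        proc = multiprocessing.Process(target=worker)
        proc.start()

        sel = selectors.DefaultSelector()
        # analogous to add_reader(w.sentinel, maintain_pool)
        sel.register(proc.sentinel, selectors.EVENT_READ,
                     lambda: print('process exited; maintain the pool'))

        for key, _events in sel.select(timeout=5.0):
            key.data()
        proc.join()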
Example #5
    def on_poll_init(self,
                     w,
                     hub,
                     now=time.time,
                     protocol=HIGHEST_PROTOCOL,
                     pack=struct.pack,
                     dumps=_pickle.dumps):
        pool = self._pool
        apply_after = hub.timer.apply_after
        apply_at = hub.timer.apply_at
        maintain_pool = self.maintain_pool
        on_soft_timeout = self.on_soft_timeout
        on_hard_timeout = self.on_hard_timeout
        fileno_to_inq = pool._fileno_to_inq
        fileno_to_outq = pool._fileno_to_outq
        fileno_to_synq = pool._fileno_to_synq
        outbound = self.outbound_buffer
        pop_message = outbound.popleft
        put_message = outbound.append
        all_inqueues = pool._all_inqueues
        active_writes = self._active_writes
        diff = all_inqueues.difference
        hub_add, hub_remove = hub.add, hub.remove
        mark_write_fd_as_active = active_writes.add
        mark_write_gen_as_active = self._active_writers.add
        write_generator_gone = self._active_writers.discard
        get_job = pool._cache.__getitem__
        pool._put_back = put_message

        # did_start_ok will verify that pool processes were able to start,
        # but this will only work the first time we start, as
        # maxtasksperchild will mess up metrics.
        if not w.consumer.restart_count and not pool.did_start_ok():
            raise WorkerLostError('Could not start worker processes')

        hub_add(pool.process_sentinels, self.maintain_pool, READ | ERR)
        hub_add(fileno_to_outq, self.handle_result_event, READ | ERR)
        for handler, interval in items(self.timers):
            hub.timer.apply_interval(interval * 1000.0, handler)

        # need to handle pool results before every task
        # since multiple tasks can be received in a single poll()
        # XXX do we need this now?!?
        # hub.on_task.append(pool.maybe_handle_result)

        def on_timeout_set(R, soft, hard):
            def _on_soft_timeout():
                if hard:
                    R._tref = apply_at(now() + (hard - soft), on_hard_timeout,
                                       (R, ))
                on_soft_timeout(R)

            if soft:
                R._tref = apply_after(soft * 1000.0, _on_soft_timeout)
            elif hard:
                R._tref = apply_after(hard * 1000.0, on_hard_timeout, (R, ))

        self._pool.on_timeout_set = on_timeout_set

        def on_timeout_cancel(result):
            try:
                result._tref.cancel()
                delattr(result, '_tref')
            except AttributeError:
                pass

        self._pool.on_timeout_cancel = on_timeout_cancel

        def on_process_up(proc):
            fileno_to_outq[proc.outqR_fd] = proc
            hub_add(proc.sentinel, maintain_pool, READ | ERR)
            hub_add(proc.outqR_fd, pool.handle_result_event, READ | ERR)

        self._pool.on_process_up = on_process_up

        def on_process_down(proc):
            pool.process_flush_queues(proc)
            fileno_to_outq.pop(proc.outqR_fd, None)
            fileno_to_inq.pop(proc.inqW_fd, None)
            fileno_to_synq.pop(proc.synqW_fd, None)
            all_inqueues.discard(proc.inqW_fd)
            hub_remove(proc.sentinel)
            hub_remove(proc.outqR_fd)

        self._pool.on_process_down = on_process_down

        class Ack(object):
            __slots__ = ('id', 'fd', '_payload')

            def __init__(self, id, fd, payload):
                self.id = id
                self.fd = fd
                self._payload = payload

            def __eq__(self, other):
                return self.id == other.id

            def __hash__(self):
                return self.id

        def _write_ack(fd, ack, callback=None):
            header, body, body_size = ack._payload
            try:
                try:
                    proc = fileno_to_synq[fd]
                except KeyError:
                    raise StopIteration()
                send_offset = proc.synq._writer.send_offset

                Hw = Bw = 0
                while Hw < 4:
                    try:
                        Hw += send_offset(header, Hw)
                    except Exception as exc:
                        if get_errno(exc) not in UNAVAIL:
                            raise
                        yield
                while Bw < body_size:
                    try:
                        Bw += send_offset(body, Bw)
                    except Exception as exc:
                        if get_errno(exc) not in UNAVAIL:
                            raise
                        # suspend until more data
                        yield
            finally:
                if callback:
                    callback()
                active_writes.discard(fd)

        def _write_job(fd, job, callback=None):
            header, body, body_size = job._payload
            try:
                try:
                    proc = fileno_to_inq[fd]
                except KeyError:
                    put_message(job)
                    raise StopIteration()
                # job result keeps track of what process the job is sent to.
                job._write_to = proc
                send_offset = proc.inq._writer.send_offset

                Hw = Bw = 0
                while Hw < 4:
                    try:
                        Hw += send_offset(header, Hw)
                    except Exception as exc:
                        if get_errno(exc) not in UNAVAIL:
                            raise
                        # suspend until more data
                        yield
                while Bw < body_size:
                    try:
                        Bw += send_offset(body, Bw)
                    except Exception as exc:
                        if get_errno(exc) not in UNAVAIL:
                            raise
                        # suspend until more data
                        yield
            finally:
                if callback:
                    callback()
                active_writes.discard(fd)

        def on_inqueue_close(fd):
            active_writes.discard(fd)
            all_inqueues.discard(fd)

        self._pool.on_inqueue_close = on_inqueue_close

        def schedule_writes(ready_fd, events):
            if ready_fd in active_writes:
                return
            try:
                job = pop_message()
            except IndexError:
                for inqfd in diff(active_writes):
                    hub_remove(inqfd)
            else:
                if not job._accepted:
                    callback = promise(write_generator_gone)
                    try:
                        job._scheduled_for = fileno_to_inq[ready_fd]
                    except KeyError:
                        # process gone since scheduled, put back
                        return put_message(job)
                    cor = _write_job(ready_fd, job, callback=callback)
                    mark_write_gen_as_active(cor)
                    mark_write_fd_as_active(ready_fd)
                    callback.args = (cor, )  # tricky as we need to pass ref
                    hub_add((ready_fd, ), cor, WRITE | ERR)

        def _create_payload(type_, args):
            body = dumps((type_, args), protocol=protocol)
            size = len(body)
            header = pack('>I', size)
            return header, body, size

        MESSAGES = {
            ACK: _create_payload(ACK, (0, )),
            NACK: _create_payload(NACK, (0, ))
        }

        def send_ack(response, pid, job, fd):
            msg = Ack(job, fd, MESSAGES[response])
            callback = promise(write_generator_gone)
            cor = _write_ack(fd, msg, callback=callback)
            mark_write_gen_as_active(cor)
            mark_write_fd_as_active(fd)
            callback.args = (cor, )
            hub_add((fd, ), cor, WRITE | ERR)

        self._pool.send_ack = send_ack

        def on_poll_start(hub):
            if outbound:
                hub_add(diff(active_writes), schedule_writes, WRITE | ERR)

        self.on_poll_start = on_poll_start

        def quick_put(tup):
            body = dumps(tup, protocol=protocol)
            body_size = len(body)
            header = pack('>I', body_size)
            # index 1,0 is the job ID.
            job = get_job(tup[1][0])
            job._payload = header, buffer(body), body_size
            put_message(job)

        self._pool._quick_put = quick_put
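_create_payload and quick_put frame every pickled message as a 4-byte big-endian length header (pack('>I', size)) followed by the body; the while Hw < 4 and while Bw < body_size loops above write exactly those two parts, yielding whenever the pipe would block. A sketch of how the receiving side would decode one frame (read(n) stands for any function returning up to n bytes from the pipe):

    import pickle
    import struct

    def read_message(read):
        # mirror of the `while Hw < 4` loop: collect the 4-byte header
        header = b''
        while len(header) < 4:
            chunk = read(4 - len(header))
            if not chunk:
                raise EOFError('peer closed during header')
            header += chunk
        body_size, = struct.unpack('>I', header)
        # mirror of the `while Bw < body_size` loop: collect the body
        body = b''
        while len(body) < body_size:
            chunk = read(body_size - len(body))
            if not chunk:
                raise EOFError('peer closed during body')
            body += chunk
        return pickle.loads(body)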
Example #6
    def on_poll_init(self, pool, w, hub):
        apply_after = hub.timer.apply_after
        apply_at = hub.timer.apply_at
        on_soft_timeout = pool.on_soft_timeout
        on_hard_timeout = pool.on_hard_timeout
        maintain_pool = pool.maintain_pool
        add_reader = hub.add_reader
        remove = hub.remove
        now = time.time
        cache = getattr(pool._pool, '_cache', None)

        # did_start_ok will verify that pool processes were able to start,
        # but this will only work the first time we start, as
        # maxtasksperchild will mess up metrics.
        if not w.consumer.restart_count and not pool.did_start_ok():
            raise WorkerLostError('Could not start worker processes')

        # need to handle pool results before every task
        # since multiple tasks can be received in a single poll()
        hub.on_task.append(pool.maybe_handle_result)

        hub.update_readers(pool.readers)
        for handler, interval in pool.timers.iteritems():
            hub.timer.apply_interval(interval * 1000.0, handler)

        trefs = pool._tref_for_id = WeakValueDictionary()

        def _discard_tref(job):
            try:
                tref = trefs.pop(job)
                tref.cancel()
                del tref
            except (KeyError, AttributeError):
                pass  # out of scope

        def _on_hard_timeout(job):
            try:
                result = cache[job]
            except KeyError:
                pass  # job ready
            else:
                on_hard_timeout(result)
            finally:
                # remove tref
                _discard_tref(job)

        def _on_soft_timeout(job, soft, hard, hub):
            if hard:
                trefs[job] = apply_at(
                    now() + (hard - soft),
                    _on_hard_timeout,
                    (job, ),
                )
            try:
                result = cache[job]
            except KeyError:
                pass  # job ready
            else:
                on_soft_timeout(result)
            finally:
                if not hard:
                    # remove tref
                    _discard_tref(job)

        def on_timeout_set(R, soft, hard):
            if soft:
                trefs[R._job] = apply_after(
                    soft * 1000.0,
                    _on_soft_timeout,
                    (R._job, soft, hard, hub),
                )
            elif hard:
                trefs[R._job] = apply_after(hard * 1000.0, _on_hard_timeout,
                                            (R._job, ))

        def on_timeout_cancel(R):
            _discard_tref(R._job)

        pool.init_callbacks(
            on_process_up=lambda w: add_reader(w.sentinel, maintain_pool),
            on_process_down=lambda w: remove(w.sentinel),
            on_timeout_set=on_timeout_set,
            on_timeout_cancel=on_timeout_cancel,
        )
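Since pool._tref_for_id is a WeakValueDictionary, an entry evaporates as soon as nothing else holds a strong reference to the timer, so _discard_tref only has to absorb the KeyError/AttributeError races rather than do real cleanup. A minimal demonstration of that property (plain stdlib, not celery code):

    from weakref import WeakValueDictionary

    class Tref(object):
        # stand-in for a timer reference; instances are weak-referenceable
        def cancel(self):
            pass

    trefs = WeakValueDictionary()
    tref = Tref()
    trefs['job-1'] = tref

    print('job-1' in trefs)   # True while a strong reference exists
    del tref                  # drop the last strong reference
    print('job-1' in trefs)   # False: the entry vanished on its own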