Example #1
    def __init__(self,
                 processes=None,
                 synack=False,
                 sched_strategy=None,
                 *args,
                 **kwargs):
        self.sched_strategy = SCHED_STRATEGIES.get(sched_strategy,
                                                   sched_strategy)
        processes = self.cpu_count() if processes is None else processes
        self.synack = synack
        # create queue-pairs for all our processes in advance.
        self._queues = {
            self.create_process_queues(): None
            for _ in range(processes)
        }

        # inqueue fileno -> process mapping
        self._fileno_to_inq = {}
        # outqueue fileno -> process mapping
        self._fileno_to_outq = {}
        # synqueue fileno -> process mapping
        self._fileno_to_synq = {}

        # We keep track of processes that have not yet
        # sent a WORKER_UP message.  If a process fails to send
        # this message within proc_alive_timeout we terminate it
        # and hope the next process will recover.
        self._proc_alive_timeout = PROC_ALIVE_TIMEOUT
        self._waiting_to_start = set()

        # denormalized set of all inqueues.
        self._all_inqueues = set()

        # Set of fds being written to (busy)
        self._active_writes = set()

        # Set of active co-routines currently writing jobs.
        self._active_writers = set()

        # Set of fds that are busy (executing task)
        self._busy_workers = set()
        self._mark_worker_as_available = self._busy_workers.discard

        # Holds jobs waiting to be written to child processes.
        self.outbound_buffer = deque()

        self.write_stats = Counter()

        super(AsynPool, self).__init__(processes, *args, **kwargs)

        for proc in self._pool:
            # create initial mappings, these will be updated
            # as processes are recycled, or found lost elsewhere.
            self._fileno_to_outq[proc.outqR_fd] = proc
            self._fileno_to_synq[proc.synqW_fd] = proc
        self.on_soft_timeout = self._timeout_handler.on_soft_timeout
        self.on_hard_timeout = self._timeout_handler.on_hard_timeout
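
The fileno -> process maps built above are the pool's bridge between the
event loop and its workers: the loop reports bare file descriptors, and the
pool must translate them back to the owning process.  A minimal sketch of
the idea, using a hypothetical Proc stand-in rather than the real process
objects:

    class Proc:
        def __init__(self, outqR_fd):
            self.outqR_fd = outqR_fd   # read end of this process's outqueue

    procs = [Proc(10), Proc(11)]
    fileno_to_outq = {p.outqR_fd: p for p in procs}

    ready_fd = 11                      # as delivered by the event loop
    assert fileno_to_outq[ready_fd] is procs[1]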
Example #2
File: state.py  Project: zzkristy/celery
#: maximum number of revokes to keep in memory.
REVOKES_MAX = 50000

#: how many seconds a revoke will be active before
#: being expired when the max limit has been exceeded.
REVOKE_EXPIRES = 10800

#: set of all reserved :class:`~celery.worker.request.Request`'s.
reserved_requests = set()

#: set of currently active :class:`~celery.worker.request.Request`'s.
active_requests = set()

#: count of tasks accepted by the worker, keyed by type.
total_count = Counter()

#: count of all tasks accepted by the worker
all_total_count = [0]

#: the list of currently revoked tasks.  Persistent if ``statedb`` set.
revoked = LimitedSet(maxlen=REVOKES_MAX, expires=REVOKE_EXPIRES)

#: Update global state when a task has been reserved.
task_reserved = reserved_requests.add

should_stop = None
should_terminate = None


def reset_state():
    """Reset the worker's global state."""
    reserved_requests.clear()
    active_requests.clear()
    total_count.clear()
    all_total_count[:] = [0]
    revoked.clear()
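
The bounded, expiring behaviour of ``revoked`` is easy to demonstrate.  A
minimal sketch, assuming Celery 4.x+ where LimitedSet is importable from
celery.utils.collections:

    from celery.utils.collections import LimitedSet

    demo = LimitedSet(maxlen=3, expires=10800)
    for task_id in ('t1', 't2', 't3', 't4'):
        demo.add(task_id)

    assert 't4' in demo     # newest ids are kept
    assert len(demo) <= 3   # oldest evicted once maxlen is exceeded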
Example #3
    def _create_write_handlers(self, hub,
                               pack=struct.pack, dumps=_pickle.dumps,
                               protocol=HIGHEST_PROTOCOL):
        """For async pool this creates the handlers used to write data to
        child processes."""
        fileno_to_inq = self._fileno_to_inq
        fileno_to_synq = self._fileno_to_synq
        outbound = self.outbound_buffer
        pop_message = outbound.popleft
        put_message = outbound.append
        all_inqueues = self._all_inqueues
        active_writes = self._active_writes
        diff = all_inqueues.difference
        add_reader, add_writer = hub.add_reader, hub.add_writer
        hub_add, hub_remove = hub.add, hub.remove
        mark_write_fd_as_active = active_writes.add
        mark_write_gen_as_active = self._active_writers.add
        write_generator_done = self._active_writers.discard
        get_job = self._cache.__getitem__
        # puts back at the head of the queue, so it is retried first
        self._put_back = outbound.appendleft
        precalc = {ACK: self._create_payload(ACK, (0, )),
                   NACK: self._create_payload(NACK, (0, ))}

        def on_poll_start():
            # called for every event loop iteration, and if there
            # are messages pending this will schedule writing one message
            # by registering the 'schedule_writes' function for all currently
            # inactive inqueues (not already being written to)

            # consolidate means the event loop will merge them
            # and call the callback once with the list of writable fds as
            # argument.  Using this means we minimize the risk of having
            # the same fd receive every task if the pipe read buffer is not
            # full.
            if outbound:
                for fd in diff(active_writes):
                    hub_add(fd, None, WRITE | ERR, consolidate=True)
        self.on_poll_start = on_poll_start

        def on_inqueue_close(fd):
            # Makes sure the fd is removed from tracking when
            # the connection is closed, this is essential as fds may be reused.
            active_writes.discard(fd)
            all_inqueues.discard(fd)
        self.on_inqueue_close = on_inqueue_close

        def schedule_writes(ready_fds, shuffle=random.shuffle):
            # Schedule write operation to ready file descriptor.
            # The file descriptor is writeable, but that does not
            # mean the process is currently reading from the socket.
            # The socket is buffered so writeable simply means that
            # the buffer can accept at least 1 byte of data.
            shuffle(ready_fds)
            for ready_fd in ready_fds:
                if ready_fd in active_writes:
                    # already writing to this fd
                    continue
                try:
                    job = pop_message()
                except IndexError:
                    # no more messages, remove all inactive fds from the hub.
                    # this is important since the fds are always writeable
                    # as long as there's 1 byte left in the buffer, and so
                    # this may create a spinloop where the event loop
                    # always wakes up.
                    for inqfd in diff(active_writes):
                        hub_remove(inqfd)
                    break
                else:
                    if not job._accepted:  # job not yet accepted by a worker
                        try:
                            # keep track of what process the write operation
                            # was scheduled for.
                            proc = job._scheduled_for = fileno_to_inq[ready_fd]
                        except KeyError:
                            # write was scheduled for this fd but the process
                            # has since exited and the message must be sent to
                            # another process.
                            put_message(job)
                            continue
                        cor = _write_job(proc, ready_fd, job)
                        job._writer = ref(cor)
                        mark_write_gen_as_active(cor)
                        mark_write_fd_as_active(ready_fd)

                        # Try to write immediately, in case there's an error.
                        try:
                            next(cor)
                            add_writer(ready_fd, cor)
                        except StopIteration:
                            pass
        hub.consolidate_callback = schedule_writes
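
        # How the write coroutines are driven: _write_job below is a
        # generator, so next(cor) above runs it up to its first yield, or to
        # completion (StopIteration) if the whole payload fit into the pipe
        # buffer in one pass.  If it yielded, add_writer hands the generator
        # to the hub, which resumes it each time the fd becomes writable.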

        def send_job(tup):
            # Schedule writing job request for when one of the process
            # inqueues are writable.
            body = dumps(tup, protocol=protocol)
            body_size = len(body)
            header = pack('>I', body_size)
            # tup[1][0] is the job ID.
            job = get_job(tup[1][0])
            job._payload = header, body, body_size
            put_message(job)
        self._quick_put = send_job

        write_stats = self.write_stats = Counter()

        def on_not_recovering(proc):
            # XXX Theoretically a possibility, but not seen in practice yet.
            raise Exception(
                'Process writable but cannot write. Contact support!')

        def _write_job(proc, fd, job):
            # writes job to the worker process.
            # Operation must complete if more than one byte of data
            # was written.  If the broker connection is lost
            # and no data was written the operation shall be cancelled.
            header, body, body_size = job._payload
            errors = 0
            try:
                # job result keeps track of what process the job is sent to.
                job._write_to = proc
                send = proc.send_job_offset

                Hw = Bw = 0
                # write header
                while Hw < 4:
                    try:
                        Hw += send(header, Hw)
                    except Exception as exc:
                        if get_errno(exc) not in UNAVAIL:
                            raise
                        # suspend until more data
                        errors += 1
                        if errors > 100:
                            on_not_recovering(proc)
                            # PEP 479: raising StopIteration inside a
                            # generator would become RuntimeError.
                            return
                        yield
                    errors = 0

                # write body
                while Bw < body_size:
                    try:
                        Bw += send(body, Bw)
                    except Exception as exc:
                        if get_errno(exc) not in UNAVAIL:
                            raise
                        # suspend until more data
                        errors += 1
                        if errors > 100:
                            on_not_recovering(proc)
                            return
                        yield
                    errors = 0
            finally:
                write_stats[proc.index] += 1
                # message written, so this fd is now available
                active_writes.discard(fd)
                write_generator_done(job._writer())  # is a weakref

        def send_ack(response, pid, job, fd, WRITE=WRITE, ERR=ERR):
            # Only used when synack is enabled.
            # Schedule writing ack response for when the fd is writeable.
            msg = Ack(job, fd, precalc[response])
            callback = promise(write_generator_done)
            cor = _write_ack(fd, msg, callback=callback)
            mark_write_gen_as_active(cor)
            mark_write_fd_as_active(fd)
            callback.args = (cor, )
            add_writer(fd, cor)
        self.send_ack = send_ack
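
        # Note on the ordering above: the promise wrapping
        # write_generator_done is created before the coroutine exists, so
        # callback.args is patched in afterwards; invoking callback() then
        # runs write_generator_done(cor), unregistering the finished writer.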

        def _write_ack(fd, ack, callback=None):
            # writes ack back to the worker if synack enabled.
            # this operation *MUST* complete, otherwise
            # the worker process will hang waiting for the ack.
            header, body, body_size = ack[2]
            try:
                try:
                    proc = fileno_to_synq[fd]
                except KeyError:
                    # process died, we can safely discard the ack at this
                    # point.
                    return
                send = proc.send_syn_offset

                Hw = Bw = 0
                # write header
                while Hw < 4:
                    try:
                        Hw += send(header, Hw)
                    except Exception as exc:
                        if get_errno(exc) not in UNAVAIL:
                            raise
                        yield

                # write body
                while Bw < body_size:
                    try:
                        Bw += send(body, Bw)
                    except Exception as exc:
                        if get_errno(exc) not in UNAVAIL:
                            raise
                        # suspend until more data
                        yield
            finally:
                if callback:
                    callback()
                # message written, so this fd is now available
                active_writes.discard(fd)
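
Both _write_job and _write_ack emit the same wire format: a 4-byte
big-endian length header (pack('>I', body_size)) followed by the pickled
body.  A minimal reader-side sketch, assuming a hypothetical read(n) helper
that blocks until exactly n bytes have arrived (this is not Celery's actual
child-process code):

    import pickle
    import struct

    def read_message(read):
        header = read(4)                            # 4-byte length prefix
        (body_size,) = struct.unpack('>I', header)  # big-endian unsigned int
        return pickle.loads(read(body_size))        # the unpickled job tuple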