Example #1
def upgrade(state, **kwargs):
    svn_version_regex = re.compile(r'Revision:?\s+(\d+)', re.I)
    cur_version = None
    latest_version = None
    result = {'error': 0, 'msg': None}
    out, err = run_cmd("svn info")
    if out:
        m = svn_version_regex.search(out)
        if m:
            cur_version = m.group(1)
            logger.info("current svn revision: {}".format(cur_version))
        else:
            logger.error("Failed to get version, give up upgrade: output: {}, error: {}", out, err)
            result['error'] = 1
    out, err = run_cmd("svn update")
    if out:
        m = svn_version_regex.search(out)
        if m:
            latest_version = m.group(1)
            logger.info("Update to latest svn revision: {}".format(latest_version))
        else:
            logger.error("Failed to update svn, give up upgrade: output: {}, error: {}", out, err)
            result['error'] = 1
    if cur_version and latest_version and cur_version != latest_version:
        logger.info("Upgrade to from {} to {}, so restart current worker!".format(cur_version, latest_version))
        # state.app.control.broadcast('shutdown', destination=[os.environ["CELERY_WORKER_NAME"]])
        result['msg'] = "upgraded from {} to {}, restart...".format(cur_version, latest_version)
        raise WorkerShutdown(result['msg'])
    else:
        result['msg'] = "no upgrade: {}=={}".format(cur_version, latest_version)
    logger.info("result={!r}".format(result))
    return result
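Note: the upgrade handler above is written as a worker-side remote control command; it runs svn update in the worker's working copy and raises WorkerShutdown so the worker restarts on the new revision. A minimal client-side sketch of how such a command could be invoked, assuming it has been registered with the workers (the registration API differs across Celery versions) and assuming a hypothetical myapp module that exposes the Celery app:

from myapp import app  # hypothetical module exposing the Celery app instance

# Broadcast the custom 'upgrade' control command and collect each worker's
# result dict, e.g. [{'celery@worker1': {'error': 0, 'msg': '...'}}].
replies = app.control.broadcast('upgrade', reply=True, timeout=30)
print(replies)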
Example #2
def _shutdown_worker(context):
    # Inform the Watchdog Monitor [leave]
    watchdog_context = context['worker_watchdog']
    watchdog.inform_worker_leave(
        watchdog_context['intercom'],
        watchdog_context['worker_id'],
        prefix=watchdog_context['prefix'],
    )
    raise WorkerShutdown()
Example #3
def _shutdown_worker(self):
    # Inform the Watchdog Monitor [leave]
    watchdog_context = self._watchdog_context
    watchdog.inform_worker_leave(
        watchdog_context['intercom'],
        watchdog_context['worker_id'],
        prefix=watchdog_context['prefix'],
    )
    self.is_shutting_down = False
    raise WorkerShutdown()
Example #4
    def _wait_stop_pool(self):
        try:
            res = self.pool._pool._join_exited_workers()
            self.pool.shrink(len(self.pool._pool._pool))
        except ValueError:  # ignore shrink error
            pass

        if not self.pool._pool._pool:
            self.timer.cancel(self.wait_stop_timer)
            raise WorkerShutdown()
Example #5
    def _get_on_message(self, c):
        if c.qos is None:
            c.qos = Mock()
        c.task_consumer = Mock()
        c.event_dispatcher = mock_event_dispatcher()
        c.connection = Mock(name='.connection')
        c.connection.drain_events.side_effect = WorkerShutdown()

        with pytest.raises(WorkerShutdown):
            c.loop(*c.loop_args())
        assert c.task_consumer.on_message
        return c.task_consumer.on_message
Example #6
    def _get_on_message(self, l):
        if l.qos is None:
            l.qos = Mock()
        l.event_dispatcher = mock_event_dispatcher()
        l.task_consumer = Mock()
        l.connection = Mock()
        l.connection.drain_events.side_effect = WorkerShutdown()

        with self.assertRaises(WorkerShutdown):
            l.loop(*l.loop_args())
        self.assertTrue(l.task_consumer.on_message)
        return l.task_consumer.on_message
Example #7
    def test_start_catches_base_exceptions(self):
        worker1 = self.create_worker()
        worker1.blueprint.state = RUN
        stc = MockStep()
        stc.start.side_effect = WorkerTerminate()
        worker1.steps = [stc]
        worker1.start()
        stc.start.assert_called_with(worker1)
        assert stc.terminate.call_count

        worker2 = self.create_worker()
        worker2.blueprint.state = RUN
        sec = MockStep()
        sec.start.side_effect = WorkerShutdown()
        sec.terminate = None
        worker2.steps = [sec]
        worker2.start()
        assert sec.stop.call_count
Example #8
def start(self):
    blueprint = self.blueprint
    while blueprint.state not in STOP_CONDITIONS:
        maybe_shutdown()
        if self.restart_count:
            try:
                self._restart_state.step()
            except RestartFreqExceeded as exc:
                crit('Frequent restarts detected: %r', exc, exc_info=1)
                sleep(1)
        self.restart_count += 1
        try:
            blueprint.start(self)
        except self.connection_errors as exc:
            # If we're not retrying connections, we need to properly shutdown or terminate
            # the Celery main process instead of abruptly aborting the process without any cleanup.
            is_connection_loss_on_startup = self.restart_count == 0
            connection_retry_type = self._get_connection_retry_type(
                is_connection_loss_on_startup)
            connection_retry = self.app.conf[connection_retry_type]
            if not connection_retry:
                crit(
                    f"Retrying to {'establish' if is_connection_loss_on_startup else 're-establish'} "
                    f"a connection to the message broker after a connection loss has "
                    f"been disabled (app.conf.{connection_retry_type}=False). Shutting down..."
                )
                raise WorkerShutdown(1) from exc
            if isinstance(exc, OSError) and exc.errno == errno.EMFILE:
                crit("Too many open files. Aborting...")
                raise WorkerTerminate(1) from exc
            maybe_shutdown()
            if blueprint.state not in STOP_CONDITIONS:
                if self.connection:
                    self.on_connection_error_after_connected(exc)
                else:
                    self.on_connection_error_before_connected(exc)
                self.on_close()
                blueprint.restart(self)
Example #9
File: state.py Project: zzkristy/celery
def maybe_shutdown():
    if should_stop is not None and should_stop is not False:
        raise WorkerShutdown(should_stop)
    elif should_terminate is not None and should_terminate is not False:
        raise WorkerTerminate(should_terminate)
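should_stop and should_terminate here are module-level flags in celery.worker.state, normally set by signal handlers; maybe_shutdown() is then polled at safe points (see Examples #10 and #16 below). A self-contained sketch of the same flag-then-raise pattern, with illustrative names and wiring rather than Celery's actual code:

import signal

# Illustrative module-level flags, mirroring celery.worker.state.
should_stop = None
should_terminate = None

class WorkerShutdown(SystemExit):
    """Request a warm shutdown (mirrors celery.exceptions.WorkerShutdown)."""

class WorkerTerminate(SystemExit):
    """Request a cold shutdown (mirrors celery.exceptions.WorkerTerminate)."""

def _on_sigterm(signum, frame):
    # Signal handlers only set the flag; the worker loop raises later,
    # at a point where it is safe to unwind.
    global should_stop
    should_stop = 0  # 0 == EX_OK, hence the `is not False` checks below

signal.signal(signal.SIGTERM, _on_sigterm)

def maybe_shutdown():
    if should_stop is not None and should_stop is not False:
        raise WorkerShutdown(should_stop)
    elif should_terminate is not None and should_terminate is not False:
        raise WorkerTerminate(should_terminate)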
Example #10
def asynloop(obj,
             connection,
             consumer,
             blueprint,
             hub,
             qos,
             heartbeat,
             clock,
             hbrate=2.0,
             RUN=RUN):
    """Non-blocking event loop consuming messages until connection is lost,
    or shutdown is requested."""
    update_qos = qos.update
    hbtick = connection.heartbeat_check
    errors = connection.connection_errors
    heartbeat = connection.get_heartbeat_interval()  # negotiated

    on_task_received = obj.create_task_handler()

    if heartbeat and connection.supports_heartbeats:
        hub.call_repeatedly(heartbeat / hbrate, hbtick, hbrate)

    consumer.on_message = on_task_received
    consumer.consume()
    obj.on_ready()
    obj.controller.register_with_event_loop(hub)
    obj.register_with_event_loop(hub)

    # did_start_ok will verify that pool processes were able to start,
    # but this will only work the first time we start, as
    # maxtasksperchild will mess up metrics.
    if not obj.restart_count and not obj.pool.did_start_ok():
        raise WorkerLostError('Could not start worker processes')

    # consumer.consume() may have prefetched up to our
    # limit - drain an event so we are in a clean state
    # prior to starting our event loop.
    if connection.transport.driver_type == 'amqp':
        hub.call_soon(_quick_drain, connection)

    # FIXME: Use loop.run_forever
    # Tried and works, but no time to test properly before release.
    hub.propagate_errors = errors
    loop = hub.create_loop()

    try:
        while blueprint.state == RUN and obj.connection:
            # shutdown if signal handlers told us to.
            should_stop, should_terminate = (
                state.should_stop,
                state.should_terminate,
            )
            # False == EX_OK, so must use is not False
            if should_stop is not None and should_stop is not False:
                raise WorkerShutdown(should_stop)
            elif should_terminate is not None and should_terminate is not False:
                raise WorkerTerminate(should_terminate)

            # We only update QoS when there are no more messages to read.
            # This groups together qos calls, and makes sure that remote
            # control commands will be prioritized over task messages.
            if qos.prev != qos.value:
                update_qos()

            try:
                next(loop)
            except StopIteration:
                loop = hub.create_loop()
    finally:
        try:
            hub.reset()
        except Exception as exc:
            error(
                'Error cleaning up after event loop: %r',
                exc,
                exc_info=1,
            )
Example #11
def test_apply_target__raises_WorkerShutdown(self):
    target = Mock(name='target')
    target.side_effect = WorkerShutdown()
    with self.assertRaises(WorkerShutdown):
        apply_target(target)
Example #12
File: control.py Project: skizhak/celery
def shutdown(state, msg='Got shutdown from remote', **kwargs):
    logger.warning(msg)
    raise WorkerShutdown(msg)
Example #13
def maybe_shutdown():
    """Shutdown if flags have been set."""
    if should_stop is not None and should_stop is not False:
        raise WorkerShutdown(should_stop)
    elif should_terminate is not None and should_terminate is not False:
        raise WorkerTerminate(should_terminate)
Example #14
def shutdown(state, msg="Got shutdown from remote", **kwargs):
    """Shutdown worker(s)."""
    logger.warning(msg)
    raise WorkerShutdown(msg)
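This is the handler that runs on a worker when a client broadcasts the built-in 'shutdown' control command (as in the commented-out line in Example #1). A minimal client-side sketch, with app standing in for your Celery application instance:

# Ask every worker to shut down gracefully; each worker's shutdown handler
# (above) logs the message and raises WorkerShutdown.
app.control.shutdown()

# Or target a specific worker node by name:
app.control.broadcast('shutdown', destination=['celery@worker1'])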
Example #15
File: state.py Project: sysras0000/panop
def maybe_shutdown():
    if should_stop:
        raise WorkerShutdown()
    elif should_terminate:
        raise WorkerTerminate()
Example #16
def asynloop(obj,
             connection,
             consumer,
             blueprint,
             hub,
             qos,
             heartbeat,
             clock,
             hbrate=2.0,
             RUN=RUN):
    """Non-blocking event loop consuming messages until connection is lost,
    or shutdown is requested."""
    update_qos = qos.update
    readers, writers = hub.readers, hub.writers
    hbtick = connection.heartbeat_check
    errors = connection.connection_errors
    hub_add, hub_remove = hub.add, hub.remove

    on_task_received = obj.create_task_handler()

    if heartbeat and connection.supports_heartbeats:
        hub.call_repeatedly(heartbeat / hbrate, hbtick, hbrate)

    consumer.callbacks = [on_task_received]
    consumer.consume()
    obj.on_ready()
    obj.controller.register_with_event_loop(hub)
    obj.register_with_event_loop(hub)

    # did_start_ok will verify that pool processes were able to start,
    # but this will only work the first time we start, as
    # maxtasksperchild will mess up metrics.
    if not obj.restart_count and not obj.pool.did_start_ok():
        raise WorkerLostError('Could not start worker processes')

    # FIXME: Use loop.run_forever
    # Tried and works, but no time to test properly before release.
    hub.propagate_errors = errors
    loop = hub.create_loop()

    try:
        while blueprint.state == RUN and obj.connection:
            # shutdown if signal handlers told us to.
            if state.should_stop:
                raise WorkerShutdown()
            elif state.should_terminate:
                raise WorkerTerminate()

            # We only update QoS when there are no more messages to read.
            # This groups together qos calls, and makes sure that remote
            # control commands will be prioritized over task messages.
            if qos.prev != qos.value:
                update_qos()

            try:
                next(loop)
            except StopIteration:
                loop = hub.create_loop()
    finally:
        try:
            hub.reset()
        except Exception as exc:
            error(
                'Error cleaning up after event loop: %r',
                exc,
                exc_info=1,
            )