def test_setup_logger(self):
     logger = setup_logger(loglevel=logging.ERROR, logfile=None)
     logger.handlers = [] # Reset previously set logger.
     logger = setup_logger(loglevel=logging.ERROR, logfile=None)
     self.assertIs(logger.handlers[0].stream, sys.__stderr__,
             "setup_logger logs to stderr without logfile argument.")
     self.assertDidLogFalse(logger, "Logging something",
             "Logger doesn't info when loglevel is ERROR",
             loglevel=logging.INFO)
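
For orientation, every example on this page exercises setup_logger(). Below is a minimal sketch of a compatible implementation, assuming only the behaviour these tests assert (stderr when logfile is None, a FileHandler for a path, a StreamHandler around any file-like object); celery's real celery.log.setup_logger does considerably more (process-aware logging, formatting):

import logging
import sys

def setup_logger(loglevel=logging.WARNING, logfile=None, name="celery"):
    # Sketch only, not celery's implementation.
    logger = logging.getLogger(name)
    if not logger.handlers:  # Don't stack handlers on repeated calls.
        if isinstance(logfile, str):
            handler = logging.FileHandler(logfile)
        else:  # File-like object, or None for stderr.
            handler = logging.StreamHandler(logfile or sys.stderr)
        logger.addHandler(handler)
    logger.setLevel(loglevel)
    return logger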
Example 2
 def test_setup_logger(self):
     logger = setup_logger(loglevel=logging.ERROR, logfile=None)
     logger.handlers = [] # Reset previously set logger.
     logger = setup_logger(loglevel=logging.ERROR, logfile=None)
     self.assertTrue(logger.handlers[0].stream is sys.stderr,
             "setup_logger logs to stderr without logfile argument.")
     #self.assertTrue(logger._process_aware,
     #        "setup_logger() returns process aware logger.")
     self.assertDidLogTrue(logger, "Logging something",
             "Logger logs error when loglevel is ERROR",
             loglevel=logging.ERROR)
     self.assertDidLogFalse(logger, "Logging something",
             "Logger doesn't info when loglevel is ERROR",
             loglevel=logging.INFO)
Example 3
    def __init__(self, concurrency=None, logfile=None, loglevel=None,
            is_detached=False):

        # Options
        self.loglevel = loglevel or self.loglevel
        self.concurrency = concurrency or self.concurrency
        self.logfile = logfile or self.logfile
        self.is_detached = is_detached
        self.logger = setup_logger(loglevel, logfile)

        # Queues
        self.bucket_queue = Queue()
        self.hold_queue = Queue()

        self.logger.debug("Instantiating thread components...")

        # Threads+Pool
        self.periodic_work_controller = PeriodicWorkController(
                                                    self.bucket_queue,
                                                    self.hold_queue)
        self.pool = TaskPool(self.concurrency, logger=self.logger)
        self.amqp_listener = AMQPListener(self.bucket_queue, self.hold_queue,
                                          logger=self.logger,
                                          initial_prefetch_count=concurrency)
        self.mediator = Mediator(self.bucket_queue, self.safe_process_task)

        # The order is important here;
        #   the first in the list is the first to start,
        # and they must be stopped in reverse order.
        self.components = [self.pool,
                           self.mediator,
                           self.periodic_work_controller,
                           self.amqp_listener]
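
The ordering comment above matters operationally. A sketch of the start/stop protocol it implies, assuming each component exposes start() and stop() (the real logic lives in celery's WorkController and is not shown on this page):

    def start(self):
        for component in self.components:
            component.start()           # First in the list starts first.

    def stop(self):
        for component in reversed(self.components):
            component.stop()            # Stopped in reverse: listener
                                        # first, pool last.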
Example 4
    def get_logger(self, **kwargs):
        """Get process-aware logger object.

        See :func:`celery.log.setup_logger`.

        """
        return setup_logger(**kwargs)
Example 5
def run_clockservice(detach=False, loglevel=conf.CELERYBEAT_LOG_LEVEL,
        logfile=conf.CELERYBEAT_LOG_FILE, pidfile=conf.CELERYBEAT_PID_FILE,
        umask=0, uid=None, gid=None, working_directory=None, chroot=None,
        schedule=conf.CELERYBEAT_SCHEDULE_FILENAME, **kwargs):
    """Starts the celerybeat clock server."""

    print("celerybeat %s is starting." % __version__)

    # Setup logging
    if not isinstance(loglevel, int):
        loglevel = conf.LOG_LEVELS[loglevel.upper()]
    if not detach:
        logfile = None # log to stderr when not running in the background.

    # Dump configuration to screen so we have some basic information
    # when users send e-mails.

    print(STARTUP_INFO_FMT % {
            "conninfo": get_connection_info(),
            "exchange": conf.AMQP_EXCHANGE,
            "exchange_type": conf.AMQP_EXCHANGE_TYPE,
            "consumer_queue": conf.AMQP_CONSUMER_QUEUE,
            "consumer_rkey": conf.AMQP_CONSUMER_ROUTING_KEY,
            "publisher_rkey": conf.AMQP_PUBLISHER_ROUTING_KEY,
            "loglevel": loglevel,
            "pidfile": pidfile,
            "schedule": schedule,
    })

    print("celerybeat has started.")
    arg_start = "manage" in sys.argv[0] and 2 or 1
    platform.set_process_title("celerybeat",
                               info=" ".join(sys.argv[arg_start:]))
    from celery.log import setup_logger, redirect_stdouts_to_logger
    on_stop = noop
    if detach:
        context, on_stop = platform.create_daemon_context(logfile, pidfile,
                                        chroot_directory=chroot,
                                        working_directory=working_directory,
                                        umask=umask)
        context.open()
        logger = setup_logger(loglevel, logfile)
        redirect_stdouts_to_logger(logger, loglevel)
        platform.set_effective_user(uid, gid)

    # Run the worker init handler.
    # (Usually imports task modules and such.)
    current_loader.on_worker_init()

    def _run_clock():
        logger = setup_logger(loglevel, logfile)
        clockservice = ClockService(logger=logger, is_detached=detach,
                                    schedule_filename=schedule)

        try:
            clockservice.start()
        except Exception as e:
            emergency_error(logfile,
                    "celerybeat raised exception %s: %s\n%s" % (
                            e.__class__, e, traceback.format_exc()))
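
emergency_error() is called throughout these examples but never defined on this page. A plausible minimal version, assuming it only needs to append a critical message to the logfile (or to stderr when no logfile is configured), bypassing the logging machinery entirely:

import sys

def emergency_error(logfile, message):
    # Sketch: last-resort error reporting when logging may be broken.
    if logfile and hasattr(logfile, "write"):   # Already a file-like object.
        logfile.write("CRITICAL: %s\n" % message)
    elif logfile:                               # A path on disk.
        with open(logfile, "a") as fh:
            fh.write("CRITICAL: %s\n" % message)
    else:                                       # No logfile configured.
        sys.__stderr__.write("CRITICAL: %s\n" % message)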
Example 6
 def test_setup_logger_no_handlers_file(self):
     from multiprocessing import get_logger
     l = get_logger()
     l.handlers = []
     tempfile = mktemp(suffix="unittest", prefix="celery")
     l = setup_logger(logfile=tempfile, loglevel=0)
     self.assertTrue(isinstance(l.handlers[0], logging.FileHandler))
Example 7
 def test_setup_logger_no_handlers_file(self):
     from multiprocessing import get_logger
     l = get_logger()
     l.handlers = []
     tempfile = mktemp(suffix="unittest", prefix="celery")
     l = setup_logger(logfile=tempfile, loglevel=0)
     self.assertIsInstance(l.handlers[0], logging.FileHandler)
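
Both variants build the path with the deprecated, race-prone mktemp(). If you adapt these tests, tempfile.NamedTemporaryFile (or mkstemp()) is the safer choice; a hedged rewrite of the same assertion, which works on POSIX where the open temp file can be reopened by name:

import logging
from tempfile import NamedTemporaryFile

def test_setup_logger_no_handlers_file(self):
    # Same check as above, but the temp file is created securely.
    with NamedTemporaryFile(suffix="unittest", prefix="celery") as tmp:
        l = setup_logger(logfile=tmp.name, loglevel=0)
        self.assertIsInstance(l.handlers[0], logging.FileHandler)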
Example 8
    def get_logger(self, loglevel=None, logfile=None, **kwargs):
        """Get process-aware logger object.

        See :func:`celery.log.setup_logger`.

        """
        return setup_logger(loglevel=loglevel, logfile=logfile)
Example 9
    def get_logger(self, **kwargs):
        """Get process-aware logger object.

        See :func:`celery.log.setup_logger`.

        """
        logfile = kwargs.get("logfile")
        loglevel = kwargs.get("loglevel")
        return setup_logger(loglevel=loglevel, logfile=logfile)
Example 10
 def _run_monitor():
     logger = setup_logger(loglevel, logfile)
     monitor = MonitorService(logger=logger, http_port=http_port)
     try:
         monitor.start()
     except Exception as e:
         emergency_error(logfile,
                 "celerymon raised exception %s: %s\n%s" % (
                         e.__class__, e, traceback.format_exc()))
Example 11
 def test_setup_logger_no_handlers_stream(self):
     from multiprocessing import get_logger
     l = get_logger()
     l.handlers = []
     with override_stdouts() as outs:
         stdout, stderr = outs
         l = setup_logger(logfile=stderr, loglevel=logging.INFO)
         l.info("The quick brown fox...")
         self.assertTrue("The quick brown fox..." in stderr.getvalue())
Example 12
    def __init__(self, concurrency=None, logfile=None, loglevel=None,
            send_events=conf.SEND_EVENTS, hostname=None,
            ready_callback=noop, embed_clockservice=False,
            schedule_filename=conf.CELERYBEAT_SCHEDULE_FILENAME):

        # Options
        self.loglevel = loglevel or self.loglevel
        self.concurrency = concurrency or self.concurrency
        self.logfile = logfile or self.logfile
        self.logger = setup_logger(loglevel, logfile)
        self.hostname = hostname or socket.gethostname()
        self.embed_clockservice = embed_clockservice
        self.ready_callback = ready_callback
        self.send_events = send_events
        self._finalize = Finalize(self, self.stop, exitpriority=20)

        # Queues
        if conf.DISABLE_RATE_LIMITS:
            self.ready_queue = Queue()
        else:
            self.ready_queue = TaskBucket(task_registry=registry.tasks)
        self.eta_schedule = Scheduler(self.ready_queue)

        self.logger.debug("Instantiating thread components...")

        # Threads + Pool + Consumer
        self.pool = TaskPool(self.concurrency,
                             logger=self.logger,
                             initializer=process_initializer)
        self.mediator = Mediator(self.ready_queue,
                                 callback=self.process_task,
                                 logger=self.logger)
        self.scheduler = ScheduleController(self.eta_schedule,
                                            logger=self.logger)

        self.clockservice = None
        if self.embed_clockservice:
            self.clockservice = EmbeddedClockService(logger=self.logger,
                                    schedule_filename=schedule_filename)

        prefetch_count = self.concurrency * conf.CELERYD_PREFETCH_MULTIPLIER
        self.listener = CarrotListener(self.ready_queue,
                                       self.eta_schedule,
                                       logger=self.logger,
                                       hostname=self.hostname,
                                       send_events=self.send_events,
                                       init_callback=self.ready_callback,
                                       initial_prefetch_count=prefetch_count)

        # The order is important here;
        #   the first in the list is the first to start,
        # and they must be stopped in reverse order.
        self.components = filter(None, (self.pool,
                                        self.mediator,
                                        self.scheduler,
                                        self.clockservice,
                                        self.listener))
Example 13
    def _run_clock():
        logger = setup_logger(loglevel, logfile)
        clockservice = ClockService(logger=logger, is_detached=detach)

        try:
            clockservice.start()
        except Exception as e:
            emergency_error(
                logfile, "celerybeat raised exception %s: %s\n%s" % (e.__class__, e, traceback.format_exc())
            )
Example 14
    def __init__(self, concurrency=None, logfile=None, loglevel=None,
            send_events=conf.SEND_EVENTS, hostname=None,
            embed_clockservice=False):

        # Options
        self.loglevel = loglevel or self.loglevel
        self.concurrency = concurrency or self.concurrency
        self.logfile = logfile or self.logfile
        self.logger = setup_logger(loglevel, logfile)
        self.hostname = hostname or socket.gethostname()
        self.embed_clockservice = embed_clockservice
        self.send_events = send_events

        # Queues
        if conf.DISABLE_RATE_LIMITS:
            self.ready_queue = Queue()
        else:
            self.ready_queue = TaskBucket(task_registry=registry.tasks)
        self.eta_schedule = Scheduler(self.ready_queue)

        self.logger.debug("Instantiating thread components...")

        # Threads + Pool + Consumer
        self.pool = TaskPool(self.concurrency,
                             logger=self.logger,
                             initializer=process_initializer)
        self.mediator = Mediator(self.ready_queue,
                                 callback=self.process_task,
                                 logger=self.logger)
        self.scheduler = ScheduleController(self.eta_schedule,
                                            logger=self.logger)

        # Need a tight loop interval when embedded so the program
        # can be stopped in a sensible short time.
        self.clockservice = self.embed_clockservice and ClockServiceThread(
                                logger=self.logger,
                                max_interval=1) or None

        prefetch_count = concurrency * conf.CELERYD_PREFETCH_MULTIPLIER
        self.listener = CarrotListener(self.ready_queue,
                                       self.eta_schedule,
                                       logger=self.logger,
                                       hostname=self.hostname,
                                       send_events=send_events,
                                       initial_prefetch_count=prefetch_count)

        # The order is important here;
        #   the first in the list is the first to start,
        # and they must be stopped in reverse order.
        self.components = filter(None, (self.pool,
                                        self.mediator,
                                        self.scheduler,
                                        self.clockservice,
                                        self.listener))
Example 15
    def _run_clock():
        from celery.log import setup_logger
        logger = setup_logger(loglevel, logfile)
        clockservice = ClockService(logger=logger, schedule_filename=schedule)

        try:
            clockservice.start()
        except Exception as e:
            emergency_error(logfile,
                    "celerybeat raised exception %s: %s\n%s" % (
                            e.__class__, e, traceback.format_exc()))
Example 16
    def start_scheduler(self):
        from celery.log import setup_logger
        logger = setup_logger(self.loglevel, self.logfile)
        beat = self.ClockService(logger, schedule_filename=self.schedule)

        try:
            self.install_sync_handler(beat)
            beat.start()
        except Exception as exc:
            emergency_error(
                self.logfile, "celerybeat raised exception %s: %s\n%s" %
                (exc.__class__, exc, traceback.format_exc()))
Example 17
def detach(path, argv, logfile=None, pidfile=None, uid=None,
           gid=None, umask=0, working_directory=None):
    with detached(logfile, pidfile, uid, gid, umask, working_directory):
        try:
            os.execv(path, [path] + argv)
        except Exception:
            import logging
            from celery.log import setup_logger
            logger = setup_logger(logfile=logfile, loglevel=logging.ERROR)
            logger.critical("Can't exec %r" % (
                    " ".join([path] + argv), ),
                    exc_info=sys.exc_info())
Example 18
    def test_redirect_stdouts(self):
        logger = setup_logger(loglevel=logging.ERROR, logfile=None)
        try:
            def with_wrap_logger(sio):
                redirect_stdouts_to_logger(logger, loglevel=logging.ERROR)
                logger.error("foo")
                self.assertIn("foo", sio.getvalue())

            context = wrap_logger(logger)
            execute_context(context, with_wrap_logger)
        finally:
            sys.stdout, sys.stderr = sys.__stdout__, sys.__stderr__
Example 19
 def __init__(self, concurrency=None, logfile=None, loglevel=None,
         queue_wakeup_after=None, is_detached=False):
     self.loglevel = loglevel or self.loglevel
     self.concurrency = concurrency or self.concurrency
     self.logfile = logfile or self.logfile
     self.queue_wakeup_after = queue_wakeup_after or \
                                 self.queue_wakeup_after
     self.logger = setup_logger(loglevel, logfile)
     self.pool = TaskPool(self.concurrency, logger=self.logger)
     self.task_consumer = None
     self.task_consumer_it = None
     self.is_detached = is_detached
     self.reset_connection()
Example 20
    def start_scheduler(self):
        from celery.log import setup_logger
        logger = setup_logger(self.loglevel, self.logfile)
        beat = ClockService(logger,
                            schedule_filename=self.schedule)

        try:
            self.install_sync_handler(beat)
            beat.start()
        except Exception as exc:
            emergency_error(self.logfile,
                    "celerybeat raised exception %s: %s\n%s" % (
                            exc.__class__, exc, traceback.format_exc()))
Example 21
    def __init__(self, concurrency=None, logfile=None, loglevel=None,
            is_detached=False, embed_clockservice=False):

        # Options
        self.loglevel = loglevel or self.loglevel
        self.concurrency = concurrency or self.concurrency
        self.logfile = logfile or self.logfile
        self.is_detached = is_detached
        self.logger = setup_logger(loglevel, logfile)
        self.embed_clockservice = embed_clockservice

        # Queues
        if conf.DISABLE_RATE_LIMITS:
            self.ready_queue = Queue()
        else:
            self.ready_queue = TaskBucket(task_registry=registry.tasks)
        self.eta_scheduler = Scheduler(self.ready_queue)

        self.logger.debug("Instantiating thread components...")

        # Threads+Pool
        self.schedule_controller = ScheduleController(self.eta_scheduler)
        self.pool = TaskPool(self.concurrency, logger=self.logger)
        self.broker_listener = CarrotListener(self.ready_queue,
                                        self.eta_scheduler,
                                        logger=self.logger,
                                        initial_prefetch_count=concurrency)
        self.mediator = Mediator(self.ready_queue, self.safe_process_task)

        self.clockservice = None
        if self.embed_clockservice:
            self.clockservice = ClockServiceThread(logger=self.logger,
                                                is_detached=self.is_detached)

        # The order is important here;
        #   the first in the list is the first to start,
        # and they must be stopped in reverse order.
        self.components = filter(None, (self.pool,
                                        self.mediator,
                                        self.schedule_controller,
                                        self.clockservice,
                                        self.broker_listener))
Example 22
    def test_on_failure(self):
        tid = gen_unique_id()
        tw = TaskWrapper("cu.mytask", tid, mytask, [4], {"f": "x"})
        try:
            raise Exception("Inside unit tests")
        except Exception:
            exc_info = ExceptionInfo(sys.exc_info())

        logfh = StringIO()
        tw.logger.handlers = []
        tw.logger = setup_logger(logfile=logfh, loglevel=logging.INFO)

        from celery import conf
        conf.SEND_CELERY_TASK_ERROR_EMAILS = True

        tw.on_failure(exc_info, {"task_id": tid, "task_name": "cu.mytask"})
        logvalue = logfh.getvalue()
        self.assertTrue("cu.mytask" in logvalue)
        self.assertTrue(tid in logvalue)
        self.assertTrue("ERROR" in logvalue)

        conf.SEND_CELERY_TASK_ERROR_EMAILS = False
Example 23
 def start(self):
     context, on_stop = create_daemon_context(
                             logfile=self.logfile,
                             pidfile=self.pidfile,
                             uid=self.uid,
                             gid=self.gid,
                             umask=self.umask,
                             working_directory=self.working_directory)
     context.open()
     try:
         try:
             os.execv(self.path, [self.path] + self.argv)
         except Exception:
             import logging
             from celery.log import setup_logger
             logger = setup_logger(logfile=self.logfile,
                                   loglevel=logging.ERROR)
             logger.critical("Can't exec %r" % (
                 " ".join([self.path] + self.argv), ),
                 exc_info=sys.exc_info())
     finally:
         on_stop()
Example 24
    def _test_on_failure(self, exception):
        tid = gen_unique_id()
        tw = TaskRequest(mytask.name, tid, [4], {"f": "x"})
        try:
            raise exception
        except Exception:
            exc_info = ExceptionInfo(sys.exc_info())

        logfh = StringIO()
        tw.logger.handlers = []
        tw.logger = setup_logger(logfile=logfh, loglevel=logging.INFO)

        from celery import conf
        conf.CELERY_SEND_TASK_ERROR_EMAILS = True

        tw.on_failure(exc_info)
        logvalue = logfh.getvalue()
        self.assertIn(mytask.name, logvalue)
        self.assertIn(tid, logvalue)
        self.assertIn("ERROR", logvalue)

        conf.CELERY_SEND_TASK_ERROR_EMAILS = False
Example 25
    def test_logging_proxy(self):
        logger = setup_logger(loglevel=logging.ERROR, logfile=None)

        def with_wrap_logger(sio):
            p = LoggingProxy(logger)
            p.close()
            p.write("foo")
            self.assertNotIn("foo", sio.getvalue())
            p.closed = False
            p.write("foo")
            self.assertIn("foo", sio.getvalue())
            lines = ["baz", "xuzzy"]
            p.writelines(lines)
            for line in lines:
                self.assertIn(line, sio.getvalue())
            p.flush()
            p.close()
            self.assertFalse(p.isatty())
            self.assertIsNone(p.fileno())

        context = wrap_logger(logger)
        execute_context(context, with_wrap_logger)
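
The test drives a LoggingProxy through its whole file-like surface (write, writelines, flush, close, isatty, fileno). A sketch of a proxy with the behaviour the test exercises, assuming writes are forwarded to the logger and silently dropped once closed; this is not celery's implementation:

import logging

class LoggingProxy(object):
    # Sketch: file-like object that forwards writes to a logger.
    closed = False

    def __init__(self, logger, loglevel=None):
        self.logger = logger
        self.loglevel = loglevel or logging.ERROR

    def write(self, data):
        if not self.closed and data.strip():
            self.logger.log(self.loglevel, data.strip())

    def writelines(self, sequence):
        for line in sequence:
            self.write(line)

    def flush(self):
        pass  # Nothing is buffered.

    def close(self):
        self.closed = True

    def isatty(self):
        return False

    def fileno(self):
        return None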
Example 26
    def _test_on_failure(self, exception):
        app = app_or_default()
        tid = uuid()
        tw = TaskRequest(mytask.name, tid, [4], {"f": "x"})
        try:
            raise exception
        except Exception:
            exc_info = ExceptionInfo(sys.exc_info())

            logfh = WhateverIO()
            tw.logger.handlers = []
            tw.logger = setup_logger(logfile=logfh, loglevel=logging.INFO,
                                     root=False)

            app.conf.CELERY_SEND_TASK_ERROR_EMAILS = True

            tw.on_failure(exc_info)
            logvalue = logfh.getvalue()
            self.assertIn(mytask.name, logvalue)
            self.assertIn(tid, logvalue)
            self.assertIn("ERROR", logvalue)

            app.conf.CELERY_SEND_TASK_ERROR_EMAILS = False
Example 28
    def _test_on_failure(self, exception):
        app = app_or_default()
        tid = uuid()
        tw = TaskRequest(mytask.name, tid, [4], {"f": "x"})
        try:
            raise exception
        except Exception:
            exc_info = ExceptionInfo(sys.exc_info())

            logfh = WhateverIO()
            tw.logger.handlers = []
            tw.logger = setup_logger(logfile=logfh, loglevel=logging.INFO,
                                     root=False)

            app.conf.CELERY_SEND_TASK_ERROR_EMAILS = True

            tw.on_failure(exc_info)
            logvalue = logfh.getvalue()
            self.assertIn(mytask.name, logvalue)
            self.assertIn(tid, logvalue)
            self.assertIn("ERROR", logvalue)

            app.conf.CELERY_SEND_TASK_ERROR_EMAILS = False
Example 29
    def __init__(self, servers=None, keyspace=None, column_family=None,
            cassandra_options=None, **kwargs):
        """Initialize Cassandra backend.

        Raises :class:`celery.exceptions.ImproperlyConfigured` if
        the :setting:`CASSANDRA_SERVERS` setting is not set.

        """
        self.logger = setup_logger("celery.backends.cassandra")

        self.result_expires = kwargs.get("result_expires") or \
                                conf.TASK_RESULT_EXPIRES

        if not pycassa:
            raise ImproperlyConfigured(
                    "You need to install the pycassa library to use the "
                    "Cassandra backend. See http://github.com/vomjom/pycassa")

        settings = load_settings()

        self.servers = servers or \
                         getattr(settings, "CASSANDRA_SERVERS", self.servers)
        self.keyspace = keyspace or \
                          getattr(settings, "CASSANDRA_KEYSPACE",
                                  self.keyspace)
        self.column_family = column_family or \
                               getattr(settings, "CASSANDRA_COLUMN_FAMILY",
                                       self.column_family)
        self.cassandra_options = dict(cassandra_options or {},
                                   **getattr(settings,
                                             "CASSANDRA_OPTIONS", {}))
        if not self.servers or not self.keyspace or not self.column_family:
            raise ImproperlyConfigured(
                    "Cassandra backend not configured.")

        super(CassandraBackend, self).__init__()
        self._column_family = None
Example 30
    def __init__(self, servers=None, keyspace=None, column_family=None,
            cassandra_options=None, **kwargs):
        """Initialize Cassandra backend.

        Raises :class:`celery.exceptions.ImproperlyConfigured` if
        the ``CASSANDRA_SERVERS`` setting is not set.

        """
        self.logger = setup_logger("celery.backends.cassandra")

        self.result_expires = kwargs.get("result_expires") or \
                                conf.TASK_RESULT_EXPIRES

        if not pycassa:
            raise ImproperlyConfigured(
                    "You need to install the pycassa library to use the "
                    "Cassandra backend. See http://github.com/vomjom/pycassa")

        settings = load_settings()

        self.servers = servers or \
                         getattr(settings, "CASSANDRA_SERVERS", self.servers)
        self.keyspace = keyspace or \
                          getattr(settings, "CASSANDRA_KEYSPACE",
                                  self.keyspace)
        self.column_family = column_family or \
                               getattr(settings, "CASSANDRA_COLUMN_FAMILY",
                                       self.column_family)
        self.cassandra_options = dict(cassandra_options or {},
                                   **getattr(settings,
                                             "CASSANDRA_OPTIONS", {}))
        if not self.servers or not self.keyspace or not self.column_family:
            raise ImproperlyConfigured(
                    "Cassandra backend not configured.")

        super(CassandraBackend, self).__init__()
        self._column_family = None
Example 31
def evcam(camera, freq=1.0, maxrate=None, loglevel=0,
        logfile=None):
    if not isinstance(loglevel, int):
        loglevel = conf.LOG_LEVELS[loglevel.upper()]
    logger = log.setup_logger(loglevel=loglevel,
                              logfile=logfile,
                              name="celery.evcam")
    logger.info(
        "-> evcam: Taking snapshots with %s (every %s secs.)\n" % (
            camera, freq))
    state = State()
    cam = instantiate(camera, state,
                      freq=freq, maxrate=maxrate, logger=logger)
    cam.install()
    conn = establish_connection()
    recv = EventReceiver(conn, handlers={"*": state.event})
    try:
        try:
            recv.capture(limit=None)
        except KeyboardInterrupt:
            raise SystemExit
    finally:
        cam.cancel()
        conn.close()
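
A usage sketch: the camera argument is a dotted class path that evcam instantiates against the worker's event state. The path and logfile below are hypothetical, chosen only for illustration:

# Take a snapshot of cluster state every 2 seconds.
# "myapp.monitoring.Camera" is an example path, not a real module.
evcam("myapp.monitoring.Camera", freq=2.0, loglevel="INFO",
      logfile="/var/log/celery/evcam.log")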
Example 33
    def __init__(self, concurrency=None, logfile=None, loglevel=None,
            send_events=conf.SEND_EVENTS, hostname=None,
            ready_callback=noop, embed_clockservice=False,
            pool_cls=conf.CELERYD_POOL, listener_cls=conf.CELERYD_LISTENER,
            mediator_cls=conf.CELERYD_MEDIATOR,
            eta_scheduler_cls=conf.CELERYD_ETA_SCHEDULER,
            schedule_filename=conf.CELERYBEAT_SCHEDULE_FILENAME,
            task_time_limit=conf.CELERYD_TASK_TIME_LIMIT,
            task_soft_time_limit=conf.CELERYD_TASK_SOFT_TIME_LIMIT,
            max_tasks_per_child=conf.CELERYD_MAX_TASKS_PER_CHILD,
            pool_putlocks=conf.CELERYD_POOL_PUTLOCKS,
            db=conf.CELERYD_STATE_DB):

        # Options
        self.loglevel = loglevel or self.loglevel
        self.concurrency = concurrency or self.concurrency
        self.logfile = logfile or self.logfile
        self.logger = setup_logger(loglevel, logfile)
        self.hostname = hostname or socket.gethostname()
        self.embed_clockservice = embed_clockservice
        self.ready_callback = ready_callback
        self.send_events = send_events
        self.task_time_limit = task_time_limit
        self.task_soft_time_limit = task_soft_time_limit
        self.max_tasks_per_child = max_tasks_per_child
        self.pool_putlocks = pool_putlocks
        self.timer_debug = log.SilenceRepeated(self.logger.debug,
                                               max_iterations=10)
        self.db = db
        self._finalize = Finalize(self, self.stop, exitpriority=1)

        if self.db:
            persistence = state.Persistent(self.db)
            Finalize(persistence, persistence.save, exitpriority=5)

        # Queues
        if conf.DISABLE_RATE_LIMITS:
            self.ready_queue = FastQueue()
        else:
            self.ready_queue = TaskBucket(task_registry=registry.tasks)

        self.logger.debug("Instantiating thread components...")

        # Threads + Pool + Consumer
        self.pool = instantiate(pool_cls, self.concurrency,
                                logger=self.logger,
                                initializer=process_initializer,
                                maxtasksperchild=self.max_tasks_per_child,
                                timeout=self.task_time_limit,
                                soft_timeout=self.task_soft_time_limit,
                                putlocks=self.pool_putlocks)
        self.mediator = instantiate(mediator_cls, self.ready_queue,
                                    callback=self.process_task,
                                    logger=self.logger)
        self.scheduler = instantiate(eta_scheduler_cls,
                               precision=conf.CELERYD_ETA_SCHEDULER_PRECISION,
                               on_error=self.on_timer_error,
                               on_tick=self.on_timer_tick)

        self.clockservice = None
        if self.embed_clockservice:
            self.clockservice = EmbeddedClockService(logger=self.logger,
                                    schedule_filename=schedule_filename)

        prefetch_count = self.concurrency * conf.CELERYD_PREFETCH_MULTIPLIER
        self.listener = instantiate(listener_cls,
                                    self.ready_queue,
                                    self.scheduler,
                                    logger=self.logger,
                                    hostname=self.hostname,
                                    send_events=self.send_events,
                                    init_callback=self.ready_callback,
                                    initial_prefetch_count=prefetch_count,
                                    pool=self.pool)

        # The order is important here;
        #   the first in the list is the first to start,
        # and they must be stopped in reverse order.
        self.components = filter(None, (self.pool,
                                        self.mediator,
                                        self.scheduler,
                                        self.clockservice,
                                        self.listener))
Example 34
 def with_override_stdouts(outs):
     stdout, stderr = outs
     l = setup_logger(logfile=stderr, loglevel=logging.INFO)
     l.info("The quick brown fox...")
     self.assertIn("The quick brown fox...", stderr.getvalue())
Example 35
    def __init__(self,
                 concurrency=None,
                 logfile=None,
                 loglevel=None,
                 send_events=conf.SEND_EVENTS,
                 hostname=None,
                 ready_callback=noop,
                 embed_clockservice=False,
                 pool_cls=conf.CELERYD_POOL,
                 listener_cls=conf.CELERYD_LISTENER,
                 mediator_cls=conf.CELERYD_MEDIATOR,
                 eta_scheduler_cls=conf.CELERYD_ETA_SCHEDULER,
                 schedule_filename=conf.CELERYBEAT_SCHEDULE_FILENAME,
                 task_time_limit=conf.CELERYD_TASK_TIME_LIMIT,
                 task_soft_time_limit=conf.CELERYD_TASK_SOFT_TIME_LIMIT,
                 max_tasks_per_child=conf.CELERYD_MAX_TASKS_PER_CHILD,
                 pool_putlocks=conf.CELERYD_POOL_PUTLOCKS,
                 db=conf.CELERYD_STATE_DB):

        # Options
        self.loglevel = loglevel or self.loglevel
        self.concurrency = concurrency or self.concurrency
        self.logfile = logfile or self.logfile
        self.logger = setup_logger(loglevel, logfile)
        self.hostname = hostname or socket.gethostname()
        self.embed_clockservice = embed_clockservice
        self.ready_callback = ready_callback
        self.send_events = send_events
        self.task_time_limit = task_time_limit
        self.task_soft_time_limit = task_soft_time_limit
        self.max_tasks_per_child = max_tasks_per_child
        self.pool_putlocks = pool_putlocks
        self.db = db
        self._finalize = Finalize(self, self.stop, exitpriority=1)

        if self.db:
            persistence = state.Persistent(self.db)
            Finalize(persistence, persistence.save, exitpriority=5)

        # Queues
        if conf.DISABLE_RATE_LIMITS:
            self.ready_queue = FastQueue()
        else:
            self.ready_queue = TaskBucket(task_registry=registry.tasks)
        self.eta_schedule = Scheduler(self.ready_queue, logger=self.logger)

        self.logger.debug("Instantiating thread components...")

        # Threads + Pool + Consumer
        self.pool = instantiate(pool_cls,
                                self.concurrency,
                                logger=self.logger,
                                initializer=process_initializer,
                                maxtasksperchild=self.max_tasks_per_child,
                                timeout=self.task_time_limit,
                                soft_timeout=self.task_soft_time_limit,
                                putlocks=self.pool_putlocks)
        self.mediator = instantiate(mediator_cls,
                                    self.ready_queue,
                                    callback=self.process_task,
                                    logger=self.logger)
        self.scheduler = instantiate(eta_scheduler_cls,
                                     self.eta_schedule,
                                     logger=self.logger)

        self.clockservice = None
        if self.embed_clockservice:
            self.clockservice = EmbeddedClockService(
                logger=self.logger, schedule_filename=schedule_filename)

        prefetch_count = self.concurrency * conf.CELERYD_PREFETCH_MULTIPLIER
        self.listener = instantiate(listener_cls,
                                    self.ready_queue,
                                    self.eta_schedule,
                                    logger=self.logger,
                                    hostname=self.hostname,
                                    send_events=self.send_events,
                                    init_callback=self.ready_callback,
                                    initial_prefetch_count=prefetch_count,
                                    pool=self.pool)

        # The order is important here;
        #   the first in the list is the first to start,
        # and they must be stopped in reverse order.
        self.components = filter(None,
                                 (self.pool, self.mediator, self.scheduler,
                                  self.clockservice, self.listener))
Example 36
 def redirect_stdouts_to_logger(self):
     from celery import log
     # Redirect stdout/stderr to our logger.
     logger = log.setup_logger(loglevel=self.loglevel, logfile=self.logfile)
     log.redirect_stdouts_to_logger(logger, loglevel=logging.WARNING)
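
redirect_stdouts_to_logger() pairs with the proxy above; a sketch of what it presumably does, assuming the LoggingProxy sketched after Example 25 is in scope, so anything printed afterwards ends up in the log at the given level:

import sys

def redirect_stdouts_to_logger(logger, loglevel=None):
    # Sketch: replace both standard streams with a logging proxy.
    proxy = LoggingProxy(logger, loglevel)
    sys.stdout = sys.stderr = proxy
    return proxy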
Example 37
def run_worker(concurrency=DAEMON_CONCURRENCY, detach=False,
        loglevel=DAEMON_LOG_LEVEL, logfile=DAEMON_LOG_FILE, discard=False,
        pidfile=DAEMON_PID_FILE, umask=0, uid=None, gid=None,
        supervised=False, working_directory=None, chroot=None,
        statistics=None, **kwargs):
    """Starts the celery worker server."""

    # Set SIGCLD back to the default SIG_DFL (python-daemon overrides it);
    # this lets the parent wait() for the terminated child process and
    # avoids the 'OSError: [Errno 10] No child processes' problem.

    if hasattr(signal, "SIGCLD"): # Make sure the platform supports signals.
        signal.signal(signal.SIGCLD, signal.SIG_DFL)

    print("Celery %s is starting." % __version__)

    if statistics is not None:
        settings.CELERY_STATISTICS = statistics

    if not concurrency:
        concurrency = multiprocessing.cpu_count()

    if conf.CELERY_BACKEND == "database" \
            and settings.DATABASE_ENGINE == "sqlite3" and \
            concurrency > 1:
        import warnings
        warnings.warn("The sqlite3 database engine doesn't support "
                "concurrency. We'll be using a single process only.",
                UserWarning)
        concurrency = 1

    # Setup logging
    if not isinstance(loglevel, int):
        loglevel = LOG_LEVELS[loglevel.upper()]
    if not detach:
        logfile = None # log to stderr when not running in the background.

    if discard:
        discarded_count = discard_all()
        what = discarded_count > 1 and "messages" or "message"
        print("discard: Erased %d %s from the queue.\n" % (
                discarded_count, what))

    # Dump configuration to screen so we have some basic information
    # when users send e-mails.
    print(STARTUP_INFO_FMT % {
            "vhost": getattr(settings, "AMQP_VHOST", "(default)"),
            "host": getattr(settings, "AMQP_SERVER", "(default)"),
            "port": getattr(settings, "AMQP_PORT", "(default)"),
            "exchange": conf.AMQP_EXCHANGE,
            "exchange_type": conf.AMQP_EXCHANGE_TYPE,
            "consumer_queue": conf.AMQP_CONSUMER_QUEUE,
            "consumer_rkey": conf.AMQP_CONSUMER_ROUTING_KEY,
            "publisher_rkey": conf.AMQP_PUBLISHER_ROUTING_KEY,
            "concurrency": concurrency,
            "loglevel": loglevel,
            "pidfile": pidfile,
            "statistics": settings.CELERY_STATISTICS and "ON" or "OFF",
    })

    print("Celery has started.")
    if detach:
        if not CAN_DETACH:
            raise RuntimeError(
                    "This operating system doesn't support detach. ")
        from daemon import DaemonContext
        from celery.log import setup_logger, redirect_stdouts_to_logger

        # Since without stderr any errors will be silently suppressed,
        # we need to know that we have access to the logfile
        if logfile:
            open(logfile, "a").close()

        pidlock = acquire_pidlock(pidfile)
        if umask is None:
            umask = 0
        if uid is None:
            uid = os.geteuid()
        if gid is None:
            gid = os.getegid()
        working_directory = working_directory or os.getcwd()
        context = DaemonContext(chroot_directory=chroot,
                                working_directory=working_directory,
                                umask=umask,
                                pidfile=pidlock,
                                uid=uid,
                                gid=gid)
        context.open()
        logger = setup_logger(loglevel, logfile)
        redirect_stdouts_to_logger(logger, loglevel)

    # Run the worker init handler.
    # (Usually imports task modules and such.)
    current_loader.on_worker_init()

    def run_worker():
        worker = WorkController(concurrency=concurrency,
                                loglevel=loglevel,
                                logfile=logfile,
                                is_detached=detach)

        # Install signal handler that restarts celeryd on SIGHUP,
        # (only on POSIX systems)
        install_restart_signal_handler(worker)

        try:
            worker.start()
        except Exception as e:
            emergency_error(logfile, "celeryd raised exception %s: %s\n%s" % (
                            e.__class__, e, traceback.format_exc()))
Example 38
 def redirect_stdouts_to_logger(self):
     from celery import log
     # Redirect stdout/stderr to our logger.
     logger = log.setup_logger(loglevel=self.loglevel,
                               logfile=self.logfile)
     log.redirect_stdouts_to_logger(logger, loglevel=logging.WARNING)