Example #1
    def test_regular_task(self):
        T1 = self.createTaskCls("T1", "c.unittest.t.t1")
        self.assertTrue(isinstance(T1(), T1))
        self.assertTrue(T1().run())
        self.assertTrue(callable(T1()),
                "Task class is callable()")
        self.assertTrue(T1()(),
                "Task class runs run() when called")

        # Task name is generated from the class module + name.
        T2 = self.createTaskCls("T2")
        self.assertEquals(T2().name, "celery.tests.test_task.T2")

        registry.tasks.register(T1)
        t1 = T1()
        consumer = t1.get_consumer()
        self.assertRaises(NotImplementedError, consumer.receive, "foo", "foo")
        consumer.discard_all()
        self.assertTrue(consumer.fetch() is None)

        # Without arguments.
        presult = t1.delay()
        self.assertNextTaskDataEquals(consumer, presult, t1.name)

        # With arguments.
        presult2 = task.delay_task(t1.name, name="George Constanza")
        self.assertNextTaskDataEquals(consumer, presult2, t1.name,
                name="George Constanza")

        # With eta.
        presult2 = task.apply_async(t1, kwargs=dict(name="George Constanza"),
                                    eta=datetime.now() + timedelta(days=1))
        self.assertNextTaskDataEquals(consumer, presult2, t1.name,
                name="George Constanza", test_eta=True)

        # With countdown.
        presult2 = task.apply_async(t1, kwargs=dict(name="George Constanza"),
                                    countdown=10)
        self.assertNextTaskDataEquals(consumer, presult2, t1.name,
                name="George Constanza", test_eta=True)

        self.assertRaises(registry.tasks.NotRegistered, task.delay_task,
                "some.task.that.should.never.exist.X.X.X.X.X")

        # Discarding all tasks.
        task.discard_all()
        tid3 = task.delay_task(t1.name) # Queue one task so discard_all() has something to erase.
        self.assertEquals(task.discard_all(), 1)
        self.assertTrue(consumer.fetch() is None)

        self.assertFalse(task.is_done(presult.task_id))
        self.assertFalse(presult.is_done())
        default_backend.mark_as_done(presult.task_id, result=None)
        self.assertTrue(task.is_done(presult.task_id))
        self.assertTrue(presult.is_done())


        publisher = t1.get_publisher()
        self.assertTrue(isinstance(publisher, messaging.TaskPublisher))
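A note on the eta/countdown checks above: countdown is shorthand for an
eta computed that many seconds into the future, which is why both
variants are verified with test_eta=True. A minimal sketch of the
equivalence (the variable name is ours):

from datetime import datetime, timedelta

# countdown=10 and an explicit eta ten seconds ahead describe the same
# earliest-execution time for the task.
ten_seconds_ahead = datetime.now() + timedelta(seconds=10)
# apply_async(t1, countdown=10) is equivalent to:
# apply_async(t1, eta=ten_seconds_ahead)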
Example #2
    def test_regular_task(self):
        T1 = self.createTaskCls("T1", "c.unittest.t.t1")
        self.assertTrue(isinstance(T1(), T1))
        self.assertTrue(T1().run())
        self.assertTrue(callable(T1()),
                "Task class is callable()")
        self.assertTrue(T1()(),
                "Task class runs run() when called")

        # A task without a name raises NotImplementedError.
        T2 = self.createTaskCls("T2")
        self.assertRaises(NotImplementedError, T2)

        registry.tasks.register(T1)
        t1 = T1()
        consumer = t1.get_consumer()
        self.assertRaises(NotImplementedError, consumer.receive, "foo", "foo")
        consumer.discard_all()
        self.assertTrue(consumer.fetch() is None)

        # Without arguments.
        presult = t1.delay()
        self.assertNextTaskDataEquals(consumer, presult, t1.name)

        # With arguments.
        presult2 = task.delay_task(t1.name, name="George Constanza")
        self.assertNextTaskDataEquals(consumer, presult2, t1.name,
                name="George Constanza")

        self.assertRaises(registry.tasks.NotRegistered, task.delay_task,
                "some.task.that.should.never.exist.X.X.X.X.X")

        # Discarding all tasks.
        task.discard_all()
        tid3 = task.delay_task(t1.name) # Queue one task so discard_all() has something to erase.
        self.assertEquals(task.discard_all(), 1)
        self.assertTrue(consumer.fetch() is None)

        self.assertFalse(task.is_done(presult.task_id))
        self.assertFalse(presult.is_done())
        default_backend.mark_as_done(presult.task_id, result=None)
        self.assertTrue(task.is_done(presult.task_id))
        self.assertTrue(presult.is_done())


        publisher = t1.get_publisher()
        self.assertTrue(isinstance(publisher, messaging.TaskPublisher))
Example #3
def main(concurrency=DAEMON_CONCURRENCY, daemon=False,
        loglevel=DAEMON_LOG_LEVEL, logfile=DAEMON_LOG_FILE, discard=False,
        pidfile=DAEMON_PID_FILE, queue_wakeup_after=QUEUE_WAKEUP_AFTER):
    """Run the celery daemon."""
    if settings.DATABASE_ENGINE == "sqlite3" and concurrency > 1:
        import warnings
        warnings.warn("The sqlite3 database engine doesn't support "
                "concurrency. We'll be using a single process only.",
                UserWarning)
        concurrency = 1

    if discard:
        discarded_count = discard_all()
        what = "message"
        if discarded_count > 1:
            what = "messages"
        sys.stderr.write("Discard: Erased %d %s from the queue.\n" % (
            discarded_count, what))
    if daemon:
        sys.stderr.write("Launching celeryd in the background...\n")
        pidfile_handler = PIDFile(pidfile)
        pidfile_handler.check()
        daemonize(pidfile=pidfile_handler)
        atexit.register(remove_pidfile, pidfile)
    else:
        logfile = None # log to stderr when not running as daemon.

    discovery.autodiscover()
    celeryd = WorkController(concurrency=concurrency,
                               loglevel=loglevel,
                               logfile=logfile,
                               queue_wakeup_after=queue_wakeup_after,
                               is_detached=daemon)
    try:
        celeryd.run()
    except Exception, e:
        emergency_error(logfile, "celeryd raised exception %s: %s\n%s" % (
                            e.__class__, e, traceback.format_exc()))
Example #4
def purge_messages(self):
    discarded_count = discard_all()
    what = discarded_count > 1 and "messages" or "message"
    print("discard: Erased %d %s from the queue.\n" % (
        discarded_count, what))
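The "discarded_count > 1 and ... or ..." line above is the pre-Python-2.5
substitute for a conditional expression; it is safe here only because
neither string is falsy. A sketch of the same pluralization in the modern
syntax (the helper name is ours):

def pluralize(count):
    # Equivalent to: count > 1 and "messages" or "message"
    return "messages" if count > 1 else "message"

print("discard: Erased %d %s from the queue." % (3, pluralize(3)))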
Example #5
def run_worker(concurrency=conf.CELERYD_CONCURRENCY,
        loglevel=conf.CELERYD_LOG_LEVEL, logfile=conf.CELERYD_LOG_FILE,
        hostname=None,
        discard=False, run_clockservice=False, events=False, **kwargs):
    """Starts the celery worker server."""

    hostname = hostname or socket.gethostname()

    print("celery@%s v%s is starting." % (hostname, celery.__version__))

    from celery.loaders import current_loader, load_settings
    loader = current_loader()
    settings = load_settings()

    if not concurrency:
        concurrency = multiprocessing.cpu_count()

    if conf.CELERY_BACKEND == "database" \
            and settings.DATABASE_ENGINE == "sqlite3" and \
            concurrency > 1:
        import warnings
        warnings.warn("The sqlite3 database engine doesn't support "
                "concurrency. We'll be using a single process only.",
                UserWarning)
        concurrency = 1

    # Set up logging
    if not isinstance(loglevel, int):
        loglevel = conf.LOG_LEVELS[loglevel.upper()]

    if discard:
        discarded_count = discard_all()
        what = discarded_count > 1 and "messages" or "message"
        print("discard: Erased %d %s from the queue.\n" % (
                discarded_count, what))

    # Run the worker init handler.
    # (Usually imports task modules and such.)
    loader.on_worker_init()

    # Dump configuration to screen so we have some basic information
    # when users send e-mails.

    tasklist = ""
    if loglevel <= logging.INFO:
        from celery.registry import tasks
        tasklist = tasks.keys()
        if loglevel > logging.DEBUG:
            tasklist = filter(lambda s: not s.startswith("celery."), tasklist)
        tasklist = TASK_LIST_FMT % "\n".join("        . %s" % task
                                                for task in sorted(tasklist))

    print(STARTUP_INFO_FMT % {
            "conninfo": info.format_broker_info(),
            "queues": info.format_routing_table(indent=8),
            "concurrency": concurrency,
            "loglevel": conf.LOG_LEVELS[loglevel],
            "logfile": logfile or "[stderr]",
            "celerybeat": run_clockservice and "ON" or "OFF",
            "events": events and "ON" or "OFF",
            "tasks": tasklist,
            "loader": loader.__class__.__module__,
    })

    print("Celery has started.")
    set_process_status("Running...")

    def run_worker():
        worker = WorkController(concurrency=concurrency,
                                loglevel=loglevel,
                                logfile=logfile,
                                hostname=hostname,
                                embed_clockservice=run_clockservice,
                                send_events=events)

        # Install signal handler so SIGHUP restarts the worker.
        install_worker_restart_handler(worker)

        from celery import signals
        signals.worker_init.send(sender=worker)

        try:
            worker.start()
        except Exception, e:
            emergency_error(logfile, "celeryd raised exception %s: %s\n%s" % (
                            e.__class__, e, traceback.format_exc()))
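The signals.worker_init.send(sender=worker) call above implies user code
can register a receiver that runs at worker startup. A minimal sketch of
one, assuming the celery.signals module imported in the example:

from celery import signals

def on_worker_init(sender=None, **kwargs):
    # Runs once while the worker initializes, before it consumes tasks.
    print("worker_init received from %r" % (sender,))

signals.worker_init.connect(on_worker_init)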
Example #6
def run_worker(concurrency=DAEMON_CONCURRENCY, detach=False,
        loglevel=DAEMON_LOG_LEVEL, logfile=DAEMON_LOG_FILE, discard=False,
        pidfile=DAEMON_PID_FILE, umask=0, uid=None, gid=None,
        supervised=False, working_directory=None, chroot=None,
        statistics=None, **kwargs):
    """Starts the celery worker server."""

    # Set SIGCLD back to the default SIG_DFL handler (python-daemon
    # overrides it) so the parent can wait() for terminated child
    # processes; this avoids the "OSError: [Errno 10] No child processes"
    # problem.
    if hasattr(signal, "SIGCLD"): # Not all platforms define SIGCLD.
        signal.signal(signal.SIGCLD, signal.SIG_DFL)

    print("Celery %s is starting." % __version__)

    if statistics is not None:
        settings.CELERY_STATISTICS = statistics

    if not concurrency:
        concurrency = multiprocessing.cpu_count()

    if conf.CELERY_BACKEND == "database" \
            and settings.DATABASE_ENGINE == "sqlite3" and \
            concurrency > 1:
        import warnings
        warnings.warn("The sqlite3 database engine doesn't support "
                "concurrency. We'll be using a single process only.",
                UserWarning)
        concurrency = 1

    # Set up logging
    if not isinstance(loglevel, int):
        loglevel = LOG_LEVELS[loglevel.upper()]
    if not detach:
        logfile = None # log to stderr when not running in the background.

    if discard:
        discarded_count = discard_all()
        what = discarded_count > 1 and "messages" or "message"
        print("discard: Erased %d %s from the queue.\n" % (
                discarded_count, what))

    # Dump configuration to screen so we have some basic information
    # when users send e-mails.
    print(STARTUP_INFO_FMT % {
            "vhost": getattr(settings, "AMQP_VHOST", "(default)"),
            "host": getattr(settings, "AMQP_SERVER", "(default)"),
            "port": getattr(settings, "AMQP_PORT", "(default)"),
            "exchange": conf.AMQP_EXCHANGE,
            "exchange_type": conf.AMQP_EXCHANGE_TYPE,
            "consumer_queue": conf.AMQP_CONSUMER_QUEUE,
            "consumer_rkey": conf.AMQP_CONSUMER_ROUTING_KEY,
            "publisher_rkey": conf.AMQP_PUBLISHER_ROUTING_KEY,
            "concurrency": concurrency,
            "loglevel": loglevel,
            "pidfile": pidfile,
            "statistics": settings.CELERY_STATISTICS and "ON" or "OFF",
    })

    print("Celery has started.")
    if detach:
        if not CAN_DETACH:
            raise RuntimeError(
                    "This operating system doesn't support detach. ")
        from daemon import DaemonContext
        from celery.log import setup_logger, redirect_stdouts_to_logger

        # Without stderr, errors would be silently suppressed, so make
        # sure the logfile is actually writable before detaching.
        if logfile:
            open(logfile, "a").close()

        pidlock = acquire_pidlock(pidfile)
        if umask is None:
            umask = 0
        if uid is None:
            uid = os.geteuid()
        if gid is None:
            gid = os.getegid()
        working_directory = working_directory or os.getcwd()
        context = DaemonContext(chroot_directory=chroot,
                                working_directory=working_directory,
                                umask=umask,
                                pidfile=pidlock,
                                uid=uid,
                                gid=gid)
        context.open()
        logger = setup_logger(loglevel, logfile)
        redirect_stdouts_to_logger(logger, loglevel)

    # Run the worker init handler.
    # (Usually imports task modules and such.)
    current_loader.on_worker_init()

    def run_worker():
        worker = WorkController(concurrency=concurrency,
                                loglevel=loglevel,
                                logfile=logfile,
                                is_detached=detach)

        # Install a signal handler that restarts celeryd on SIGHUP
        # (POSIX systems only).
        install_restart_signal_handler(worker)

        try:
            worker.start()
        except Exception, e:
            emergency_error(logfile, "celeryd raised exception %s: %s\n%s" % (
                            e.__class__, e, traceback.format_exc()))
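install_restart_signal_handler above is Celery's own helper; the general
shape of such a handler is to re-exec the process on SIGHUP. A sketch
under that assumption (names ours, POSIX only):

import os
import signal
import sys

def install_restart_handler():
    def restart(signum, frame):
        # Replace the current process image with a fresh copy of itself,
        # preserving the original command line.
        os.execv(sys.executable, [sys.executable] + sys.argv)
    if hasattr(signal, "SIGHUP"): # Windows has no SIGHUP.
        signal.signal(signal.SIGHUP, restart)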
Example #7
def run_worker(concurrency=DAEMON_CONCURRENCY, detach=False,
        loglevel=DAEMON_LOG_LEVEL, logfile=DAEMON_LOG_FILE, discard=False,
        pidfile=DAEMON_PID_FILE, umask=0, uid=None, gid=None,
        supervised=False, working_directory=None, chroot=None,
        statistics=None, **kwargs):
    """Starts the celery worker server."""

    print("Celery %s is starting." % __version__)

    if statistics:
        settings.CELERY_STATISTICS = statistics

    if not concurrency:
        concurrency = multiprocessing.cpu_count()

    if settings.DATABASE_ENGINE == "sqlite3" and concurrency > 1:
        import warnings
        warnings.warn("The sqlite3 database engine doesn't support "
                "concurrency. We'll be using a single process only.",
                UserWarning)
        concurrency = 1

    # Set up logging
    if not isinstance(loglevel, int):
        loglevel = LOG_LEVELS[loglevel.upper()]
    if not detach:
        logfile = None # log to stderr when not running in the background.

    if discard:
        discarded_count = discard_all()
        what = discarded_count > 1 and "messages" or "message"
        print("discard: Erased %d %s from the queue.\n" % (
                discarded_count, what))

    # Dump configuration to screen so we have some basic information
    # when users send e-mails.
    print(STARTUP_INFO_FMT % {
            "vhost": settings.AMQP_VHOST,
            "host": settings.AMQP_SERVER,
            "port": settings.AMQP_PORT,
            "exchange": conf.AMQP_EXCHANGE,
            "exchange_type": conf.AMQP_EXCHANGE_TYPE,
            "consumer_queue": conf.AMQP_CONSUMER_QUEUE,
            "consumer_rkey": conf.AMQP_CONSUMER_ROUTING_KEY,
            "publisher_rkey": conf.AMQP_PUBLISHER_ROUTING_KEY,
            "concurrency": concurrency,
            "loglevel": loglevel,
            "pidfile": pidfile,
            "statistics": settings.CELERY_STATISTICS and "ON" or "OFF",
    })

    print("Celery has started.")
    if detach:
        if not CAN_DETACH:
            raise RuntimeError(
                    "This operating system doesn't support detach. ")
        from daemon import DaemonContext
        # Without stderr, errors would be silently suppressed, so make
        # sure the logfile is actually writable before detaching.
        if logfile:
            open(logfile, "a").close()
        pidlock = acquire_pidlock(pidfile)
        if umask is None:
            umask = 0
        # Use explicit None checks; the "and/or" idiom misfires for uid/gid 0.
        uid = int(uid) if uid is not None else os.geteuid()
        gid = int(gid) if gid is not None else os.getegid()
        working_directory = working_directory or os.getcwd()
        context = DaemonContext(chroot_directory=chroot,
                                working_directory=working_directory,
                                umask=umask,
                                pidfile=pidlock,
                                uid=uid,
                                gid=gid)
        context.open()

    discovery.autodiscover()

    def run_worker():
        worker = WorkController(concurrency=concurrency,
                                loglevel=loglevel,
                                logfile=logfile,
                                is_detached=detach)
        try:
            worker.start()
        except Exception, e:
            emergency_error(logfile, "celeryd raised exception %s: %s\n%s" % (
                            e.__class__, e, traceback.format_exc()))
Example #8
#!/usr/bin/env python
import parametros
from guars.nucleo.models import *

# Reset every village's resource deposits and construction state.
Aldeaporusuario.objects.all().update(depositohuerto=0, depositobarrizal=0,
                                     depositobosque=0, depositomina=0)
Materiaporaldea.objects.all().update(enconstruccion=False, nivelactual=0)

# Then purge the queue so no stale tasks run against the reset data.
from celery import task
task.discard_all()
Example #9
def run_worker(concurrency=conf.DAEMON_CONCURRENCY, detach=False,
        loglevel=conf.DAEMON_LOG_LEVEL, logfile=conf.DAEMON_LOG_FILE,
        discard=False, pidfile=conf.DAEMON_PID_FILE, umask=0,
        uid=None, gid=None, working_directory=None,
        chroot=None, statistics=None, run_clockservice=False, **kwargs):
    """Starts the celery worker server."""

    print("Celery %s is starting." % __version__)

    if statistics is not None:
        settings.CELERY_STATISTICS = statistics

    if not concurrency:
        concurrency = multiprocessing.cpu_count()

    if conf.CELERY_BACKEND == "database" \
            and settings.DATABASE_ENGINE == "sqlite3" and \
            concurrency > 1:
        import warnings
        warnings.warn("The sqlite3 database engine doesn't support "
                "concurrency. We'll be using a single process only.",
                UserWarning)
        concurrency = 1

    # Set up logging
    if not isinstance(loglevel, int):
        loglevel = conf.LOG_LEVELS[loglevel.upper()]
    if not detach:
        logfile = None # log to stderr when not running in the background.

    if discard:
        discarded_count = discard_all()
        what = discarded_count > 1 and "messages" or "message"
        print("discard: Erased %d %s from the queue.\n" % (
                discarded_count, what))

    # Dump configuration to screen so we have some basic information
    # when users send e-mails.

    print(STARTUP_INFO_FMT % {
            "conninfo": get_connection_info(),
            "exchange": conf.AMQP_EXCHANGE,
            "exchange_type": conf.AMQP_EXCHANGE_TYPE,
            "consumer_queue": conf.AMQP_CONSUMER_QUEUE,
            "consumer_rkey": conf.AMQP_CONSUMER_ROUTING_KEY,
            "publisher_rkey": conf.AMQP_PUBLISHER_ROUTING_KEY,
            "concurrency": concurrency,
            "loglevel": loglevel,
            "pidfile": pidfile,
            "statistics": settings.CELERY_STATISTICS and "ON" or "OFF",
            "celerybeat": run_clockservice and "ON" or "OFF",
    })

    print("Celery has started.")
    if detach:
        from celery.log import setup_logger, redirect_stdouts_to_logger
        context = platform.create_daemon_context(logfile, pidfile,
                                        chroot_directory=chroot,
                                        working_directory=working_directory,
                                        umask=umask,
                                        uid=uid,
                                        gid=gid)
        context.open()
        logger = setup_logger(loglevel, logfile)
        redirect_stdouts_to_logger(logger, loglevel)

    # Run the worker init handler.
    # (Usually imports task modules and such.)
    current_loader.on_worker_init()

    def run_worker():
        worker = WorkController(concurrency=concurrency,
                                loglevel=loglevel,
                                logfile=logfile,
                                embed_clockservice=run_clockservice,
                                is_detached=detach)

        # Install a signal handler that restarts celeryd on SIGHUP
        # (POSIX systems only).
        install_worker_restart_handler(worker)

        try:
            worker.start()
        except Exception, e:
            emergency_error(logfile, "celeryd raised exception %s: %s\n%s" % (
                            e.__class__, e, traceback.format_exc()))
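All of these snippets target early Celery releases, where discard_all()
lived in the celery.task module. Current Celery exposes the same
operation as app.control.purge() (and the "celery purge" command); a
minimal sketch, assuming a broker reachable on localhost:

from celery import Celery

app = Celery("proj", broker="amqp://guest@localhost//")

# Remove every message waiting in the configured task queues and
# report how many were discarded.
discarded_count = app.control.purge()
print("Erased %d message(s) from the queue." % discarded_count)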