def get_schedule(beat: dict) -> Optional[BaseSchedule]:
    """Build a Celery schedule object from a beat definition dict.

    Args:
        beat: mapping with a 'type' key (schedule kind) and a 'config'
            key (mapping of field values for that kind).

    Returns:
        A ``crontab`` schedule when ``beat['type'] == 'crontab'``,
        otherwise ``None`` (unknown types are ignored, not an error).
    """
    # Renamed from `type` to avoid shadowing the builtin.
    beat_type = beat['type']
    config = beat['config']
    if beat_type == 'crontab':
        # Every missing field defaults to '*' ("every"), matching cron semantics.
        return crontab(
            minute=config.get('minute', '*'),
            hour=config.get('hour', '*'),
            day_of_week=config.get('day_of_week', '*'),
            day_of_month=config.get('day_of_month', '*'),
            month_of_year=config.get('month_of_year', '*'),
        )
    return None
def test_update_cron_task(self):
    """update_cron_task registers the task under the expected beat-schedule key."""
    cases = [
        # crontab str
        (
            '* 10 * * *',
            test_func,
            ('sdg', ),
            {'job': 'programmer'},
            'tests.test_cron.test_func',
            '[tests.test_cron.test_func | {}]'.format(crontab(hour=10)),
            crontab(hour=10),
        ),
        # run by seconds
        (
            5,
            test_func,
            tuple(),
            {},
            'tests.test_cron.test_func',
            '[tests.test_cron.test_func | 5]',
            5,
        ),
    ]
    for case in cases:
        (cron_settings, func, args, kwargs,
         expected_task_name, expected_cron_name, expected_schedule) = case
        auto.cron.update_cron_task(cron_settings, func, *args, **kwargs)
        beat_schedule = auto.cron.app.conf.beat_schedule
        self.assertIn(expected_cron_name, beat_schedule)
        expected_entry = {
            'task': expected_task_name,
            'schedule': expected_schedule,
            'args': args,
            'kwargs': kwargs,
        }
        self.assertDictEqual(expected_entry, beat_schedule[expected_cron_name])
from celery.beat import crontab from celery.signals import after_setup_logger @after_setup_logger.connect def config_loggers(*args, **kwags): # This prevents celery reconfiguring the logging import log log.setup(force=True) beat_schedule = { # Try to retrigger anything we missed once a day 'retrigger': { "task": "sync.tasks.retrigger", "schedule": crontab(hour=8, minute=0), }, # Try to cleanup once an hour 'cleanup': { "task": "sync.tasks.cleanup", "schedule": 3600, }, # Try to update metadata once a day 'update_bugs': { "task": "sync.tasks.update_bugs", "schedule": crontab(hour=9, minute=0), } } worker = celery.Celery('sync', broker='pyamqp://*****:*****@rabbitmq',
# NOTE(review): `crontab` lives in celery.schedules; importing it from
# celery.beat fails with an ImportError.
from celery.schedules import crontab

# Periodic task registry consumed by celery beat (legacy setting name;
# Celery >= 4 also accepts the lowercase `beat_schedule`).
CELERYBEAT_SCHEDULE = {
    'update_db': {
        'task': 'manager.periodic_task',
        # minute='*' -> run every minute.
        'schedule': crontab(minute='*'),
        'args': ()
    }
}
CELERY_DEFAULT_QUEUE = 'control'
CELERY_DEFAULT_EXCHANGE = 'campaigns'
CELERY_DEFAULT_EXCHANGE_TYPE = 'topic'

CELERY_QUEUES = (
    #Queue('timelines', routing_key='*.timeline.*'),
    #Queue('streamings', routing_key='*.streaming.*'),
    Queue('control', routing_key='control'),
    Queue('offline_jobs', routing_key='offline_jobs'),
)

# NOTE(review): `crontab` is defined in celery.schedules;
# `from celery.beat import crontab` raises ImportError.
# (Removed the superseded, commented-out timedelta-based schedule that
# duplicated the entry below.)
from celery.schedules import crontab

CELERYBEAT_SCHEDULE = {
    'dispatch_timeline_harvester_tasks_every_five_minutes': {
        'task': 'check_watchlist_and_dispatch_tasks',
        # crontab's first positional argument is `minute`; '*/5' -> every 5 min.
        'schedule': crontab('*/5'),
    },
}
from __future__ import absolute_import

from datetime import datetime

from celery.task.base import periodic_task
from celery.utils.log import get_task_logger
# NOTE(review): `crontab` lives in celery.schedules, not celery.beat —
# the old import raised ImportError.
from celery.schedules import crontab

from lifeboat.models import Error as LifeboatError
from lifeboat.models import Rescue
from lifeboat.models import Statistic

logger = get_task_logger("lifeboat")


# A periodic task that will run every minute (the symbol "*" means every)
@periodic_task(queue='lifeboat', options={'queue': 'lifeboat'},
               run_every=crontab(hour="*", minute="*", day_of_week="*"))
def handle_rescues():
    """Run Rescue.rescue() on every Error whose status is 'unhandled'."""
    logger.debug("Starting task handle_rescues")
    unhandled_errors = LifeboatError.objects.filter(status="unhandled")
    logger.info("Found {0} unhandled errors".format(unhandled_errors.count()))
    for error in unhandled_errors:
        Rescue.rescue(error)


@periodic_task(queue='lifeboat', options={'queue': 'lifeboat'},
               run_every=crontab(hour="*", minute="*", day_of_week="*"))
def gather_stats():
    """For each Statistic, call try_reset_value() then gather a fresh value.

    Runs every minute; presumably try_reset_value() clears stale counters
    before Statistic.gather_stat() records the new datapoint — confirm
    against the model implementation.
    """
    for stat in Statistic.objects.all():
        stat.try_reset_value()
        Statistic.gather_stat(stat)