def test_read_periodic(self):
    """read_periodic() returns exactly the tasks whose crontab matches the given datetime."""
    @self.huey.periodic_task(crontab(minute='*/15', hour='9-17'))
    def work():
        pass

    @self.huey.periodic_task(crontab(minute='0', hour='21'))
    def sleep():
        pass

    @self.huey.periodic_task(crontab(minute='0-30'))
    def first_half():
        pass

    def check(hour, minute, expected):
        # Fixed date; only hour/minute vary across the assertions below.
        moment = datetime.datetime(2000, 1, 1, hour, minute)
        matched = [t.name for t in self.huey.read_periodic(moment)]
        self.assertEqual(matched, expected)

    # Expected names follow task registration order: work, sleep, first_half.
    for hour, minute, expected in [
            (0, 0, ['first_half']),
            (23, 59, []),
            (9, 0, ['work', 'first_half']),
            (9, 45, ['work']),
            (9, 46, []),
            (21, 0, ['sleep', 'first_half']),
            (21, 30, ['first_half']),
            (21, 31, [])]:
        check(hour, minute, expected)
def test_consumer_periodic_tasks(self):
    """Consumer schedules matching periodic tasks; revoked ones are enqueued but not run."""
    executed = []

    @self.huey.periodic_task(crontab(minute='*/10'))
    def task_p1():
        executed.append('p1')

    @self.huey.periodic_task(crontab(minute='0', hour='0'))
    def task_p2():
        executed.append('p2')

    consumer = self.consumer(workers=1)

    # At midnight both schedules match.
    self.schedule_tasks(consumer, datetime.datetime(2000, 1, 1, 0, 0))
    self.assertEqual(len(self.huey), 2)
    self.work_on_tasks(consumer, 2)
    self.assertEqual(executed, ['p1', 'p2'])

    # At noon only the every-10-minutes task matches.
    noon = datetime.datetime(2000, 1, 1, 12, 0)
    self.schedule_tasks(consumer, noon)
    self.assertEqual(len(self.huey), 1)
    self.work_on_tasks(consumer, 1)
    self.assertEqual(executed, ['p1', 'p2', 'p1'])

    # A revoked periodic task is still enqueued by the scheduler...
    task_p1.revoke()
    self.schedule_tasks(consumer, noon)
    self.assertEqual(len(self.huey), 1)  # Enqueued despite being revoked.
    # ...but the worker skips it, so the side-effect list is unchanged.
    self.work_on_tasks(consumer, 1)
    self.assertEqual(executed, ['p1', 'p2', 'p1'])  # No change, not executed.
def test_retry_periodic(self):
    """A failing periodic task is re-enqueued with its retry count decremented."""
    progress = [0]

    @self.huey.periodic_task(crontab(hour='0'), retries=2)
    def task_p():
        if progress[0] == 0:
            progress[0] = 1
            raise TestError('oops')
        elif progress[0] == 1:
            progress[0] = 2
        else:
            progress[0] = 9  # Should not happen.

    task_p()
    # The first execution raises, so no result is produced.
    self.assertTrue(self.execute_next() is None)

    # The failed task is re-enqueued for retry; verify one attempt was
    # consumed, then execute it successfully.
    self.assertEqual(len(self.huey), 1)
    retry_task = self.huey.dequeue()
    self.assertEqual(retry_task.retries, 1)
    self.huey.execute(retry_task)
    self.assertEqual(progress, [2])
    self.assertEqual(len(self.huey), 0)
def test_revoke_periodic(self):
    """Exercise revoke/restore, revoke_once, and revoke_until on a periodic task."""
    counter = [0]

    @self.huey.periodic_task(crontab(minute='0'))
    def task_p():
        counter[0] += 1

    # Plain revoke/restore; is_revoked() must not consume the flag.
    task_p.revoke()
    self.assertTrue(task_p.is_revoked())
    self.assertTrue(task_p.is_revoked())  # Verify check is idempotent.
    task_p.restore()
    self.assertFalse(task_p.is_revoked())
    self.assertFalse(task_p.restore())  # It is not revoked.

    # revoke_once: flag survives checks, but is cleared by one execution.
    task_p.revoke(revoke_once=True)
    self.assertTrue(task_p.is_revoked())
    self.assertTrue(task_p.is_revoked())  # Verify idempotent.
    result = task_p()
    self.execute_next()
    self.assertTrue(result() is None)
    self.assertEqual(counter, [0])  # Task was not run, no side-effect.
    self.assertFalse(task_p.is_revoked())  # No longer revoked.

    # revoke_until: revoked strictly before the timestamp, not at or after.
    timestamp = datetime.datetime(2000, 1, 1)
    second = datetime.timedelta(seconds=1)
    task_p.revoke(revoke_until=timestamp)
    self.assertFalse(task_p.is_revoked(timestamp=timestamp))
    self.assertFalse(task_p.is_revoked(timestamp=timestamp + second))
    self.assertTrue(task_p.is_revoked(timestamp=timestamp - second))
    task_p.restore()
    self.assertFalse(task_p.is_revoked())
    self.assertFalse(task_p.is_revoked(timestamp=timestamp - second))
def wrapped_f(*args, **kwargs):
    # Build the crontab schedule from the settings captured on self, then
    # register f as a database-backed periodic task.
    # NOTE(review): *args/**kwargs are accepted but intentionally ignored,
    # matching the original contract.
    schedule = crontab(
        month=self.month,
        day=self.day,
        day_of_week=self.day_of_week,
        hour=self.hour,
        minute=self.minute)
    return db_periodic_task(schedule)(f)
def test_periodic_task(self):
    """periodic_task() produces PeriodicTask instances and honors retry settings."""
    @self.huey.periodic_task(crontab(minute='1'))
    def task_p():
        return 123

    instance = task_p.s()
    self.assertTrue(isinstance(instance, PeriodicTask))
    # Defaults: no retries, no delay.
    self.assertEqual(instance.retries, 0)
    self.assertEqual(instance.retry_delay, 0)
    self.assertEqual(instance.execute(), 123)

    @self.huey.periodic_task(crontab(), retries=3, retry_delay=10)
    def task_p2():
        pass

    # Explicit retry settings are carried onto the task instance.
    instance = task_p2.s()
    self.assertEqual(instance.retries, 3)
    self.assertEqual(instance.retry_delay, 10)
def test_call_periodic_task(self):
    """Calling a periodic task enqueues it, but its result is never stored."""
    @self.huey.periodic_task(crontab(minute='1'))
    def task_p():
        return 123

    result = task_p()
    self.assertEqual(len(self.huey), 1)
    self.assertEqual(self.execute_next(), 123)
    # Result-store is not used for periodic task results.
    self.assertTrue(result() is None)
def test_docstring_preserved(self):
    """The task decorators must not clobber the wrapped function's docstring."""
    @self.huey.task()
    def add(a, b):
        'Adds two numbers.'
        return a + b

    @self.huey.periodic_task(crontab(minute='*'))
    def ptask():
        'Sample periodic task.'

    # Both decorator flavors should leave the docstring intact.
    for wrapped, doc in ((add, 'Adds two numbers.'),
                         (ptask, 'Sample periodic task.')):
        self.assertEqual(inspect.getdoc(wrapped), doc)
def test_periodic_priority(self):
    """Priority declared on a periodic task survives the read/enqueue cycle."""
    @self.huey.periodic_task(crontab(), priority=3, name='ptask')
    def task_p():
        pass

    self.task_0(0)
    self.task_1(10)
    self.task_2(100)
    for periodic in self.huey.read_periodic(datetime.datetime.now()):
        self.huey.enqueue(periodic)

    # Our periodic task has a higher priority than the other tasks in the
    # queue, and will be executed first.
    self.assertEqual(len(self.huey), 4)
    head = self.huey.dequeue()
    self.assertEqual(head.name, 'ptask')  # Verify is our periodic task.
    self.assertEqual(head.priority, 3)  # Priority is preserved.
def test_serialize_deserialize(self):
    """Tasks round-trip unchanged through serialize_task/deserialize_task."""
    @self.huey.task()
    def task_a(n):
        return n

    @self.huey.task()
    def task_b(n):
        return n

    @self.huey.periodic_task(crontab(minute='1'))
    def task_p():
        return

    def roundtrip(t):
        return self.huey.deserialize_task(self.huey.serialize_task(t))

    # Regular, parameterized and periodic tasks all compare equal after a
    # serialize/deserialize round-trip.
    for original in (task_a.s(1), task_b.s(2), task_p.s()):
        self.assertEqual(original, roundtrip(original))
def test_retry_delay_periodic(self):
    """A failed periodic task with retry_delay is rescheduled with an eta."""
    @self.huey.periodic_task(crontab(), retries=2, retry_delay=60)
    def task_p():
        raise ValueError('try again')

    result = task_p()
    self.assertTrue(self.execute_next() is None)

    # The retry goes to the schedule rather than straight back onto the queue.
    self.assertEqual(len(self.huey), 0)
    self.assertEqual(self.huey.scheduled_count(), 1)

    scheduled, = self.huey.scheduled()  # Dequeue the delayed retry.
    self.assertEqual(scheduled.id, result.id)
    self.assertTrue(scheduled.eta is not None)
    self.assertEqual(scheduled.retries, 1)

    # Not runnable now; runnable once retry_delay (60s) has elapsed.
    self.assertFalse(self.huey.ready_to_run(scheduled))
    later = datetime.datetime.utcnow() + datetime.timedelta(seconds=61)
    self.assertTrue(self.huey.ready_to_run(scheduled, later))
def test_retry_delay_periodic(self):
    """A failed periodic task with retry_delay lands on the schedule, eta set."""
    @self.huey.periodic_task(crontab(), retries=2, retry_delay=60)
    def task_p():
        raise ValueError('try again')

    res = task_p()
    self.assertTrue(self.execute_next() is None)

    # Queue is empty; the retry sits on the schedule instead.
    self.assertEqual(len(self.huey), 0)
    self.assertEqual(self.huey.scheduled_count(), 1)

    pending, = self.huey.scheduled()  # Dequeue the delayed retry.
    self.assertEqual(pending.id, res.id)
    self.assertTrue(pending.eta is not None)
    self.assertEqual(pending.retries, 1)

    # Becomes runnable only after the 60-second retry delay has passed.
    self.assertFalse(self.huey.ready_to_run(pending))
    future = datetime.datetime.now() + datetime.timedelta(seconds=61)
    self.assertTrue(self.huey.ready_to_run(pending, future))
error_report += "Please check the server logs, they might contain more details." mail_admins("Error when backing up swift containers", error_report) logger.error("Error when backing up swift containers\b %s", error_report) else: logger.info("Swift backup finished successfully") if settings.BACKUP_SWIFT_SNITCH: ping_heartbeat_url(settings.BACKUP_SWIFT_SNITCH) @db_task() def backup_swift_task(): """ Task that performs backup of swift containers. """ do_backup_swift() if settings.BACKUP_SWIFT_ENABLED: @db_periodic_task(crontab(minute="10", hour="1")) def backup_swift_periodic(): """ Periodically schedules backup_swift_task. This is long running task, more like spawn_appserver than watch_pr, since we a single queue for periodic tasks it makes sense for this to finish early and then execute in another worker. """ backup_swift_task()
TRIAL_INSTANCES_REPORT_SCHEDULE_MINUTE = TRIAL_INSTANCES_REPORT_SCHEDULE[0] TRIAL_INSTANCES_REPORT_SCHEDULE_HOUR = TRIAL_INSTANCES_REPORT_SCHEDULE[1] TRIAL_INSTANCES_REPORT_SCHEDULE_DAY = TRIAL_INSTANCES_REPORT_SCHEDULE[2] TRIAL_INSTANCES_REPORT_SCHEDULE_MONTH = TRIAL_INSTANCES_REPORT_SCHEDULE[3] TRIAL_INSTANCES_REPORT_SCHEDULE_DAY_OF_WEEK = TRIAL_INSTANCES_REPORT_SCHEDULE[ 4] # Tasks ####################################################################### # Run on the 1st of every month @db_periodic_task( crontab(minute=TRIAL_INSTANCES_REPORT_SCHEDULE_MINUTE, hour=TRIAL_INSTANCES_REPORT_SCHEDULE_HOUR, day=TRIAL_INSTANCES_REPORT_SCHEDULE_DAY, month=TRIAL_INSTANCES_REPORT_SCHEDULE_MONTH, day_of_week=TRIAL_INSTANCES_REPORT_SCHEDULE_DAY_OF_WEEK)) def send_trial_instances_report( recipients=settings.TRIAL_INSTANCES_REPORT_RECIPIENTS): """ Generate and send a trial instance data report This task runs on the first of every month at 2AM """ if not recipients: logger.warning( 'No recipients listed for Trial Instances Report. It will not be generated.' ) return True
from huey.api import crontab from huey.contrib.djhuey import db_periodic_task, lock_task from instance.models.openedx_instance import OpenEdXInstance from instance.models.appserver import Status from instance.tasks import spawn_appserver # Logging ##################################################################### logger = logging.getLogger(__name__) # Tasks ####################################################################### @db_periodic_task(crontab(minute="34", hour="*/2")) @lock_task('launch_periodic_builds-lock') def launch_periodic_builds(): """ Automatically deploy new servers for all Open edX instances configured for periodic builds. """ instances = OpenEdXInstance.objects.filter(periodic_builds_enabled=True) now = datetime.datetime.now(tz=datetime.timezone.utc) for instance in instances: appservers = instance.appserver_set.order_by("-created").all() # NOTE: 'created' is the time when the appserver was created, which is # before provisioning begins. # if the instance has no appservers or latest appserver is past the # interval time, then we spawn a new appserver if not appservers or (now - appservers[0].created
if sufficient_time_passed(closed_at, now, 7): instance.logger.info("Shutting down obsolete sandbox instance") instance.archive() @db_task() def terminate_obsolete_appservers_all_instances(): """ Terminate obsolete app servers for all instances. """ for instance in OpenEdXInstance.objects.all(): instance.logger.info("Terminating obsolete appservers for instance") instance.terminate_obsolete_appservers() @db_periodic_task(crontab(day='*/1', hour='1', minute='0')) def clean_up(): """ Clean up obsolete VMs. This task runs once per day. """ shut_down_obsolete_pr_sandboxes() terminate_obsolete_appservers_all_instances() @db_periodic_task(crontab(day='*/1', hour='0', minute='0')) def delete_old_logs(): """ Delete old log entries.
from huey import RedisHuey
from huey.api import crontab
from huey.api import QueueTask
from huey.registry import registry
from huey.tests.base import BaseTestCase
from huey.tests.base import DummyHuey

# Module-level huey instance used only to register the two sample tasks below;
# registration happens as a side effect of the decorators at import time.
huey = DummyHuey(None)

@huey.task()
def test_task_one(x, y):
    pass

@huey.periodic_task(crontab(minute='0'))
def test_task_two():
    pass

class MyTaskClass(QueueTask):
    # Minimal QueueTask subclass; registered manually in the test below.
    def execute(self):
        pass

class TestRegistry(BaseTestCase):
    def test_registry(self):
        # Decorated tasks are auto-registered under a 'queuecmd_' name prefix.
        self.assertTrue('queuecmd_test_task_one' in registry)
        self.assertTrue('queuecmd_test_task_two' in registry)
        # A plain QueueTask subclass is not registered until register() is
        # called explicitly.
        self.assertFalse('MyTaskClass' in registry)
        registry.register(MyTaskClass)
        self.assertTrue('MyTaskClass' in registry)
from django.conf import settings from huey.api import crontab from huey.contrib.djhuey import db_periodic_task from mailchimp3 import MailChimp from more_itertools import chunked from userprofile.models import UserProfile # Logging ##################################################################### logger = logging.getLogger(__name__) # Tasks ####################################################################### @db_periodic_task(crontab(day='*', hour='2', minute='0')) def add_trial_users_to_mailchimp_list(): """ Adds opted-in trial users to the MailChimp list. This task runs once per day. """ if not settings.MAILCHIMP_ENABLED: return emails_local = set( UserProfile.objects.filter(subscribe_to_updates=True, ).values_list( 'user__email', flat=True, ))
from huey.contrib.djhuey import db_periodic_task from instance.models.deployment import DeploymentType from instance.utils import create_new_deployment from pr_watch.github import (RateLimitExceeded, get_pr_list_from_usernames) from pr_watch.models import WatchedFork, WatchedPullRequest from userprofile.models import UserProfile # Logging ##################################################################### logger = logging.getLogger(__name__) # Tasks ####################################################################### @db_periodic_task(crontab(minute='*/1')) def watch_pr(): """ Automatically create sandboxes for PRs opened by members of the watched organization on the watched repository """ if not settings.WATCH_PRS: return try: for watched_fork in WatchedFork.objects.filter(enabled=True): usernames = list( UserProfile.objects.filter( organization=watched_fork.organization, ).exclude( github_username__isnull=True, ).values_list( 'github_username', flat=True)) for pr in get_pr_list_from_usernames(usernames, watched_fork.fork):
def make_ptask(every_n):
    """Register ptask under a unique name, scheduled every ``every_n`` units."""
    # NOTE(review): the schedule string is passed positionally to crontab();
    # which crontab field it binds to depends on crontab's signature — confirm.
    task_name = 'ptask_%s' % every_n
    schedule = crontab('*/%s' % every_n)
    self.huey.periodic_task(schedule, name=task_name)(ptask)
from huey.api import crontab from huey.api import Huey from huey.api import QueueTask from huey.registry import registry from huey.tests.base import BaseTestCase huey = Huey(None) @huey.task() def test_task_one(x, y): pass @huey.periodic_task(crontab(minute='0')) def test_task_two(): pass class MyTaskClass(QueueTask): def execute(self): pass class TestRegistry(BaseTestCase): def test_registry(self): self.assertTrue('queuecmd_test_task_one' in registry) self.assertTrue('queuecmd_test_task_two' in registry) self.assertTrue('MyTaskClass' in registry) self.assertFalse('another' in registry) def test_periodic_tasks(self): periodic = registry._periodic_tasks
if sufficient_time_passed(closed_at, now, 7): instance.logger.info("Shutting down obsolete sandbox instance") instance.archive() @db_task() def terminate_obsolete_appservers_all_instances(): """ Terminate obsolete app servers for all instances. """ for instance in OpenEdXInstance.objects.all(): instance.logger.info("Terminating obsolete appservers for instance") instance.terminate_obsolete_appservers() @db_periodic_task(crontab(day='*/1', hour='1', minute='0')) def clean_up(): """ Clean up obsolete VMs. This task runs once per day. """ shut_down_obsolete_pr_sandboxes() terminate_obsolete_appservers_all_instances() @db_periodic_task(crontab()) def reconfigure_dirty_load_balancers(): """ Any load balancers that are dirty need to be reconfigured.
# NOTE(review): this if/else is the tail of a function whose `def` is outside
# this view (it builds `error_report` — presumably the swift backup runner);
# confirm its enclosing indentation before applying.
if error_report:
    error_report += "Please check the server logs, they might contain more details."
    mail_admins("Error when backing up swift containers", error_report)
    # Fix: the format string contained "\b" (a backspace escape), clearly a
    # typo for a newline separator.
    logger.error("Error when backing up swift containers\n %s", error_report)
else:
    logger.info("Swift backup finished successfully")
    # Ping the dead-man's-snitch heartbeat only on success, if configured.
    if settings.BACKUP_SWIFT_SNITCH:
        ping_heartbeat_url(settings.BACKUP_SWIFT_SNITCH)


@db_task()
def backup_swift_task():
    """
    Task that performs backup of swift containers.
    """
    do_backup_swift()


if settings.BACKUP_SWIFT_ENABLED:
    @db_periodic_task(crontab(minute="10", hour="1"))
    def backup_swift_periodic():
        """
        Periodically schedules backup_swift_task.

        This is a long-running task, more like spawn_appserver than watch_pr.
        Since we have a single queue for periodic tasks, it makes sense for
        this to finish early and then execute in another worker.
        """
        backup_swift_task()