if sufficient_time_passed(closed_at, now, 7):
    instance.logger.info("Shutting down obsolete sandbox instance")
    instance.archive()


@db_task()
def terminate_obsolete_appservers_all_instances():
    """
    Terminate obsolete app servers for all instances.
    """
    for instance in OpenEdXInstance.objects.all():
        instance.logger.info("Terminating obsolete appservers for instance")
        instance.terminate_obsolete_appservers()


@db_periodic_task(crontab(day='*/1', hour='1', minute='0'))
def clean_up():
    """
    Clean up obsolete VMs.

    This task runs once per day.
    """
    shut_down_obsolete_pr_sandboxes()
    terminate_obsolete_appservers_all_instances()


@db_periodic_task(crontab())
def reconfigure_dirty_load_balancers():
    """
    Any load balancers that are dirty need to be reconfigured.
import random

from huey.contrib.djhuey import task, periodic_task, crontab, db_task


@task()
def count_beans(number):
    print('-- counted %s beans --' % number)
    return 'Counted %s beans' % number


@periodic_task(crontab(minute='*/5'))
def every_five_mins():
    print('Every five minutes this will be printed by the consumer')


@task(retries=3, retry_delay=10)
def try_thrice():
    if random.randint(1, 3) == 1:
        print('OK')
    else:
        print('About to fail, will retry in 10 seconds')
        raise Exception('Crap something went wrong')


@db_task()
def foo(number):
    print('foo(%s)' % number)
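A note on how these examples are used: calling a huey task function does not run it inline, it enqueues a message for the consumer and returns immediately. A minimal sketch of enqueueing and reading results, assuming the tasks above live in a module importable as `myapp.tasks` (that path is illustrative, not from the source):

from myapp.tasks import count_beans, try_thrice  # hypothetical import path

# Enqueue the task; the call returns a huey Result handle right away.
res = count_beans(100)

# Block until the consumer has executed the task, then read its return value.
print(res.get(blocking=True))  # -> 'Counted 100 beans'

# Fire-and-forget; retries and retry_delay are handled by the consumer.
try_thrice()

# For tests or debugging, the wrapped function can be run synchronously
# in the current process, bypassing the queue entirely.
count_beans.call_local(5)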
from huey.contrib.djhuey import crontab, db_periodic_task
from neon_app.models import Day
from icalendar import Calendar
from django.db import IntegrityError
from re import compile
import requests

pro_d_terms = ['Professional', 'Pro-D', 'Development']
holiday_terms = ['Holiday', 'Statutory', 'School Closed']
late_start_terms = ['Late Start']
early_dismissal_terms = ['Early Dismissal']


@db_periodic_task(crontab(minute="*/5"))
def update_from_calendar():
    r = requests.get("http://www.sd44.ca/school/windsor/"
                     "_LAYOUTS/15/scholantis/handlers/ical/"
                     "event.ashx?List=f13b021f-ee41-4705-ab17-1a2f36172f0b")
    cal = Calendar.from_ical(r.text)
    for event in (x for x in cal.subcomponents if x.name == 'VEVENT'):
        day_type = ""
        if any(x in event['summary'] for x in holiday_terms):
            day_type = "holiday"
        elif any(x in event['summary'] for x in late_start_terms):
            day_type = "late-start"
        elif any(x in event['summary'] for x in pro_d_terms):
            day_type = "pro-d"
        elif any(x in event['summary'] for x in early_dismissal_terms):
            day_type = "early-d"
        else:
            day_type = "normal"
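The snippet above is cut off before the classified `day_type` is stored. Purely as an illustration of where the `Day` and `IntegrityError` imports come in, the loop body might finish along these lines; the `date` and `day_type` field names are assumptions, not taken from the source:

        # Hypothetical continuation: persist one Day row per VEVENT and
        # skip days that were already recorded (field names are assumed).
        try:
            Day.objects.create(date=event['dtstart'].dt, day_type=day_type)
        except IntegrityError:
            pass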
# Tasks #######################################################################

@db_task()
def provision_instance(instance_pk):
    """
    Run provisioning on an existing instance
    """
    logger.info('Retrieving instance: pk=%s', instance_pk)
    instance = SingleVMOpenEdXInstance.objects.get(pk=instance_pk)
    logger.info('Running provisioning on %s', instance)
    instance.provision()


@db_periodic_task(crontab(minute='*/1'))
def watch_pr():
    """
    Automatically create/update sandboxes for PRs opened by members of the
    watched organization on the watched repository
    """
    team_username_list = get_username_list_from_team(settings.WATCH_ORGANIZATION)
    for username in team_username_list:
        for pr in get_pr_list_from_username(username, settings.WATCH_FORK):
            sub_domain = 'pr{number}.sandbox'.format(number=pr.number)
            instance, created = SingleVMOpenEdXInstance.objects.update_or_create_from_pr(
                pr, sub_domain)
            if created:
                logger.info('New PR found, creating sandbox: %s', pr)
                provision_instance(instance.pk)
from utils import read_top
import logging

from huey.contrib.djhuey import crontab, db_periodic_task
from .models import Entry  # Entry stores `data` in a JSONField; exact import path assumed

# The next two LOC should be parameterized; this is on the to-do list.
# The logger is commented out to assist new users.
# logging.basicConfig(level=logging.DEBUG, filename='/tmp/metrics.log')

# osx
TOP_COMMAND = 'top -n 0 -l 1'  # zero processes, one repetition in logging mode
# ubuntu
# TOP_COMMAND = 'top -n 1 -b'  # one repetition in batch mode


@db_periodic_task(crontab(minute='*'))  # Once per minute
def make_new_top_entry():
    try:
        data = read_top(TOP_COMMAND)
        # Data is automatically jsonified by the JSONField in Entry
        new_entry = Entry.objects.create(command=TOP_COMMAND, data=data)
        new_entry.save()
    except Exception:
        logging.exception('Exception raised in make_new_top_entry')

# Examples of alternative tasks.
# `make_new_top_delta_entry` runs `top -d`; the resulting Entry will contain
# the delta since the last time top was called. Can be done on the ORM/database
# levels (or even in Django), too. Doing it here is trading space for time.
# I could have made another layer of abstraction here, but I didn't want to
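For reference, the comment about JSON-ification refers to the model's `data` field. A bare-bones sketch of what an `Entry` model like the one used above could look like, assuming Django 3.1+ `models.JSONField` (everything beyond the `command` and `data` fields is an assumption):

from django.db import models


class Entry(models.Model):
    """Hypothetical Entry model matching the fields used in make_new_top_entry."""
    command = models.CharField(max_length=255)          # the top invocation that was run
    data = models.JSONField()                           # parsed `top` output, serialized as JSON
    created = models.DateTimeField(auto_now_add=True)   # assumed timestamp field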
""" File name: tasks.py Author: dhilipsiva <*****@*****.**> Date created: 2017-03-18 """ import logging from django.conf import settings from importlib import import_module from statboard.core.models import Metric from statboard.core.config import huey # NOQA from huey.contrib.djhuey import crontab, periodic_task logger = logging.getLogger(__name__) @periodic_task(crontab(minute='*/1')) def fetch_metrics(): for metric_name in settings.METRICS: logger.debug('~' * 80) try: logger.debug("Initializing metrics for: %s" % metric_name) metric = Metric.objects.get(name=metric_name) metric_module = import_module('statboard.core.metrics.%s' % metric_name) logger.debug("Attemting to fetch metrics for: %s" % metric_name) metric_module.fetch(metric) logger.debug("DONE! fetched metrics for: %s" % metric_name) except Exception as e: logger.error("FAILED! metrics for: %s \n %s" % (metric_name, e))
for instance in OpenEdXInstance.objects.filter(watchedpullrequest__isnull=False):
    pr = github.get_pr_info_by_number(
        instance.watchedpullrequest.target_fork_name,
        instance.watchedpullrequest.github_pr_number)
    if pr['state'] == 'closed':
        closed_at = github.parse_date(pr['closed_at'])
        now = datetime.now()
        if sufficient_time_passed(closed_at, now, 7):
            instance.shut_down()


@db_task()
def terminate_obsolete_appservers_all_instances():
    """
    Terminate obsolete app servers for all instances.
    """
    for instance in OpenEdXInstance.objects.all():
        instance.terminate_obsolete_appservers()


@db_periodic_task(crontab(day='*/1', hour='1', minute='0'))
def clean_up():
    """
    Clean up obsolete VMs.

    This task runs once per day.
    """
    shut_down_obsolete_pr_sandboxes()
    terminate_obsolete_appservers_all_instances()
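`sufficient_time_passed()` is referenced here but not defined in the snippet; given the `closed_at`/`now` arguments and the `7`, a plausible implementation (an assumption, not the project's actual helper) is a simple timedelta comparison:

from datetime import timedelta


def sufficient_time_passed(earlier, later, days):
    """Hypothetical helper: True if at least `days` days separate the two datetimes."""
    return later - earlier >= timedelta(days=days)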
from django.db import transaction, IntegrityError
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from .models import Posts
from huey.contrib.djhuey import crontab
from huey import RedisHuey
import logging

logger = logging.getLogger('huey.consumer')

# This is necessary in order to get the right RedisHuey instance.
huey = RedisHuey('main', password=settings.CACHE_PASSWORD)


@huey.periodic_task(crontab(minute='0', hour='*/1'))
def viewcountupdate():
    """
    This task is executed hourly and pushes the cached view counters
    into the database using a single transaction.
    """
    # This is the prefix we are going to use to distinguish the cache keys
    # we need for the view counters.
    PREFIX = settings.CACHE_PREFIX
    logger.warn('Entering viewcountupdate...')
    # `redis_cache` (a raw Redis client, e.g. from django-redis) is assumed to
    # be defined elsewhere in this module; it is not shown in this snippet.
    with redis_cache.lock('lock'):
        keys = redis_cache.keys(PREFIX + "*")
        if keys:
            try:
                with transaction.atomic():
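The snippet ends just as the transaction opens. As a rough illustration only, the body could iterate over the cached keys and flush each counter into `Posts`; the key format, the `view_count` field, and the `F()` update are all assumptions:

# Hypothetical body for the transaction above (not part of the original snippet):
from django.db.models import F

for key in keys:
    post_id = key.decode().replace(PREFIX, '')  # assumes keys look like "<PREFIX><post pk>"
    count = int(redis_cache.get(key) or 0)
    Posts.objects.filter(pk=post_id).update(view_count=F('view_count') + count)
    redis_cache.delete(key)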