def main():
    """Run a scheduled pool scrub task.

    argv[1] is the TaskDefinition id; optional argv[2] is a crontab-window
    spec (defaults to the always-on window "*-*-*-*-*-*"). Starts a scrub
    via the API unless the previous scrub for the same definition is still
    in a non-terminal state, then polls every 60 seconds until the scrub
    errors or finishes, recording state transitions on a Task row.

    Fixes vs. previous revision: Python-2-only ``except Exception, e``
    replaced with ``except ... as e``, and the truncated ``finally:``
    block completed (save the Task, then poll to a terminal state),
    mirroring the complete scrub variant elsewhere in this file.
    """
    import time  # local import: guarantees availability even if the top-of-file imports lack it

    tid = int(sys.argv[1])
    cwindow = sys.argv[2] if len(sys.argv) > 2 else "*-*-*-*-*-*"
    if crontabwindow.crontab_range(cwindow):
        # Performance note: immediately check task execution time/day window
        # range to avoid other calls.
        tdo = TaskDefinition.objects.get(id=tid)
        if tdo.task_type != "scrub":
            return logger.error("task_type(%s) is not scrub." % tdo.task_type)
        meta = json.loads(tdo.json_meta)
        aw = APIWrapper()
        if Task.objects.filter(task_def=tdo).exists():
            # Never start a new scrub while the most recent one for this
            # definition is still running.
            ll = Task.objects.filter(task_def=tdo).order_by("-id")[0]
            if ll.state != "error" and ll.state != "finished":
                logger.debug("Non terminal state(%s) for task(%d). Checking "
                             "again." % (ll.state, tid))
                cur_state = update_state(ll, meta["pool"], aw)
                if cur_state != "error" and cur_state != "finished":
                    return logger.debug(
                        "Non terminal state(%s) for task(%d). "
                        "A new task will not be run." % (cur_state, tid)
                    )
        now = datetime.utcnow().replace(second=0, microsecond=0, tzinfo=utc)
        t = Task(task_def=tdo, state="started", start=now)
        url = "pools/%s/scrub" % meta["pool"]
        try:
            aw.api_call(url, data=None, calltype="post", save_error=False)
            logger.debug("Started scrub at %s" % url)
            t.state = "running"
        except Exception as e:
            logger.error("Failed to start scrub at %s" % url)
            t.state = "error"
            logger.exception(e)
        finally:
            t.save()

        # Poll until the scrub reaches a terminal state so the Task row
        # records an end time and final state.
        while True:
            cur_state = update_state(t, meta["pool"], aw)
            if cur_state == "error" or cur_state == "finished":
                logger.debug("task(%d) finished with state(%s)." %
                             (tid, cur_state))
                t.end = datetime.utcnow().replace(tzinfo=utc)
                t.save()
                break
            logger.debug("pending state(%s) for scrub task(%d). Will check "
                         "again in 60 seconds." % (cur_state, tid))
            time.sleep(60)
    else:
        logger.debug("Cron scheduled task not executed because outside "
                     "time/day window ranges")
def main():
    """Run a scheduled pool scrub task.

    argv[1] is the TaskDefinition id; optional argv[2] is a crontab-window
    spec (defaults to the always-on window '*-*-*-*-*-*'). Starts a scrub
    via the API unless the previous scrub for the same definition is still
    in a non-terminal state, then polls every 60 seconds until the scrub
    errors or finishes, recording state transitions on a Task row.

    Fixes vs. previous revision: Python-2-only ``except Exception, e``
    replaced with ``except ... as e``, and the truncated ``finally:``
    block completed (save the Task, then poll to a terminal state).
    """
    import time  # local import: guarantees availability even if the top-of-file imports lack it

    tid = int(sys.argv[1])
    cwindow = sys.argv[2] if len(sys.argv) > 2 else '*-*-*-*-*-*'
    if (crontabwindow.crontab_range(cwindow)):
        # Performance note: immediately check task execution time/day window
        # range to avoid other calls.
        tdo = TaskDefinition.objects.get(id=tid)
        if (tdo.task_type != 'scrub'):
            return logger.error('task_type(%s) is not scrub.' % tdo.task_type)
        meta = json.loads(tdo.json_meta)
        aw = APIWrapper()
        if (Task.objects.filter(task_def=tdo).exists()):
            # Never start a new scrub while the most recent one for this
            # definition is still running.
            ll = Task.objects.filter(task_def=tdo).order_by('-id')[0]
            if (ll.state != 'error' and ll.state != 'finished'):
                logger.debug('Non terminal state(%s) for task(%d). Checking '
                             'again.' % (ll.state, tid))
                cur_state = update_state(ll, meta['pool'], aw)
                if (cur_state != 'error' and cur_state != 'finished'):
                    return logger.debug('Non terminal state(%s) for task(%d). '
                                        'A new task will not be run.' %
                                        (cur_state, tid))
        now = datetime.utcnow().replace(second=0, microsecond=0, tzinfo=utc)
        t = Task(task_def=tdo, state='started', start=now)
        url = ('pools/%s/scrub' % meta['pool'])
        try:
            aw.api_call(url, data=None, calltype='post', save_error=False)
            logger.debug('Started scrub at %s' % url)
            t.state = 'running'
        except Exception as e:
            logger.error('Failed to start scrub at %s' % url)
            t.state = 'error'
            logger.exception(e)
        finally:
            t.save()

        # Poll until the scrub reaches a terminal state so the Task row
        # records an end time and final state.
        while True:
            cur_state = update_state(t, meta['pool'], aw)
            if (cur_state == 'error' or cur_state == 'finished'):
                logger.debug('task(%d) finished with state(%s).' %
                             (tid, cur_state))
                t.end = datetime.utcnow().replace(tzinfo=utc)
                t.save()
                break
            logger.debug('pending state(%s) for scrub task(%d). Will check '
                         'again in 60 seconds.' % (cur_state, tid))
            time.sleep(60)
    else:
        logger.debug('Cron scheduled task not executed because outside '
                     'time/day window ranges')
def main():
    """Run a scheduled share-snapshot task.

    argv[1] is the TaskDefinition id; optional argv[2] is a crontab-window
    spec (defaults to the always-on window '*-*-*-*-*-*'). Creates a new
    snapshot via the API unless doing so would overflow the configured
    max_count, then best-effort prunes old snapshots down to max_count.
    Progress is recorded on a Task row.
    """
    tid = int(sys.argv[1])
    cwindow = sys.argv[2] if len(sys.argv) > 2 else '*-*-*-*-*-*'
    if (crontabwindow.crontab_range(cwindow)):
        # Performance note: immediately check task execution time/day window
        # range to avoid other calls
        tdo = TaskDefinition.objects.get(id=tid)
        stype = 'task_scheduler'
        aw = APIWrapper()
        if (tdo.task_type != 'snapshot'):
            logger.error('task_type(%s) is not snapshot.' % tdo.task_type)
            return
        meta = json.loads(tdo.json_meta)
        validate_snap_meta(meta)
        # meta['share'] holds the Share's id here (newer metadata format).
        share = Share.objects.get(id=meta['share'])
        max_count = int(float(meta['max_count']))
        prefix = ('%s_' % meta['prefix'])
        now = datetime.utcnow().replace(second=0, microsecond=0, tzinfo=utc)
        t = Task(task_def=tdo, state='started', start=now)
        snap_created = False
        # Pessimistic default: only flipped to 'finished' after a
        # successful API call.
        t.state = 'error'
        try:
            # Snapshot name embeds a local-time timestamp for uniqueness.
            name = ('%s_%s' % (meta['prefix'], datetime.now().strftime(
                settings.SNAP_TS_FORMAT)))
            url = ('shares/{}/snapshots/{}'.format(share.id, name))
            # only create a new snap if there's no overflow situation. This
            # prevents runaway snapshot creation beyond max_count+1.
            if (delete(aw, share, stype, prefix, max_count)):
                data = {
                    'snap_type': stype,
                    'uvisible': meta['visible'],
                    'writable': meta['writable'],
                }
                headers = {'content-type': 'application/json'}
                aw.api_call(url, data=data, calltype='post', headers=headers,
                            save_error=False)
                logger.debug('created snapshot at %s' % url)
                t.state = 'finished'
                snap_created = True
        except Exception as e:
            logger.error('Failed to create snapshot at %s' % url)
            logger.exception(e)
        finally:
            t.end = datetime.utcnow().replace(tzinfo=utc)
            t.save()
        # best effort pruning without erroring out. If deletion fails, we'll
        # have max_count+1 number of snapshots and it would be dealt with on
        # the next round.
        if (snap_created):
            delete(aw, share, stype, prefix, max_count)
    else:
        logger.debug('Cron scheduled task not executed because outside '
                     'time/day window ranges')
def main():
    """Run a scheduled pool scrub task.

    argv[1] is the TaskDefinition id; optional argv[2] is a crontab-window
    spec (defaults to the always-on window "*-*-*-*-*-*"). Starts a scrub
    via the API unless the previous scrub for the same definition is still
    in a non-terminal state, then polls every 60 seconds until it reaches
    a state in TERMINAL_SCRUB_STATES, recording progress on a Task row.
    """
    tid = int(sys.argv[1])
    cwindow = sys.argv[2] if len(sys.argv) > 2 else "*-*-*-*-*-*"
    if crontabwindow.crontab_range(cwindow):
        # Performance note: immediately check task execution time/day window
        # range to avoid other calls
        tdo = TaskDefinition.objects.get(id=tid)
        if tdo.task_type != "scrub":
            return logger.error("task_type(%s) is not scrub." % tdo.task_type)
        meta = json.loads(tdo.json_meta)
        aw = APIWrapper()
        if Task.objects.filter(task_def=tdo).exists():
            # Inspect the most recent task for this definition: never start
            # a new scrub while the previous one may still be running.
            ll = Task.objects.filter(task_def=tdo).order_by("-id")[0]
            if ll.state not in TERMINAL_SCRUB_STATES:
                logger.debug("Non terminal state(%s) for task(%d). Checking "
                             "again." % (ll.state, tid))
                # Re-query live scrub status before giving up on this run.
                cur_state = update_state(ll, meta["pool"], aw)
                if cur_state not in TERMINAL_SCRUB_STATES:
                    return logger.debug("Non terminal state(%s) for task(%d). "
                                        "A new task will not be run." % (cur_state, tid))
        now = datetime.utcnow().replace(second=0, microsecond=0, tzinfo=utc)
        t = Task(task_def=tdo, state="started", start=now)
        url = "pools/%s/scrub" % meta["pool"]
        try:
            aw.api_call(url, data=None, calltype="post", save_error=False)
            logger.debug("Started scrub at %s" % url)
            t.state = "running"
        except Exception as e:
            logger.error("Failed to start scrub at %s" % url)
            t.state = "error"
            logger.exception(e)
        finally:
            t.save()
        # Poll until the scrub reaches a terminal state so the Task row
        # records an end time and final state.
        while True:
            cur_state = update_state(t, meta["pool"], aw)
            if cur_state in TERMINAL_SCRUB_STATES:
                logger.debug("task(%d) finished with state(%s)." % (tid, cur_state))
                t.end = datetime.utcnow().replace(tzinfo=utc)
                t.save()
                break
            logger.debug("pending state(%s) for scrub task(%d). Will check "
                         "again in 60 seconds." % (cur_state, tid))
            time.sleep(60)
    else:
        logger.debug(
            "Cron scheduled task not executed because outside time/day window ranges"
        )
def main():
    """Run a scheduled pool scrub task.

    argv[1] is the TaskDefinition id; optional argv[2] is a crontab-window
    spec (defaults to the always-on window '*-*-*-*-*-*'). Starts a scrub
    via the API unless the previous scrub for the same definition is still
    in a non-terminal state, then polls every 60 seconds until it reaches
    a state in TERMINAL_SCRUB_STATES, recording progress on a Task row.
    """
    tid = int(sys.argv[1])
    cwindow = sys.argv[2] if len(sys.argv) > 2 else '*-*-*-*-*-*'
    if (crontabwindow.crontab_range(cwindow)):
        # Performance note: immediately check task execution time/day window
        # range to avoid other calls
        tdo = TaskDefinition.objects.get(id=tid)
        if (tdo.task_type != 'scrub'):
            return logger.error('task_type(%s) is not scrub.' % tdo.task_type)
        meta = json.loads(tdo.json_meta)
        aw = APIWrapper()
        if (Task.objects.filter(task_def=tdo).exists()):
            # Inspect the most recent task for this definition: never start
            # a new scrub while the previous one may still be running.
            ll = Task.objects.filter(task_def=tdo).order_by('-id')[0]
            if ll.state not in TERMINAL_SCRUB_STATES:
                logger.debug('Non terminal state(%s) for task(%d). Checking '
                             'again.' % (ll.state, tid))
                # Re-query live scrub status before giving up on this run.
                cur_state = update_state(ll, meta['pool'], aw)
                if cur_state not in TERMINAL_SCRUB_STATES:
                    return logger.debug('Non terminal state(%s) for task(%d). '
                                        'A new task will not be run.' %
                                        (cur_state, tid))
        now = datetime.utcnow().replace(second=0, microsecond=0, tzinfo=utc)
        t = Task(task_def=tdo, state='started', start=now)
        url = ('pools/%s/scrub' % meta['pool'])
        try:
            aw.api_call(url, data=None, calltype='post', save_error=False)
            logger.debug('Started scrub at %s' % url)
            t.state = 'running'
        except Exception as e:
            logger.error('Failed to start scrub at %s' % url)
            t.state = 'error'
            logger.exception(e)
        finally:
            t.save()
        # Poll until the scrub reaches a terminal state so the Task row
        # records an end time and final state.
        while True:
            cur_state = update_state(t, meta['pool'], aw)
            if cur_state in TERMINAL_SCRUB_STATES:
                logger.debug('task(%d) finished with state(%s).' %
                             (tid, cur_state))
                t.end = datetime.utcnow().replace(tzinfo=utc)
                t.save()
                break
            logger.debug('pending state(%s) for scrub task(%d). Will check '
                         'again in 60 seconds.' % (cur_state, tid))
            time.sleep(60)
    else:
        logger.debug('Cron scheduled task not executed because outside '
                     'time/day window ranges')
def main():
    """Run a scheduled share-snapshot task (share referenced by name).

    argv[1] is the TaskDefinition id; optional argv[2] is a crontab-window
    spec. Creates a new snapshot via the API unless doing so would overflow
    the configured max_count, then best-effort prunes old snapshots down to
    max_count. Progress is recorded on a Task row.

    Fixes vs. previous revision: missing ``len(sys.argv) > 2`` guard added
    (previously raised IndexError when run without a window argument, unlike
    the sibling task scripts); Python-2-only ``except Exception, e`` replaced
    with ``except ... as e``; and the truncated ``finally:`` block completed
    (stamp end time, save the Task, best-effort prune).
    """
    tid = int(sys.argv[1])
    # Default window means "always inside the execution window".
    cwindow = sys.argv[2] if len(sys.argv) > 2 else '*-*-*-*-*-*'
    if (crontabwindow.crontab_range(cwindow)):
        # Performance note: immediately check task execution time/day window
        # range to avoid other calls.
        tdo = TaskDefinition.objects.get(id=tid)
        stype = 'task_scheduler'
        aw = APIWrapper()
        if (tdo.task_type != 'snapshot'):
            logger.error('task_type(%s) is not snapshot.' % tdo.task_type)
            return
        meta = json.loads(tdo.json_meta)
        validate_snap_meta(meta)
        # meta['share'] holds the Share's name in this metadata format.
        share = Share.objects.get(name=meta['share'])
        max_count = int(float(meta['max_count']))
        prefix = ('%s_' % meta['prefix'])
        now = datetime.utcnow().replace(second=0, microsecond=0, tzinfo=utc)
        t = Task(task_def=tdo, state='started', start=now)
        snap_created = False
        # Pessimistic default: only flipped to 'finished' on success.
        t.state = 'error'
        try:
            # Snapshot name embeds a local-time timestamp for uniqueness.
            name = ('%s_%s' % (meta['prefix'], datetime.now().strftime(
                settings.SNAP_TS_FORMAT)))
            url = ('shares/%s/snapshots/%s' % (share.name, name))
            # only create a new snap if there's no overflow situation. This
            # prevents runaway snapshot creation beyond max_count+1.
            if (delete(aw, share, stype, prefix, max_count)):
                data = {
                    'snap_type': stype,
                    'uvisible': meta['visible'],
                }
                headers = {'content-type': 'application/json'}
                aw.api_call(url, data=data, calltype='post', headers=headers,
                            save_error=False)
                logger.debug('created snapshot at %s' % url)
                t.state = 'finished'
                snap_created = True
        except Exception as e:
            logger.error('Failed to create snapshot at %s' % url)
            logger.exception(e)
        finally:
            t.end = datetime.utcnow().replace(tzinfo=utc)
            t.save()
        # best effort pruning without erroring out. If deletion fails, we'll
        # have max_count+1 snapshots and it would be dealt with on the next
        # round.
        if (snap_created):
            delete(aw, share, stype, prefix, max_count)
    else:
        logger.debug('Cron scheduled task not executed because outside '
                     'time/day window ranges')
def main():
    """Run a scheduled share-snapshot task (share referenced by name).

    argv[1] is the TaskDefinition id; optional argv[2] is a crontab-window
    spec (defaults to the always-on window '*-*-*-*-*-*'). Creates a new
    snapshot via the API unless doing so would overflow the configured
    max_count, then best-effort prunes old snapshots down to max_count.
    Progress is recorded on a Task row.

    Fixes vs. previous revision: Python-2-only ``except Exception, e``
    replaced with ``except ... as e``, and the truncated ``finally:`` block
    completed (stamp end time, save the Task, best-effort prune).
    """
    tid = int(sys.argv[1])
    cwindow = sys.argv[2] if len(sys.argv) > 2 else '*-*-*-*-*-*'
    if (crontabwindow.crontab_range(cwindow)):
        # Performance note: immediately check task execution time/day window
        # range to avoid other calls.
        tdo = TaskDefinition.objects.get(id=tid)
        stype = 'task_scheduler'
        aw = APIWrapper()
        if (tdo.task_type != 'snapshot'):
            logger.error('task_type(%s) is not snapshot.' % tdo.task_type)
            return
        meta = json.loads(tdo.json_meta)
        validate_snap_meta(meta)
        # meta['share'] holds the Share's name in this metadata format.
        share = Share.objects.get(name=meta['share'])
        max_count = int(float(meta['max_count']))
        prefix = ('%s_' % meta['prefix'])
        now = datetime.utcnow().replace(second=0, microsecond=0, tzinfo=utc)
        t = Task(task_def=tdo, state='started', start=now)
        snap_created = False
        # Pessimistic default: only flipped to 'finished' on success.
        t.state = 'error'
        try:
            # Snapshot name embeds a local-time timestamp for uniqueness.
            name = ('%s_%s' % (meta['prefix'],
                               datetime.now().strftime(settings.SNAP_TS_FORMAT)))
            url = ('shares/%s/snapshots/%s' % (share.name, name))
            # only create a new snap if there's no overflow situation. This
            # prevents runaway snapshot creation beyond max_count+1.
            if (delete(aw, share, stype, prefix, max_count)):
                data = {
                    'snap_type': stype,
                    'uvisible': meta['visible'],
                    'writable': meta['writable'],
                }
                headers = {'content-type': 'application/json'}
                aw.api_call(url, data=data, calltype='post', headers=headers,
                            save_error=False)
                logger.debug('created snapshot at %s' % url)
                t.state = 'finished'
                snap_created = True
        except Exception as e:
            logger.error('Failed to create snapshot at %s' % url)
            logger.exception(e)
        finally:
            t.end = datetime.utcnow().replace(tzinfo=utc)
            t.save()
        # best effort pruning without erroring out. If deletion fails, we'll
        # have max_count+1 snapshots and it would be dealt with on the next
        # round.
        if (snap_created):
            delete(aw, share, stype, prefix, max_count)
    else:
        logger.debug('Cron scheduled task not executed because outside '
                     'time/day window ranges')
def main():
    """Schedule a system reboot, shutdown or suspend via the API.

    argv[1] is the TaskDefinition id; optional argv[2] is a crontab-window
    spec (defaults to the always-on window "*-*-*-*-*-*"). Skips execution
    when outside the time/day window or when the task's run conditions are
    not met. For shutdown/suspend with an RTC wake-up configured, the wake
    epoch is appended to the command URL. Outcome is recorded on a Task row.
    """
    tid = int(sys.argv[1])
    cwindow = sys.argv[2] if len(sys.argv) > 2 else "*-*-*-*-*-*"
    if crontabwindow.crontab_range(cwindow):
        # Performance note: immediately check task execution time/day window
        # range to avoid other calls
        tdo = TaskDefinition.objects.get(id=tid)
        aw = APIWrapper()
        if tdo.task_type not in ["reboot", "shutdown", "suspend"]:
            logger.error(
                "task_type(%s) is not a system reboot, "
                "shutdown or suspend." % tdo.task_type
            )
            return
        meta = json.loads(tdo.json_meta)
        validate_shutdown_meta(meta)
        # Additional gating beyond the time window (e.g. per-task meta
        # conditions) — skip silently when not met.
        if not run_conditions_met(meta):
            logger.debug(
                "Cron scheduled task not executed because the run conditions have not been met"
            )
            return
        now = datetime.utcnow().replace(second=0, microsecond=0, tzinfo=utc)
        # The task's end is pre-set 3 minutes out: the system itself goes
        # down, so there is no later opportunity to stamp a real end time.
        schedule = now + timedelta(minutes=3)
        t = Task(task_def=tdo, state="scheduled", start=now, end=schedule)
        try:
            # set default command url before checking if it's a shutdown
            # and if we have an rtc wake up
            url = "commands/%s" % tdo.task_type
            # if task_type is shutdown and rtc wake up true
            # parse crontab hour & minute vs rtc hour & minute to state
            # if wake will occur same day or next day, finally update
            # command url adding wake up epoch time
            if tdo.task_type in ["shutdown", "suspend"] and meta["wakeup"]:
                crontab_fields = tdo.crontab.split()
                # crontab fields are "minute hour ..." — convert both times
                # to minutes-since-midnight for comparison.
                crontab_time = int(crontab_fields[1]) * 60 + int(crontab_fields[0])
                wakeup_time = meta["rtc_hour"] * 60 + meta["rtc_minute"]
                # rtc wake up requires UTC epoch, but users on WebUI set time
                # thinking to localtime, so first we set wake up time,
                # update it if wake up is on next day, finally move it to UTC
                # and get its epoch
                epoch = datetime.now().replace(
                    hour=int(meta["rtc_hour"]),
                    minute=int(meta["rtc_minute"]),
                    second=0,
                    microsecond=0,
                )
                # if wake up < crontab time wake up will run next day
                if crontab_time > wakeup_time:
                    epoch += timedelta(days=1)
                # strftime("%s") yields the epoch seconds (platform-dependent
                # extension, localtime-based).
                epoch = epoch.strftime("%s")
                url = "%s/%s" % (url, epoch)
            aw.api_call(url, data=None, calltype="post", save_error=False)
            logger.debug("System %s scheduled" % tdo.task_type)
            t.state = "finished"
        except Exception as e:
            t.state = "failed"
            logger.error("Failed to schedule system %s" % tdo.task_type)
            logger.exception(e)
        finally:
            # t.end = datetime.utcnow().replace(tzinfo=utc)
            t.save()
    else:
        logger.debug(
            "Cron scheduled task not executed because outside time/day window ranges"
        )
def main():
    """Run a scheduled share-snapshot task.

    argv[1] is the TaskDefinition id; optional argv[2] is a crontab-window
    spec (defaults to the always-on window '*-*-*-*-*-*'). Creates a new
    snapshot via the API unless doing so would overflow the configured
    max_count, then best-effort prunes old snapshots down to max_count.
    Legacy metadata referencing the share by name is migrated to id.
    Progress is recorded on a Task row.
    """
    tid = int(sys.argv[1])
    cwindow = sys.argv[2] if len(sys.argv) > 2 else '*-*-*-*-*-*'
    if (crontabwindow.crontab_range(cwindow)):
        # Performance note: immediately check task execution time/day window
        # range to avoid other calls
        tdo = TaskDefinition.objects.get(id=tid)
        stype = 'task_scheduler'
        aw = APIWrapper()
        if (tdo.task_type != 'snapshot'):
            logger.error('task_type(%s) is not snapshot.' % tdo.task_type)
            return
        meta = json.loads(tdo.json_meta)
        validate_snap_meta(meta)
        # to keep backwards compatibility, allow for share to be either
        # name or id and migrate the metadata. To be removed in #1854
        try:
            share = Share.objects.get(id=meta['share'])
        except ValueError:
            # Non-numeric value: legacy metadata stored the share name.
            # Resolve by name and persist the migrated id-based metadata.
            share = Share.objects.get(name=meta['share'])
            meta['share'] = share.id
            tdo.json_meta = json.dumps(meta)
            tdo.save()
        max_count = int(float(meta['max_count']))
        prefix = ('%s_' % meta['prefix'])
        now = datetime.utcnow().replace(second=0, microsecond=0, tzinfo=utc)
        t = Task(task_def=tdo, state='started', start=now)
        snap_created = False
        # Pessimistic default: only flipped to 'finished' after a
        # successful API call.
        t.state = 'error'
        try:
            # Snapshot name embeds a local-time timestamp for uniqueness.
            name = ('%s_%s' % (meta['prefix'],
                               datetime.now().strftime(settings.SNAP_TS_FORMAT)))
            url = ('shares/{}/snapshots/{}'.format(share.id, name))
            # only create a new snap if there's no overflow situation. This
            # prevents runaway snapshot creation beyond max_count+1.
            if(delete(aw, share, stype, prefix, max_count)):
                data = {'snap_type': stype,
                        'uvisible': meta['visible'],
                        'writable': meta['writable'], }
                headers = {'content-type': 'application/json'}
                aw.api_call(url, data=data, calltype='post', headers=headers,
                            save_error=False)
                logger.debug('created snapshot at %s' % url)
                t.state = 'finished'
                snap_created = True
        except Exception as e:
            logger.error('Failed to create snapshot at %s' % url)
            logger.exception(e)
        finally:
            t.end = datetime.utcnow().replace(tzinfo=utc)
            t.save()
        # best effort pruning without erroring out. If deletion fails, we'll
        # have max_count+1 number of snapshots and it would be dealt with on
        # the next round.
        if (snap_created):
            delete(aw, share, stype, prefix, max_count)
    else:
        logger.debug('Cron scheduled task not executed because outside '
                     'time/day window ranges')
def main():
    """Schedule a system reboot, shutdown or suspend via the API.

    argv[1] is the TaskDefinition id; optional argv[2] is a crontab-window
    spec (defaults to the always-on window '*-*-*-*-*-*'). For
    shutdown/suspend with an RTC wake-up configured, the wake epoch is
    appended to the command URL. Outcome is recorded on a Task row.
    """
    tid = int(sys.argv[1])
    cwindow = sys.argv[2] if len(sys.argv) > 2 else '*-*-*-*-*-*'
    if (crontabwindow.crontab_range(cwindow)):
        # Performance note: immediately check task execution time/day window
        # range to avoid other calls
        tdo = TaskDefinition.objects.get(id=tid)
        aw = APIWrapper()
        if (tdo.task_type not in ['reboot', 'shutdown', 'suspend']):
            logger.error('task_type(%s) is not a system reboot, '
                         'shutdown or suspend.' % tdo.task_type)
            return
        meta = json.loads(tdo.json_meta)
        validate_shutdown_meta(meta)
        now = datetime.utcnow().replace(second=0, microsecond=0, tzinfo=utc)
        # The task's end is pre-set 3 minutes out: the system itself goes
        # down, so there is no later opportunity to stamp a real end time.
        schedule = now + timedelta(minutes=3)
        t = Task(task_def=tdo, state='scheduled', start=now, end=schedule)
        try:
            # set default command url before checking if it's a shutdown
            # and if we have an rtc wake up
            url = ('commands/%s' % tdo.task_type)
            # if task_type is shutdown and rtc wake up true
            # parse crontab hour & minute vs rtc hour & minute to state
            # if wake will occur same day or next day, finally update
            # command url adding wake up epoch time
            if (tdo.task_type in ['shutdown', 'suspend'] and meta['wakeup']):
                crontab_fields = tdo.crontab.split()
                # crontab fields are "minute hour ..." — convert both times
                # to minutes-since-midnight for comparison.
                crontab_time = (int(crontab_fields[1]) * 60 +
                                int(crontab_fields[0]))
                wakeup_time = meta['rtc_hour'] * 60 + meta['rtc_minute']
                # rtc wake up requires UTC epoch, but users on WebUI set time
                # thinking to localtime, so first we set wake up time,
                # update it if wake up is on next day, finally move it to UTC
                # and get its epoch
                epoch = datetime.now().replace(hour=int(meta['rtc_hour']),
                                               minute=int(meta['rtc_minute']),
                                               second=0, microsecond=0)
                # if wake up < crontab time wake up will run next day
                if (crontab_time > wakeup_time):
                    epoch += timedelta(days=1)
                # strftime('%s') yields the epoch seconds (platform-dependent
                # extension, localtime-based).
                epoch = epoch.strftime('%s')
                url = ('%s/%s' % (url, epoch))
            aw.api_call(url, data=None, calltype='post', save_error=False)
            logger.debug('System %s scheduled' % tdo.task_type)
            t.state = 'finished'
        except Exception as e:
            t.state = 'failed'
            logger.error('Failed to schedule system %s' % tdo.task_type)
            logger.exception(e)
        finally:
            # t.end = datetime.utcnow().replace(tzinfo=utc)
            t.save()
    else:
        logger.debug('Cron scheduled task not executed because outside '
                     'time/day window ranges')
def main():
    """Run a scheduled share-snapshot task.

    argv[1] is the TaskDefinition id; optional argv[2] is a crontab-window
    spec (defaults to the always-on window "*-*-*-*-*-*"). Creates a new
    snapshot via the API unless doing so would overflow the configured
    max_count, then best-effort prunes old snapshots down to max_count.
    Legacy metadata referencing the share by name is migrated to id.
    Progress is recorded on a Task row.
    """
    tid = int(sys.argv[1])
    cwindow = sys.argv[2] if len(sys.argv) > 2 else "*-*-*-*-*-*"
    if crontabwindow.crontab_range(cwindow):
        # Performance note: immediately check task execution time/day window
        # range to avoid other calls
        tdo = TaskDefinition.objects.get(id=tid)
        stype = "task_scheduler"
        aw = APIWrapper()
        if tdo.task_type != "snapshot":
            logger.error("task_type(%s) is not snapshot." % tdo.task_type)
            return
        meta = json.loads(tdo.json_meta)
        validate_snap_meta(meta)
        # to keep backwards compatibility, allow for share to be either
        # name or id and migrate the metadata. To be removed in #1854
        try:
            share = Share.objects.get(id=meta["share"])
        except ValueError:
            # Non-numeric value: legacy metadata stored the share name.
            # Resolve by name and persist the migrated id-based metadata.
            share = Share.objects.get(name=meta["share"])
            meta["share"] = share.id
            tdo.json_meta = json.dumps(meta)
            tdo.save()
        max_count = int(float(meta["max_count"]))
        prefix = "%s_" % meta["prefix"]
        now = datetime.utcnow().replace(second=0, microsecond=0, tzinfo=utc)
        t = Task(task_def=tdo, state="started", start=now)
        snap_created = False
        # Pessimistic default: only flipped to "finished" after a
        # successful API call.
        t.state = "error"
        try:
            # Snapshot name embeds a local-time timestamp for uniqueness.
            name = "%s_%s" % (
                meta["prefix"],
                datetime.now().strftime(settings.SNAP_TS_FORMAT),
            )
            url = "shares/{}/snapshots/{}".format(share.id, name)
            # only create a new snap if there's no overflow situation. This
            # prevents runaway snapshot creation beyond max_count+1.
            if delete(aw, share, stype, prefix, max_count):
                data = {
                    "snap_type": stype,
                    "uvisible": meta["visible"],
                    "writable": meta["writable"],
                }
                headers = {"content-type": "application/json"}
                aw.api_call(url, data=data, calltype="post", headers=headers,
                            save_error=False)
                logger.debug("created snapshot at %s" % url)
                t.state = "finished"
                snap_created = True
        except Exception as e:
            logger.error("Failed to create snapshot at %s" % url)
            logger.exception(e)
        finally:
            t.end = datetime.utcnow().replace(tzinfo=utc)
            t.save()
        # best effort pruning without erroring out. If deletion fails, we'll
        # have max_count+1 number of snapshots and it would be dealt with on
        # the next round.
        if snap_created:
            delete(aw, share, stype, prefix, max_count)
    else:
        logger.debug(
            "Cron scheduled task not executed because outside time/day window ranges"
        )