Example #1
def main():
    tid = int(sys.argv[1])
    tdo = TaskDefinition.objects.get(id=tid)
    if (tdo.task_type != 'scrub'):
        return logger.error('task_type(%s) is not scrub.' % tdo.task_type)
    meta = json.loads(tdo.json_meta)

    if (Task.objects.filter(task_def=tdo).exists()):
        ll = Task.objects.filter(task_def=tdo).order_by('-id')[0]
        if (ll.state != 'error' and ll.state != 'finished'):
            logger.debug('Non terminal state(%s) for task(%d). Checking '
                         'again.' % (ll.state, tid))
            cur_state = update_state(ll, meta['pool'])
            if (cur_state != 'error' and cur_state != 'finished'):
                return logger.debug('Non terminal state(%s) for task(%d). '
                                    'A new task will not be run.' %
                                    (cur_state, tid))

    now = datetime.utcnow().replace(second=0, microsecond=0, tzinfo=utc)
    t = Task(task_def=tdo, state='started', start=now)
    url = ('%spools/%s/scrub' % (baseurl, meta['pool']))
    try:
        api_call(url, data=None, calltype='post', save_error=False)
        logger.debug('Started scrub at %s' % url)
        t.state = 'running'
    except Exception as e:
        logger.error('Failed to start scrub at %s' % url)
        t.state = 'error'
        logger.exception(e)
Example #2
def main():
    tid = int(sys.argv[1])
    tdo = TaskDefinition.objects.get(id=tid)
    if (tdo.task_type != 'scrub'):
        return logger.error('task_type(%s) is not scrub.' % tdo.task_type)
    meta = json.loads(tdo.json_meta)

    if (Task.objects.filter(task_def=tdo).exists()):
        ll = Task.objects.filter(task_def=tdo).order_by('-id')[0]
        if (ll.state != 'error' and ll.state != 'finished'):
            logger.debug('Non terminal state(%s) for task(%d). Checking '
                         'again.' % (ll.state, tid))
            cur_state = update_state(ll, meta['pool'])
            if (cur_state != 'error' and cur_state != 'finished'):
                return logger.debug('Non terminal state(%s) for task(%d). '
                                    'A new task will not be run.' % (cur_state, tid))

    now = datetime.utcnow().replace(second=0, microsecond=0, tzinfo=utc)
    t = Task(task_def=tdo, state='started', start=now)
    url = ('%spools/%s/scrub' % (baseurl, meta['pool']))
    try:
        api_call(url, data=None, calltype='post', save_error=False)
        logger.debug('Started scrub at %s' % url)
        t.state = 'running'
    except Exception as e:
        logger.error('Failed to start scrub at %s' % url)
        t.state = 'error'
        logger.exception(e)
Example #3
def main():
    tid = int(sys.argv[1])
    cwindow = sys.argv[2] if len(sys.argv) > 2 else '*-*-*-*-*-*'
    if (crontabwindow.crontab_range(cwindow)):
        # Performance note: immediately check task execution time/day window
        # range to avoid other calls
        tdo = TaskDefinition.objects.get(id=tid)
        if (tdo.task_type != 'scrub'):
            return logger.error('task_type(%s) is not scrub.' % tdo.task_type)
        meta = json.loads(tdo.json_meta)
        aw = APIWrapper()

        if (Task.objects.filter(task_def=tdo).exists()):
            ll = Task.objects.filter(task_def=tdo).order_by('-id')[0]
            if (ll.state != 'error' and ll.state != 'finished'):
                logger.debug('Non terminal state(%s) for task(%d). Checking '
                             'again.' % (ll.state, tid))
                cur_state = update_state(ll, meta['pool'], aw)
                if (cur_state != 'error' and cur_state != 'finished'):
                    return logger.debug('Non terminal state(%s) for task(%d). '
                                        'A new task will not be run.' % (cur_state, tid))

        now = datetime.utcnow().replace(second=0, microsecond=0, tzinfo=utc)
        t = Task(task_def=tdo, state='started', start=now)
        url = ('pools/%s/scrub' % meta['pool'])
        try:
            aw.api_call(url, data=None, calltype='post', save_error=False)
            logger.debug('Started scrub at %s' % url)
            t.state = 'running'
        except Exception as e:
            logger.error('Failed to start scrub at %s' % url)
            t.state = 'error'
            logger.exception(e)
        finally:
            t.save()
Example #4
def main():
    tid = int(sys.argv[1])
    cwindow = sys.argv[2] if len(sys.argv) > 2 else "*-*-*-*-*-*"
    if crontabwindow.crontab_range(
        cwindow
    ):  # Performance note: immediately check task execution time/day window range to avoid other calls
        tdo = TaskDefinition.objects.get(id=tid)
        if tdo.task_type != "scrub":
            return logger.error("task_type(%s) is not scrub." % tdo.task_type)
        meta = json.loads(tdo.json_meta)
        aw = APIWrapper()

        if Task.objects.filter(task_def=tdo).exists():
            ll = Task.objects.filter(task_def=tdo).order_by("-id")[0]
            if ll.state != "error" and ll.state != "finished":
                logger.debug("Non terminal state(%s) for task(%d). Checking " "again." % (ll.state, tid))
                cur_state = update_state(ll, meta["pool"], aw)
                if cur_state != "error" and cur_state != "finished":
                    return logger.debug(
                        "Non terminal state(%s) for task(%d). " "A new task will not be run." % (cur_state, tid)
                    )

        now = datetime.utcnow().replace(second=0, microsecond=0, tzinfo=utc)
        t = Task(task_def=tdo, state="started", start=now)
        url = "pools/%s/scrub" % meta["pool"]
        try:
            aw.api_call(url, data=None, calltype="post", save_error=False)
            logger.debug("Started scrub at %s" % url)
            t.state = "running"
        except Exception as e:
            logger.error("Failed to start scrub at %s" % url)
            t.state = "error"
            logger.exception(e)
        finally:
            t.save()
Example #5
def main():
    tid = int(sys.argv[1])
    tdo = TaskDefinition.objects.get(id=tid)
    stype = 'task_scheduler'
    aw = APIWrapper()
    if (tdo.task_type != 'snapshot'):
        logger.error('task_type(%s) is not snapshot.' % tdo.task_type)
        return
    meta = json.loads(tdo.json_meta)
    validate_snap_meta(meta)
    share = Share.objects.get(name=meta['share'])
    max_count = int(float(meta['max_count']))
    prefix = ('%s_' % meta['prefix'])

    now = datetime.utcnow().replace(second=0, microsecond=0, tzinfo=utc)
    t = Task(task_def=tdo, state='started', start=now)

    snap_created = False
    t.state = 'error'
    try:
        name = ('%s_%s' % (meta['prefix'], datetime.now().strftime(settings.SNAP_TS_FORMAT)))
        url = ('shares/%s/snapshots/%s' % (share.name, name))
        #only create a new snap if there's no overflow situation. This prevents
        #runaway snapshot creation beyond max_count+1.
        if(delete(aw, share, stype, prefix, max_count)):
            data = {'snap_type': stype,
                    'uvisible': meta['visible'], }
            headers = {'content-type': 'application/json'}
            aw.api_call(url, data=data, calltype='post', headers=headers, save_error=False)
            logger.debug('created snapshot at %s' % url)
            t.state = 'finished'
            snap_created = True
    except Exception as e:
        logger.error('Failed to create snapshot at %s' % url)
        logger.exception(e)
Example #6
    def run(self):
        context = zmq.Context()
        sink_socket = context.socket(zmq.PUSH)
        sink_socket.connect('tcp://%s:%d' % settings.SPROBE_SINK)
        total_sleep = 0
        while True:
            if (os.getppid() != self.ppid):
                logger.info('ppids: %d, %d' % (os.getppid(), self.ppid))
                for w in self.workers.keys():
                    worker = self.workers[w]
                    if (worker.is_alive()):
                        #@todo: signal worker to cleanup and exit.
                        worker.task['queue'].put('stop')
                break

            for w in self.workers.keys():
                if (not self.workers[w].is_alive()):
                    to = Task.objects.get(id=w)
                    if (self.workers[w].exitcode == 0):
                        to.state = 'finished'
                    else:
                        to.state = 'error'
                    to.end = datetime.utcnow().replace(tzinfo=utc)
                    data = serialize("json", (to,))
                    sink_socket.send_json(data)
                    del(self.workers[w])

            if (total_sleep == 60):
                for td in TaskDefinition.objects.all():
                    now = datetime.utcnow().replace(second=0, microsecond=0,
                                                    tzinfo=utc)
                    if (self._schedulable(td, now)):
                        t = Task(name=td.name, json_meta=td.json_meta,
                                 state='scheduled', start=now)
                        data = serialize("json", (t,))
                        sink_socket.send_json(data)
                total_sleep = 0

            for t in Task.objects.filter(state='scheduled'):
                worker = TaskWorker(t)
                self.workers[t.id] = worker
                worker.daemon = True
                worker.start()

                if (worker.is_alive()):
                    t.state = 'running'
                    data = serialize("json", (t,))
                    sink_socket.send_json(data)
                else:
                    t.state = 'error'
                    t.end = datetime.utcnow().replace(tzinfo=utc)
                    data = serialize("json", (t,))
                    sink_socket.send_json(data)
            time.sleep(1)
            total_sleep = total_sleep + 1

        sink_socket.close()
        context.term()
        logger.info('terminated context. exiting')
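
A note on the worker-reaping loop above: it removes entries from self.workers while iterating self.workers.keys(). That is safe on Python 2, where keys() returns a list copy, but on Python 3 keys() is a live view and the deletion raises "RuntimeError: dictionary changed size during iteration". Below is a minimal sketch of a version-safe reap step; it reuses Task, utc and the exitcode convention from the example, persists the row directly as the later variants (Examples #9 and #11) do, and its name and workers argument are illustrative rather than part of the original.

from datetime import datetime

from django.utils.timezone import utc  # same tz constant the examples use


def reap_finished_workers(workers):
    # workers maps Task ids to worker processes; Task comes from the app's
    # models as in the examples. list() snapshots the keys so the dict can
    # shrink while we iterate.
    for task_id in list(workers.keys()):
        worker = workers[task_id]
        if worker.is_alive():
            continue
        to = Task.objects.get(id=task_id)
        to.state = 'finished' if worker.exitcode == 0 else 'error'
        to.end = datetime.utcnow().replace(tzinfo=utc)
        to.save()
        del workers[task_id]
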
Example #7
def main():
    tid = int(sys.argv[1])
    cwindow = sys.argv[2] if len(sys.argv) > 2 else '*-*-*-*-*-*'
    if (crontabwindow.crontab_range(cwindow)):
        # Performance note: immediately check task execution time/day window
        # range to avoid other calls
        tdo = TaskDefinition.objects.get(id=tid)
        stype = 'task_scheduler'
        aw = APIWrapper()
        if (tdo.task_type != 'snapshot'):
            logger.error('task_type(%s) is not snapshot.' % tdo.task_type)
            return
        meta = json.loads(tdo.json_meta)
        validate_snap_meta(meta)
        share = Share.objects.get(name=meta['share'])
        max_count = int(float(meta['max_count']))
        prefix = ('%s_' % meta['prefix'])

        now = datetime.utcnow().replace(second=0, microsecond=0, tzinfo=utc)
        t = Task(task_def=tdo, state='started', start=now)

        snap_created = False
        t.state = 'error'
        try:
            name = ('%s_%s' % (meta['prefix'], datetime.now().strftime(
                settings.SNAP_TS_FORMAT)))
            url = ('shares/%s/snapshots/%s' % (share.name, name))
            #only create a new snap if there's no overflow situation. This prevents
            #runaway snapshot creation beyond max_count+1.
            if (delete(aw, share, stype, prefix, max_count)):
                data = {
                    'snap_type': stype,
                    'uvisible': meta['visible'],
                    'writable': meta['writable'],
                }
                headers = {'content-type': 'application/json'}
                aw.api_call(url,
                            data=data,
                            calltype='post',
                            headers=headers,
                            save_error=False)
                logger.debug('created snapshot at %s' % url)
                t.state = 'finished'
                snap_created = True
        except Exception as e:
            logger.error('Failed to create snapshot at %s' % url)
            logger.exception(e)
        finally:
            t.end = datetime.utcnow().replace(tzinfo=utc)
            t.save()
Example #8
def main():
    tid = int(sys.argv[1])
    cwindow = sys.argv[2] if len(sys.argv) > 2 else "*-*-*-*-*-*"
    if crontabwindow.crontab_range(cwindow):
        # Performance note: immediately check task execution time/day window
        # range to avoid other calls
        tdo = TaskDefinition.objects.get(id=tid)
        if tdo.task_type != "scrub":
            return logger.error("task_type(%s) is not scrub." % tdo.task_type)
        meta = json.loads(tdo.json_meta)
        aw = APIWrapper()

        if Task.objects.filter(task_def=tdo).exists():
            ll = Task.objects.filter(task_def=tdo).order_by("-id")[0]
            if ll.state not in TERMINAL_SCRUB_STATES:
                logger.debug("Non terminal state(%s) for task(%d). Checking "
                             "again." % (ll.state, tid))
                cur_state = update_state(ll, meta["pool"], aw)
                if cur_state not in TERMINAL_SCRUB_STATES:
                    return logger.debug("Non terminal state(%s) for task(%d). "
                                        "A new task will not be run." %
                                        (cur_state, tid))

        now = datetime.utcnow().replace(second=0, microsecond=0, tzinfo=utc)
        t = Task(task_def=tdo, state="started", start=now)
        url = "pools/%s/scrub" % meta["pool"]
        try:
            aw.api_call(url, data=None, calltype="post", save_error=False)
            logger.debug("Started scrub at %s" % url)
            t.state = "running"
        except Exception as e:
            logger.error("Failed to start scrub at %s" % url)
            t.state = "error"
            logger.exception(e)
        finally:
            t.save()

        while True:
            cur_state = update_state(t, meta["pool"], aw)
            if cur_state in TERMINAL_SCRUB_STATES:
                logger.debug("task(%d) finished with state(%s)." %
                             (tid, cur_state))
                t.end = datetime.utcnow().replace(tzinfo=utc)
                t.save()
                break
            logger.debug("pending state(%s) for scrub task(%d). Will check "
                         "again in 60 seconds." % (cur_state, tid))
            time.sleep(60)
    else:
        logger.debug(
            "Cron scheduled task not executed because outside time/day window ranges"
        )
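
Examples #8, #10 and #12 lean on two helpers that the snippets do not include: TERMINAL_SCRUB_STATES and update_state(). The earlier variants (Examples #1 to #4) show that the terminal states are 'error' and 'finished'; the status endpoint and the response key used below are assumptions, so read this as an illustrative sketch rather than the project's actual helper. logger and the aw API wrapper are used as in the surrounding examples.

TERMINAL_SCRUB_STATES = ("error", "finished")


def update_state(t, pool, aw):
    # Mirror the pool's current scrub status onto the Task row and persist
    # it. The "pools/<pool>/scrub/status" route and the "status" response
    # key are assumptions made for this sketch.
    url = "pools/%s/scrub/status" % pool
    try:
        status = aw.api_call(url, data=None, calltype="post", save_error=False)
        t.state = status.get("status", t.state)
    except Exception as e:
        logger.error("Failed to fetch scrub status at %s" % url)
        logger.exception(e)
        t.state = "error"
    t.save()
    return t.state
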
Example #9
    def run(self):
        total_sleep = 0
        while True:
            if (os.getppid() != self.ppid):
                for w in self.workers.keys():
                    worker = self.workers[w]
                    if (worker.is_alive()):
                        #@todo: signal worker to cleanup and exit.
                        worker.task['queue'].put('stop')
                break

            for w in self.workers.keys():
                if (not self.workers[w].is_alive()):
                    to = Task.objects.get(id=w)
                    if (self.workers[w].exitcode == 0):
                        to.state = 'finished'
                    else:
                        to.state = 'error'
                    to.end = datetime.utcnow().replace(tzinfo=utc)
                    to.save()
                    del (self.workers[w])

            try:
                if (total_sleep >= 60):
                    for td in TaskDefinition.objects.filter(enabled=True):
                        now = datetime.utcnow().replace(second=0,
                                                        microsecond=0,
                                                        tzinfo=utc)
                        if (self._schedulable(td, now)):
                            t = Task(task_def=td, state='scheduled', start=now)
                            t.save()
                    total_sleep = 0

                for t in Task.objects.filter(state='scheduled'):
                    worker = TaskWorker(t)
                    self.workers[t.id] = worker
                    worker.daemon = True
                    worker.start()

                    if (worker.is_alive()):
                        t.state = 'running'
                    else:
                        t.state = 'error'
                        t.end = datetime.utcnow().replace(tzinfo=utc)
                    t.save()
            except DatabaseError as e:
                e_msg = ('Error getting the list of scheduled tasks. Moving'
                         ' on')
                logger.error(e_msg)
                logger.exception(e)
            finally:
                time.sleep(1)
                total_sleep = total_sleep + 1
Example #10
def main():
    tid = int(sys.argv[1])
    cwindow = sys.argv[2] if len(sys.argv) > 2 else '*-*-*-*-*-*'
    if (crontabwindow.crontab_range(cwindow)):
        # Performance note: immediately check task execution time/day window
        # range to avoid other calls
        tdo = TaskDefinition.objects.get(id=tid)
        if (tdo.task_type != 'scrub'):
            return logger.error('task_type(%s) is not scrub.' % tdo.task_type)
        meta = json.loads(tdo.json_meta)
        aw = APIWrapper()

        if (Task.objects.filter(task_def=tdo).exists()):
            ll = Task.objects.filter(task_def=tdo).order_by('-id')[0]
            if ll.state not in TERMINAL_SCRUB_STATES:
                logger.debug('Non terminal state(%s) for task(%d). Checking '
                             'again.' % (ll.state, tid))
                cur_state = update_state(ll, meta['pool'], aw)
                if cur_state not in TERMINAL_SCRUB_STATES:
                    return logger.debug('Non terminal state(%s) for task(%d). '
                                        'A new task will not be run.' %
                                        (cur_state, tid))

        now = datetime.utcnow().replace(second=0, microsecond=0, tzinfo=utc)
        t = Task(task_def=tdo, state='started', start=now)
        url = ('pools/%s/scrub' % meta['pool'])
        try:
            aw.api_call(url, data=None, calltype='post', save_error=False)
            logger.debug('Started scrub at %s' % url)
            t.state = 'running'
        except Exception as e:
            logger.error('Failed to start scrub at %s' % url)
            t.state = 'error'
            logger.exception(e)
        finally:
            t.save()

        while True:
            cur_state = update_state(t, meta['pool'], aw)
            if cur_state in TERMINAL_SCRUB_STATES:
                logger.debug('task(%d) finished with state(%s).' %
                             (tid, cur_state))
                t.end = datetime.utcnow().replace(tzinfo=utc)
                t.save()
                break
            logger.debug('pending state(%s) for scrub task(%d). Will check '
                         'again in 60 seconds.' % (cur_state, tid))
            time.sleep(60)
    else:
        logger.debug('Cron scheduled task not executed because outside '
                     'time/day window ranges')
Example #11
    def run(self):
        total_sleep = 0
        while True:
            if (os.getppid() != self.ppid):
                for w in self.workers.keys():
                    worker = self.workers[w]
                    if (worker.is_alive()):
                        #@todo: signal worker to cleanup and exit.
                        worker.task['queue'].put('stop')
                break

            for w in self.workers.keys():
                if (not self.workers[w].is_alive()):
                    to = Task.objects.get(id=w)
                    if (self.workers[w].exitcode == 0):
                        to.state = 'finished'
                    else:
                        to.state = 'error'
                    to.end = datetime.utcnow().replace(tzinfo=utc)
                    to.save()
                    del(self.workers[w])

            try:
                if (total_sleep >= 60):
                    for td in TaskDefinition.objects.filter(enabled=True):
                        now = datetime.utcnow().replace(second=0,
                                                        microsecond=0,
                                                        tzinfo=utc)
                        if (self._schedulable(td, now)):
                            t = Task(task_def=td, state='scheduled',
                                     start=now)
                            t.save()
                    total_sleep = 0

                for t in Task.objects.filter(state='scheduled'):
                    worker = TaskWorker(t)
                    self.workers[t.id] = worker
                    worker.daemon = True
                    worker.start()

                    if (worker.is_alive()):
                        t.state = 'running'
                    else:
                        t.state = 'error'
                        t.end = datetime.utcnow().replace(tzinfo=utc)
                    t.save()
            except DatabaseError as e:
                e_msg = ('Error getting the list of scheduled tasks. Moving'
                         ' on')
                logger.error(e_msg)
                logger.exception(e)
            finally:
                time.sleep(1)
                total_sleep = total_sleep + 1
Example #12
def main():
    tid = int(sys.argv[1])
    cwindow = sys.argv[2] if len(sys.argv) > 2 else '*-*-*-*-*-*'
    if (crontabwindow.crontab_range(cwindow)):
        # Performance note: immediately check task execution time/day window
        # range to avoid other calls
        tdo = TaskDefinition.objects.get(id=tid)
        if (tdo.task_type != 'scrub'):
            return logger.error('task_type(%s) is not scrub.' % tdo.task_type)
        meta = json.loads(tdo.json_meta)
        aw = APIWrapper()

        if (Task.objects.filter(task_def=tdo).exists()):
            ll = Task.objects.filter(task_def=tdo).order_by('-id')[0]
            if ll.state not in TERMINAL_SCRUB_STATES:
                logger.debug('Non terminal state(%s) for task(%d). Checking '
                             'again.' % (ll.state, tid))
                cur_state = update_state(ll, meta['pool'], aw)
                if cur_state not in TERMINAL_SCRUB_STATES:
                    return logger.debug('Non terminal state(%s) for task(%d). '
                                        'A new task will not be run.' %
                                        (cur_state, tid))

        now = datetime.utcnow().replace(second=0, microsecond=0, tzinfo=utc)
        t = Task(task_def=tdo, state='started', start=now)
        url = ('pools/%s/scrub' % meta['pool'])
        try:
            aw.api_call(url, data=None, calltype='post', save_error=False)
            logger.debug('Started scrub at %s' % url)
            t.state = 'running'
        except Exception as e:
            logger.error('Failed to start scrub at %s' % url)
            t.state = 'error'
            logger.exception(e)
        finally:
            t.save()

        while True:
            cur_state = update_state(t, meta['pool'], aw)
            if cur_state in TERMINAL_SCRUB_STATES:
                logger.debug('task(%d) finished with state(%s).' %
                             (tid, cur_state))
                t.end = datetime.utcnow().replace(tzinfo=utc)
                t.save()
                break
            logger.debug('pending state(%s) for scrub task(%d). Will check '
                         'again in 60 seconds.' % (cur_state, tid))
            time.sleep(60)
    else:
        logger.debug('Cron scheduled task not executed because outside '
                     'time/day window ranges')
Example #13
    def run(self):
        running_tasks = {}
        baseurl = 'https://localhost/api/'
        while True:
            if (os.getppid() != self.ppid):
                break
            try:
                for td in TaskDefinition.objects.filter(enabled=True):
                    now = datetime.utcnow().replace(second=0,
                                                    microsecond=0,
                                                    tzinfo=utc)
                    if (self._schedulable(td, now)):
                        if (Task.objects.filter(
                                task_def=td,
                                state__regex=r'(scheduled|started|running)').
                                exists()):
                            logger.debug(
                                'there is already a task scheduled or running for this definition'
                            )
                        else:
                            t = Task(task_def=td, state='scheduled', start=now)
                            t.save()

                for t in Task.objects.filter(state='scheduled'):
                    meta = json.loads(t.task_def.json_meta)
                    if (t.task_def.task_type == 'scrub'):
                        url = ('%spools/%s/scrub' % (baseurl, meta['pool']))
                        try:
                            api_call(url, data=None, calltype='post')
                            t.state = 'running'
                        except Exception:
                            t.state = 'error'
                        finally:
                            t.save()
                            if (t.state == 'running'):
                                running_tasks[t.id] = True
                    elif (t.task_def.task_type == 'snapshot'):
                        stype = 'task_scheduler'
                        try:
                            self._validate_snap_meta(meta)
                            name = ('%s_%s' %
                                    (meta['prefix'], datetime.utcnow().replace(
                                        tzinfo=utc).strftime(
                                            settings.SNAP_TS_FORMAT)))
                            url = ('%sshares/%s/snapshots/%s' %
                                   (baseurl, meta['share'], name))
                            data = {
                                'snap_type': stype,
                                'uvisible': meta['visible'],
                            }
                            headers = {'content-type': 'application/json'}
                            api_call(url,
                                     data=data,
                                     calltype='post',
                                     headers=headers)
                            t.state = 'finished'
                        except Exception as e:
                            t.state = 'error'
                            logger.exception(e)
                        finally:
                            t.end = datetime.utcnow().replace(tzinfo=utc)
                            t.save()

                        max_count = int(float(meta['max_count']))
                        share = Share.objects.get(name=meta['share'])
                        prefix = ('%s_' % meta['prefix'])
                        snapshots = Snapshot.objects.filter(
                            share=share,
                            snap_type=stype,
                            name__startswith=prefix).order_by('-id')
                        if (len(snapshots) > max_count):
                            for snap in snapshots[max_count:]:
                                url = ('%s/shares/%s/snapshots/%s' %
                                       (baseurl, meta['share'], snap.name))
                                try:
                                    api_call(url, data=None, calltype='delete')
                                except Exception as e:
                                    logger.error('Failed to delete old '
                                                 'snapshot(%s)' % snap.name)
                                    logger.exception(e)
Example #14
def main():
    tid = int(sys.argv[1])
    cwindow = sys.argv[2] if len(sys.argv) > 2 else '*-*-*-*-*-*'
    if (crontabwindow.crontab_range(cwindow)):
        # Performance note: immediately check task execution time/day window
        # range to avoid other calls
        tdo = TaskDefinition.objects.get(id=tid)
        aw = APIWrapper()
        if (tdo.task_type not in ['reboot', 'shutdown', 'suspend']):
            logger.error('task_type(%s) is not a system reboot, '
                         'shutdown or suspend.' % tdo.task_type)
            return
        meta = json.loads(tdo.json_meta)
        validate_shutdown_meta(meta)

        now = datetime.utcnow().replace(second=0, microsecond=0, tzinfo=utc)
        schedule = now + timedelta(minutes=3)
        t = Task(task_def=tdo, state='scheduled', start=now, end=schedule)

        try:
            # set default command url before checking if it's a shutdown
            # and if we have an rtc wake up
            url = ('commands/%s' % tdo.task_type)

            # if task_type is shutdown and rtc wake up true
            # parse crontab hour & minute vs rtc hour & minute to state
            # if wake will occur same day or next day, finally update
            # command url adding wake up epoch time
            if (tdo.task_type in ['shutdown', 'suspend'] and meta['wakeup']):
                crontab_fields = tdo.crontab.split()
                crontab_time = (int(crontab_fields[1]) * 60 +
                                int(crontab_fields[0]))
                wakeup_time = meta['rtc_hour'] * 60 + meta['rtc_minute']
                # rtc wake up requires UTC epoch, but users on WebUI set time
                # thinking to localtime, so first we set wake up time,
                # update it if wake up is on next day, finally move it to UTC
                # and get its epoch
                epoch = datetime.now().replace(hour=int(meta['rtc_hour']),
                                               minute=int(meta['rtc_minute']),
                                               second=0, microsecond=0)
                # if wake up < crontab time wake up will run next day
                if (crontab_time > wakeup_time):
                    epoch += timedelta(days=1)

                epoch = epoch.strftime('%s')
                url = ('%s/%s' % (url, epoch))

            aw.api_call(url, data=None, calltype='post', save_error=False)
            logger.debug('System %s scheduled' % tdo.task_type)
            t.state = 'finished'

        except Exception as e:
            t.state = 'failed'
            logger.error('Failed to schedule system %s' % tdo.task_type)
            logger.exception(e)

        finally:
            # t.end = datetime.utcnow().replace(tzinfo=utc)
            t.save()

    else:
        logger.debug('Cron scheduled task not executed because outside '
                     'time/day window ranges')
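
The wake-up branch above reduces to one small calculation: compare the shutdown time taken from the task's crontab with the requested RTC wake-up time, and push the wake-up to the next day when it would otherwise land before the shutdown. A minimal sketch of that calculation as a standalone helper (rtc_wakeup_epoch is an illustrative name, not part of the original):

from datetime import datetime, timedelta


def rtc_wakeup_epoch(crontab, rtc_hour, rtc_minute):
    # Crontab fields: minute first, then hour (e.g. '30 2 * * *').
    fields = crontab.split()
    crontab_minutes = int(fields[1]) * 60 + int(fields[0])
    wakeup_minutes = int(rtc_hour) * 60 + int(rtc_minute)
    wake = datetime.now().replace(hour=int(rtc_hour), minute=int(rtc_minute),
                                  second=0, microsecond=0)
    # A wake-up earlier in the day than the shutdown time belongs to the
    # following day.
    if crontab_minutes > wakeup_minutes:
        wake += timedelta(days=1)
    # strftime('%s') matches the original code; it yields epoch seconds on
    # glibc platforms but is not portable.
    return wake.strftime('%s')

Called as rtc_wakeup_epoch(tdo.crontab, meta['rtc_hour'], meta['rtc_minute']), it would produce the epoch suffix the example appends to the commands/<task_type> URL.
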
Example #15
def main():
    tid = int(sys.argv[1])
    cwindow = sys.argv[2] if len(sys.argv) > 2 else "*-*-*-*-*-*"
    if crontabwindow.crontab_range(cwindow):
        # Performance note: immediately check task execution time/day window
        # range to avoid other calls
        tdo = TaskDefinition.objects.get(id=tid)
        aw = APIWrapper()
        if tdo.task_type not in ["reboot", "shutdown", "suspend"]:
            logger.error(
                "task_type(%s) is not a system reboot, "
                "shutdown or suspend." % tdo.task_type
            )
            return
        meta = json.loads(tdo.json_meta)
        validate_shutdown_meta(meta)

        if not run_conditions_met(meta):
            logger.debug(
                "Cron scheduled task not executed because the run conditions have not been met"
            )
            return

        now = datetime.utcnow().replace(second=0, microsecond=0, tzinfo=utc)
        schedule = now + timedelta(minutes=3)
        t = Task(task_def=tdo, state="scheduled", start=now, end=schedule)

        try:
            # set default command url before checking if it's a shutdown
            # and if we have an rtc wake up
            url = "commands/%s" % tdo.task_type

            # if task_type is shutdown and rtc wake up true
            # parse crontab hour & minute vs rtc hour & minute to state
            # if wake will occur same day or next day, finally update
            # command url adding wake up epoch time
            if tdo.task_type in ["shutdown", "suspend"] and meta["wakeup"]:
                crontab_fields = tdo.crontab.split()
                crontab_time = int(crontab_fields[1]) * 60 + int(crontab_fields[0])
                wakeup_time = meta["rtc_hour"] * 60 + meta["rtc_minute"]
                # rtc wake up requires UTC epoch, but users on WebUI set time
                # thinking to localtime, so first we set wake up time,
                # update it if wake up is on next day, finally move it to UTC
                # and get its epoch
                epoch = datetime.now().replace(
                    hour=int(meta["rtc_hour"]),
                    minute=int(meta["rtc_minute"]),
                    second=0,
                    microsecond=0,
                )
                # if wake up < crontab time wake up will run next day
                if crontab_time > wakeup_time:
                    epoch += timedelta(days=1)

                epoch = epoch.strftime("%s")
                url = "%s/%s" % (url, epoch)

            aw.api_call(url, data=None, calltype="post", save_error=False)
            logger.debug("System %s scheduled" % tdo.task_type)
            t.state = "finished"

        except Exception as e:
            t.state = "failed"
            logger.error("Failed to schedule system %s" % tdo.task_type)
            logger.exception(e)

        finally:
            # t.end = datetime.utcnow().replace(tzinfo=utc)
            t.save()

    else:
        logger.debug(
            "Cron scheduled task not executed because outside time/day window ranges"
        )
Example #16
def main():
    tid = int(sys.argv[1])
    cwindow = sys.argv[2] if len(sys.argv) > 2 else "*-*-*-*-*-*"
    if crontabwindow.crontab_range(cwindow):
        # Performance note: immediately check task execution time/day window
        # range to avoid other calls
        tdo = TaskDefinition.objects.get(id=tid)
        stype = "task_scheduler"
        aw = APIWrapper()
        if tdo.task_type != "snapshot":
            logger.error("task_type(%s) is not snapshot." % tdo.task_type)
            return
        meta = json.loads(tdo.json_meta)
        validate_snap_meta(meta)

        # to keep backwards compatibility, allow for share to be either
        # name or id and migrate the metadata. To be removed in #1854
        try:
            share = Share.objects.get(id=meta["share"])
        except ValueError:
            share = Share.objects.get(name=meta["share"])
            meta["share"] = share.id
            tdo.json_meta = json.dumps(meta)
            tdo.save()

        max_count = int(float(meta["max_count"]))
        prefix = "%s_" % meta["prefix"]

        now = datetime.utcnow().replace(second=0, microsecond=0, tzinfo=utc)
        t = Task(task_def=tdo, state="started", start=now)

        snap_created = False
        t.state = "error"
        try:
            name = "%s_%s" % (
                meta["prefix"],
                datetime.now().strftime(settings.SNAP_TS_FORMAT),
            )
            url = "shares/{}/snapshots/{}".format(share.id, name)
            # only create a new snap if there's no overflow situation. This
            # prevents runaway snapshot creation beyond max_count+1.
            if delete(aw, share, stype, prefix, max_count):
                data = {
                    "snap_type": stype,
                    "uvisible": meta["visible"],
                    "writable": meta["writable"],
                }
                headers = {"content-type": "application/json"}
                aw.api_call(url,
                            data=data,
                            calltype="post",
                            headers=headers,
                            save_error=False)
                logger.debug("created snapshot at %s" % url)
                t.state = "finished"
                snap_created = True
        except Exception as e:
            logger.error("Failed to create snapshot at %s" % url)
            logger.exception(e)
        finally:
            t.end = datetime.utcnow().replace(tzinfo=utc)
            t.save()

        # best effort pruning without erroring out. If deletion fails, we'll
        # have max_count+1 number of snapshots and it would be dealt with on
        # the next round.
        if snap_created:
            delete(aw, share, stype, prefix, max_count)
    else:
        logger.debug(
            "Cron scheduled task not executed because outside time/day window ranges"
        )
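
The delete(aw, share, stype, prefix, max_count) helper that gates snapshot creation above is not part of these snippets. A plausible shape for it, reconstructed from the pruning loops in Examples #20 and #22: keep the newest max_count scheduler-created snapshots, delete the rest over the API, and report success so the caller can skip creation when pruning failed. The id-based URL, the boolean return contract, and the reuse of Snapshot, logger and aw from the surrounding examples are assumptions of this sketch.

def delete(aw, share, stype, prefix, max_count):
    # Prune this share's scheduler-created snapshots down to max_count,
    # newest first, mirroring the loops in Examples #20 and #22.
    snapshots = Snapshot.objects.filter(
        share=share, snap_type=stype,
        name__startswith=prefix).order_by("-id")
    for snap in snapshots[max_count:]:
        url = "shares/{}/snapshots/{}".format(share.id, snap.name)
        try:
            aw.api_call(url, data=None, calltype="delete", save_error=False)
            logger.debug("deleted old snapshot at %s" % url)
        except Exception as e:
            logger.error("Failed to delete old snapshot at %s" % url)
            logger.exception(e)
            # Signal overflow so the caller does not create yet another snap.
            return False
    return True
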
Example #17
def main():
    tid = int(sys.argv[1])
    cwindow = sys.argv[2] if len(sys.argv) > 2 else '*-*-*-*-*-*'
    if (crontabwindow.crontab_range(cwindow)):
        # Performance note: immediately check task execution time/day window
        # range to avoid other calls
        tdo = TaskDefinition.objects.get(id=tid)
        aw = APIWrapper()
        if (tdo.task_type not in ['reboot', 'shutdown', 'suspend']):
            logger.error('task_type(%s) is not a system reboot, '
                         'shutdown or suspend.' % tdo.task_type)
            return
        meta = json.loads(tdo.json_meta)
        validate_shutdown_meta(meta)

        now = datetime.utcnow().replace(second=0, microsecond=0, tzinfo=utc)
        schedule = now + timedelta(minutes=3)
        t = Task(task_def=tdo, state='scheduled', start=now, end=schedule)

        try:
            # set default command url before checking if it's a shutdown
            # and if we have an rtc wake up
            url = ('commands/%s' % tdo.task_type)

            # if task_type is shutdown and rtc wake up true
            # parse crontab hour & minute vs rtc hour & minute to state
            # if wake will occur same day or next day, finally update
            # command url adding wake up epoch time
            if (tdo.task_type in ['shutdown', 'suspend'] and meta['wakeup']):
                crontab_fields = tdo.crontab.split()
                crontab_time = (int(crontab_fields[1]) * 60 +
                                int(crontab_fields[0]))
                wakeup_time = meta['rtc_hour'] * 60 + meta['rtc_minute']
                # rtc wake up requires UTC epoch, but users on WebUI set time
                # thinking to localtime, so first we set wake up time,
                # update it if wake up is on next day, finally move it to UTC
                # and get its epoch
                epoch = datetime.now().replace(hour=int(meta['rtc_hour']),
                                               minute=int(meta['rtc_minute']),
                                               second=0,
                                               microsecond=0)
                # if wake up < crontab time wake up will run next day
                if (crontab_time > wakeup_time):
                    epoch += timedelta(days=1)

                epoch = epoch.strftime('%s')
                url = ('%s/%s' % (url, epoch))

            aw.api_call(url, data=None, calltype='post', save_error=False)
            logger.debug('System %s scheduled' % tdo.task_type)
            t.state = 'finished'

        except Exception as e:
            t.state = 'failed'
            logger.error('Failed to schedule system %s' % tdo.task_type)
            logger.exception(e)

        finally:
            # t.end = datetime.utcnow().replace(tzinfo=utc)
            t.save()

    else:
        logger.debug('Cron scheduled task not executed because outside '
                     'time/day window ranges')
Example #18
    def run(self):
        running_tasks = {}
        baseurl = 'https://localhost/api/'
        while True:
            if (os.getppid() != self.ppid):
                break
            try:
                for td in TaskDefinition.objects.filter(enabled=True):
                    now = datetime.utcnow().replace(second=0,
                                                    microsecond=0,
                                                    tzinfo=utc)
                    if (self._schedulable(td, now)):
                        if (Task.objects.filter(
                                task_def=td,
                                state__regex=r'(scheduled|started|running)').exists()):
                            logger.debug('there is already a task scheduled or running for this definition')
                        else:
                            t = Task(task_def=td, state='scheduled',
                                     start=now)
                            t.save()

                for t in Task.objects.filter(state='scheduled'):
                    meta = json.loads(t.task_def.json_meta)
                    if (t.task_def.task_type == 'scrub'):
                        url = ('%spools/%s/scrub' % (baseurl, meta['pool']))
                        try:
                            api_call(url, data=None, calltype='post')
                            t.state = 'running'
                        except Exception:
                            t.state = 'error'
                        finally:
                            t.save()
                            if (t.state == 'running'):
                                running_tasks[t.id] = True
                    elif (t.task_def.task_type == 'snapshot'):
                        stype = 'task_scheduler'
                        try:
                            self._validate_snap_meta(meta)
                            name = ('%s_%s' %
                                    (meta['prefix'],
                                     datetime.utcnow().replace(
                                         tzinfo=utc).strftime(
                                             settings.SNAP_TS_FORMAT)))
                            url = ('%sshares/%s/snapshots/%s' %
                                   (baseurl, meta['share'], name))
                            data = {'snap_type': stype,
                                    'uvisible': meta['visible'], }
                            headers = {'content-type': 'application/json'}
                            api_call(url, data=data, calltype='post',
                                     headers=headers)
                            t.state = 'finished'
                        except Exception as e:
                            t.state = 'error'
                            logger.exception(e)
                        finally:
                            t.end = datetime.utcnow().replace(tzinfo=utc)
                            t.save()

                        max_count = int(float(meta['max_count']))
                        share = Share.objects.get(name=meta['share'])
                        prefix = ('%s_' % meta['prefix'])
                        snapshots = Snapshot.objects.filter(
                            share=share, snap_type=stype,
                            name__startswith=prefix).order_by('-id')
                        if (len(snapshots) > max_count):
                            for snap in snapshots[max_count:]:
                                url = ('%s/shares/%s/snapshots/%s' %
                                       (baseurl, meta['share'], snap.name))
                                try:
                                    api_call(url, data=None, calltype='delete')
                                except Exception as e:
                                    logger.error('Failed to delete old '
                                                 'snapshot(%s)' % snap.name)
                                    logger.exception(e)
Example #19
def main():
    tid = int(sys.argv[1])
    cwindow = sys.argv[2] if len(sys.argv) > 2 else '*-*-*-*-*-*'
    if (crontabwindow.crontab_range(cwindow)):
        # Performance note: immediately check task execution time/day window
        # range to avoid other calls
        tdo = TaskDefinition.objects.get(id=tid)
        stype = 'task_scheduler'
        aw = APIWrapper()
        if (tdo.task_type != 'snapshot'):
            logger.error('task_type(%s) is not snapshot.' % tdo.task_type)
            return
        meta = json.loads(tdo.json_meta)
        validate_snap_meta(meta)

        # to keep backwards compatibility, allow for share to be either
        # name or id and migrate the metadata. To be removed in #1854
        try:
            share = Share.objects.get(id=meta['share'])
        except ValueError:
            share = Share.objects.get(name=meta['share'])
            meta['share'] = share.id
            tdo.json_meta = json.dumps(meta)
            tdo.save()

        max_count = int(float(meta['max_count']))
        prefix = ('%s_' % meta['prefix'])

        now = datetime.utcnow().replace(second=0, microsecond=0, tzinfo=utc)
        t = Task(task_def=tdo, state='started', start=now)

        snap_created = False
        t.state = 'error'
        try:
            name = ('%s_%s'
                    % (meta['prefix'],
                       datetime.now().strftime(settings.SNAP_TS_FORMAT)))
            url = ('shares/{}/snapshots/{}'.format(share.id, name))
            # only create a new snap if there's no overflow situation. This
            # prevents runaway snapshot creation beyond max_count+1.
            if(delete(aw, share, stype, prefix, max_count)):
                data = {'snap_type': stype,
                        'uvisible': meta['visible'],
                        'writable': meta['writable'], }
                headers = {'content-type': 'application/json'}
                aw.api_call(url, data=data, calltype='post', headers=headers,
                            save_error=False)
                logger.debug('created snapshot at %s' % url)
                t.state = 'finished'
                snap_created = True
        except Exception as e:
            logger.error('Failed to create snapshot at %s' % url)
            logger.exception(e)
        finally:
            t.end = datetime.utcnow().replace(tzinfo=utc)
            t.save()

        # best effort pruning without erroring out. If deletion fails, we'll
        # have max_count+1 number of snapshots and it would be dealt with on
        # the next round.
        if (snap_created):
            delete(aw, share, stype, prefix, max_count)
    else:
        logger.debug('Cron scheduled task not executed because outside '
                     'time/day window ranges')
Example #20
    snapshots = Snapshot.objects.filter(
        share=share, snap_type=stype, name__startswith=prefix).order_by('-id')
    if (len(snapshots) > max_count):
        for snap in snapshots[max_count:]:
            url = ('%s/shares/%s/snapshots/%s' %
                   (baseurl, meta['share'], snap.name))
            try:
                api_call(url, data=None, calltype='delete', save_error=False)
                logger.debug('deleted old snapshot at %s' % url)
            except Exception as e:
                logger.error('Failed to delete old snapshot at %s' % url)
                logger.exception(e)
                return

    now = datetime.utcnow().replace(second=0, microsecond=0, tzinfo=utc)
    t = Task(task_def=tdo, state='started', start=now)
    try:
        name = ('%s_%s' % (meta['prefix'], datetime.utcnow().replace(
            tzinfo=utc).strftime(settings.SNAP_TS_FORMAT)))
        url = ('%sshares/%s/snapshots/%s' % (baseurl, meta['share'], name))
        data = {
            'snap_type': stype,
            'uvisible': meta['visible'],
        }
        headers = {'content-type': 'application/json'}
        api_call(url,
                 data=data,
                 calltype='post',
                 headers=headers,
                 save_error=False)
        logger.debug('created snapshot at %s' % url)
Example #21
    def run(self):
        context = zmq.Context()
        sink_socket = context.socket(zmq.PUSH)
        sink_socket.connect('tcp://%s:%d' % settings.SPROBE_SINK)
        total_sleep = 0
        while True:
            if (os.getppid() != self.ppid):
                logger.info('ppids: %d, %d' % (os.getppid(), self.ppid))
                for w in self.workers.keys():
                    worker = self.workers[w]
                    if (worker.is_alive()):
                        #@todo: signal worker to cleanup and exit.
                        worker.task['queue'].put('stop')
                break

            for w in self.workers.keys():
                if (not self.workers[w].is_alive()):
                    to = Task.objects.get(id=w)
                    if (self.workers[w].exitcode == 0):
                        to.state = 'finished'
                    else:
                        to.state = 'error'
                    to.end = datetime.utcnow().replace(tzinfo=utc)
                    data = serialize("json", (to, ))
                    sink_socket.send_json(data)
                    del (self.workers[w])

            if (total_sleep == 60):
                for td in TaskDefinition.objects.all():
                    now = datetime.utcnow().replace(second=0,
                                                    microsecond=0,
                                                    tzinfo=utc)
                    if (self._schedulable(td, now)):
                        t = Task(name=td.name,
                                 json_meta=td.json_meta,
                                 state='scheduled',
                                 start=now)
                        data = serialize("json", (t, ))
                        sink_socket.send_json(data)
                total_sleep = 0

            for t in Task.objects.filter(state='scheduled'):
                worker = TaskWorker(t)
                self.workers[t.id] = worker
                worker.daemon = True
                worker.start()

                if (worker.is_alive()):
                    t.state = 'running'
                    data = serialize("json", (t, ))
                    sink_socket.send_json(data)
                else:
                    t.state = 'error'
                    t.end = datetime.utcnow().replace(tzinfo=utc)
                    data = serialize("json", (t, ))
                    sink_socket.send_json(data)
            time.sleep(1)
            total_sleep = total_sleep + 1

        sink_socket.close()
        context.term()
        logger.info('terminated context. exiting')
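
Both dispatcher variants call self._schedulable(td, now) to decide whether a definition is due in the current minute, but the helper itself is not included in these snippets. A minimal sketch of one way to express it with the croniter library (the use of croniter is an assumption of this sketch, not necessarily how the project implements it):

from croniter import croniter


def _schedulable(self, td, now):
    # Due when the definition's crontab expression matches the
    # minute-truncated timestamp passed in by the dispatcher loop.
    # The project's own helper may also guard against double-scheduling.
    return croniter.match(td.crontab, now)
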
Example #22
    prefix = ('%s_' % meta['prefix'])
    snapshots = Snapshot.objects.filter(share=share, snap_type=stype,
                                        name__startswith=prefix).order_by('-id')
    if (len(snapshots) > max_count):
        for snap in snapshots[max_count:]:
            url = ('shares/%s/snapshots/%s' % (meta['share'], snap.name))
            try:
                aw.api_call(url, data=None, calltype='delete', save_error=False)
                logger.debug('deleted old snapshot at %s' % url)
            except Exception as e:
                logger.error('Failed to delete old snapshot at %s' % url)
                logger.exception(e)
                return

    now = datetime.utcnow().replace(second=0, microsecond=0, tzinfo=utc)
    t = Task(task_def=tdo, state='started', start=now)
    try:
        name = ('%s_%s' % (meta['prefix'], datetime.now().strftime(settings.SNAP_TS_FORMAT)))
        url = ('shares/%s/snapshots/%s' % (meta['share'], name))
        data = {'snap_type': stype,
                'uvisible': meta['visible'], }
        headers = {'content-type': 'application/json'}
        aw.api_call(url, data=data, calltype='post', headers=headers, save_error=False)
        logger.debug('created snapshot at %s' % url)
        t.state = 'finished'
    except Exception as e:
        logger.error('Failed to create snapshot at %s' % url)
        t.state = 'error'
        logger.exception(e)
    finally:
        t.end = datetime.utcnow().replace(tzinfo=utc)
Example #23
def main():
    tid = int(sys.argv[1])
    cwindow = sys.argv[2] if len(sys.argv) > 2 else '*-*-*-*-*-*'
    if (crontabwindow.crontab_range(cwindow)):
        # Performance note: immediately check task execution time/day window
        # range to avoid other calls
        tdo = TaskDefinition.objects.get(id=tid)
        stype = 'task_scheduler'
        aw = APIWrapper()
        if (tdo.task_type != 'snapshot'):
            logger.error('task_type(%s) is not snapshot.' % tdo.task_type)
            return
        meta = json.loads(tdo.json_meta)
        validate_snap_meta(meta)

        # to keep backwards compatibility, allow for share to be either
        # name or id and migrate the metadata. To be removed in #1854
        try:
            share = Share.objects.get(id=meta['share'])
        except ValueError:
            share = Share.objects.get(name=meta['share'])
            meta['share'] = share.id
            tdo.json_meta = json.dumps(meta)
            tdo.save()

        max_count = int(float(meta['max_count']))
        prefix = ('%s_' % meta['prefix'])

        now = datetime.utcnow().replace(second=0, microsecond=0, tzinfo=utc)
        t = Task(task_def=tdo, state='started', start=now)

        snap_created = False
        t.state = 'error'
        try:
            name = ('%s_%s' % (meta['prefix'], datetime.now().strftime(
                settings.SNAP_TS_FORMAT)))
            url = ('shares/{}/snapshots/{}'.format(share.id, name))
            # only create a new snap if there's no overflow situation. This
            # prevents runaway snapshot creation beyond max_count+1.
            if (delete(aw, share, stype, prefix, max_count)):
                data = {
                    'snap_type': stype,
                    'uvisible': meta['visible'],
                    'writable': meta['writable'],
                }
                headers = {'content-type': 'application/json'}
                aw.api_call(url,
                            data=data,
                            calltype='post',
                            headers=headers,
                            save_error=False)
                logger.debug('created snapshot at %s' % url)
                t.state = 'finished'
                snap_created = True
        except Exception as e:
            logger.error('Failed to create snapshot at %s' % url)
            logger.exception(e)
        finally:
            t.end = datetime.utcnow().replace(tzinfo=utc)
            t.save()

        # best effort pruning without erroring out. If deletion fails, we'll
        # have max_count+1 number of snapshots and it would be dealt with on
        # the next round.
        if (snap_created):
            delete(aw, share, stype, prefix, max_count)
    else:
        logger.debug('Cron scheduled task not executed because outside '
                     'time/day window ranges')