Code example #1
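A task-scheduler run() loop that reaps finished TaskWorker processes, creates a Task for every schedulable TaskDefinition roughly once a minute, and pushes serialized task state to a ZeroMQ PUSH sink instead of saving it directly.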
    def run(self):
        context = zmq.Context()
        sink_socket = context.socket(zmq.PUSH)
        sink_socket.connect('tcp://%s:%d' % settings.SPROBE_SINK)
        total_sleep = 0
        while True:
            if (os.getppid() != self.ppid):
                logger.info('ppids: %d, %d' % (os.getppid(), self.ppid))
                for w in self.workers.keys():
                    worker = self.workers[w]
                    if (worker.is_alive()):
                        #@todo: signal worker to cleanup and exit.
                        worker.task['queue'].put('stop')
                break

            for w in self.workers.keys():
                if (not self.workers[w].is_alive()):
                    to = Task.objects.get(id=w)
                    if (self.workers[w].exitcode == 0):
                        to.state = 'finished'
                    else:
                        to.state = 'error'
                    to.end = datetime.utcnow().replace(tzinfo=utc)
                    data = serialize("json", (to,))
                    sink_socket.send_json(data)
                    del(self.workers[w])

            if (total_sleep == 60):
                for td in TaskDefinition.objects.all():
                    now = datetime.utcnow().replace(second=0, microsecond=0,
                                                    tzinfo=utc)
                    if (self._schedulable(td, now)):
                        t = Task(name=td.name, json_meta=td.json_meta,
                                 state='scheduled', start=now)
                        data = serialize("json", (t,))
                        sink_socket.send_json(data)
                total_sleep = 0

            for t in Task.objects.filter(state='scheduled'):
                worker = TaskWorker(t)
                self.workers[t.id] = worker
                worker.daemon = True
                worker.start()

                if (worker.is_alive()):
                    t.state = 'running'
                    data = serialize("json", (t,))
                    sink_socket.send_json(data)
                else:
                    t.state = 'error'
                    t.end = datetime.utcnow().replace(tzinfo=utc)
                    data = serialize("json", (t,))
                    sink_socket.send_json(data)
            time.sleep(1)
            total_sleep = total_sleep + 1

        sink_socket.close()
        context.term()
        logger.info('terminated context. exiting')
Code example #2
File: snapshot.py Project: sirio81/rockstor-core
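A standalone snapshot task script: it checks the crontab window, validates the task metadata, prunes old snapshots via delete() before creating a new one through the API, and records the outcome in a Task row.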
def main():
    tid = int(sys.argv[1])
    cwindow = sys.argv[2] if len(sys.argv) > 2 else '*-*-*-*-*-*'
    if (crontabwindow.crontab_range(cwindow)):
        # Performance note: immediately check task execution time/day window
        # range to avoid other calls
        tdo = TaskDefinition.objects.get(id=tid)
        stype = 'task_scheduler'
        aw = APIWrapper()
        if (tdo.task_type != 'snapshot'):
            logger.error('task_type(%s) is not snapshot.' % tdo.task_type)
            return
        meta = json.loads(tdo.json_meta)
        validate_snap_meta(meta)
        share = Share.objects.get(id=meta['share'])
        max_count = int(float(meta['max_count']))
        prefix = ('%s_' % meta['prefix'])

        now = datetime.utcnow().replace(second=0, microsecond=0, tzinfo=utc)
        t = Task(task_def=tdo, state='started', start=now)

        snap_created = False
        t.state = 'error'
        try:
            name = ('%s_%s' % (meta['prefix'], datetime.now().strftime(
                settings.SNAP_TS_FORMAT)))
            url = ('shares/{}/snapshots/{}'.format(share.id, name))
            # only create a new snap if there's no overflow situation. This
            # prevents runaway snapshot creation beyond max_count+1.
            if (delete(aw, share, stype, prefix, max_count)):
                data = {
                    'snap_type': stype,
                    'uvisible': meta['visible'],
                    'writable': meta['writable'],
                }
                headers = {'content-type': 'application/json'}
                aw.api_call(url,
                            data=data,
                            calltype='post',
                            headers=headers,
                            save_error=False)
                logger.debug('created snapshot at %s' % url)
                t.state = 'finished'
                snap_created = True
        except Exception as e:
            logger.error('Failed to create snapshot at %s' % url)
            logger.exception(e)
        finally:
            t.end = datetime.utcnow().replace(tzinfo=utc)
            t.save()

        # best effort pruning without erroring out. If deletion fails, we'll
        # have max_count+1 number of snapshots and it would be dealt with on
        # the next round.
        if (snap_created):
            delete(aw, share, stype, prefix, max_count)
    else:
        logger.debug('Cron scheduled task not executed because outside '
                     'time/day window ranges')
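The delete() helper called above (and in the other snapshot task scripts below) is not part of these excerpts. Below is a minimal sketch of the pruning behaviour it appears to implement, modelled on the inline pruning in the run() loops of examples #8 and #13; the function name, the exact queryset, and the True/False return convention are assumptions rather than the project's actual code.

def prune_snapshots(aw, share, stype, prefix, max_count):
    # Hypothetical stand-in for the project's delete() helper: keep the
    # newest max_count snapshots whose names start with prefix, remove the
    # rest through the API, and report success so the caller knows whether
    # it is safe to create another snapshot.
    snapshots = Snapshot.objects.filter(
        share=share, snap_type=stype,
        name__startswith=prefix).order_by('-id')
    for snap in snapshots[max_count:]:
        url = 'shares/{}/snapshots/{}'.format(share.id, snap.name)
        try:
            aw.api_call(url, data=None, calltype='delete', save_error=False)
            logger.debug('deleted old snapshot at %s' % url)
        except Exception as e:
            logger.error('Failed to delete old snapshot at %s' % url)
            logger.exception(e)
            return False
    return True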
Code example #3
File: pool_scrub.py Project: zboy13/rockstor-core
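A standalone pool-scrub task script: it skips the run if the previous scrub task for the same definition is still in a non-terminal state, starts a scrub via the API, and polls update_state() every 60 seconds until the scrub reaches a terminal state.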
def main():
    tid = int(sys.argv[1])
    cwindow = sys.argv[2] if len(sys.argv) > 2 else "*-*-*-*-*-*"
    if crontabwindow.crontab_range(cwindow):
        # Performance note: immediately check task execution time/day window
        # range to avoid other calls
        tdo = TaskDefinition.objects.get(id=tid)
        if tdo.task_type != "scrub":
            return logger.error("task_type(%s) is not scrub." % tdo.task_type)
        meta = json.loads(tdo.json_meta)
        aw = APIWrapper()

        if Task.objects.filter(task_def=tdo).exists():
            ll = Task.objects.filter(task_def=tdo).order_by("-id")[0]
            if ll.state not in TERMINAL_SCRUB_STATES:
                logger.debug("Non terminal state(%s) for task(%d). Checking "
                             "again." % (ll.state, tid))
                cur_state = update_state(ll, meta["pool"], aw)
                if cur_state not in TERMINAL_SCRUB_STATES:
                    return logger.debug("Non terminal state(%s) for task(%d). "
                                        "A new task will not be run." %
                                        (cur_state, tid))

        now = datetime.utcnow().replace(second=0, microsecond=0, tzinfo=utc)
        t = Task(task_def=tdo, state="started", start=now)
        url = "pools/%s/scrub" % meta["pool"]
        try:
            aw.api_call(url, data=None, calltype="post", save_error=False)
            logger.debug("Started scrub at %s" % url)
            t.state = "running"
        except Exception as e:
            logger.error("Failed to start scrub at %s" % url)
            t.state = "error"
            logger.exception(e)
        finally:
            t.save()

        while True:
            cur_state = update_state(t, meta["pool"], aw)
            if cur_state in TERMINAL_SCRUB_STATES:
                logger.debug("task(%d) finished with state(%s)." %
                             (tid, cur_state))
                t.end = datetime.utcnow().replace(tzinfo=utc)
                t.save()
                break
            logger.debug("pending state(%s) for scrub task(%d). Will check "
                         "again in 60 seconds." % (cur_state, tid))
            time.sleep(60)
    else:
        logger.debug(
            "Cron scheduled task not executed because outside time/day window ranges"
        )
Code example #4
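A run() loop variant that saves Task rows directly instead of pushing them to a sink, schedules only enabled TaskDefinitions, and guards the scheduling work against DatabaseError.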
    def run(self):
        total_sleep = 0
        while True:
            if (os.getppid() != self.ppid):
                for w in self.workers.keys():
                    worker = self.workers[w]
                    if (worker.is_alive()):
                        #@todo: signal worker to cleanup and exit.
                        worker.task['queue'].put('stop')
                break

            for w in self.workers.keys():
                if (not self.workers[w].is_alive()):
                    to = Task.objects.get(id=w)
                    if (self.workers[w].exitcode == 0):
                        to.state = 'finished'
                    else:
                        to.state = 'error'
                    to.end = datetime.utcnow().replace(tzinfo=utc)
                    to.save()
                    del(self.workers[w])

            try:
                if (total_sleep >= 60):
                    for td in TaskDefinition.objects.filter(enabled=True):
                        now = datetime.utcnow().replace(second=0,
                                                        microsecond=0,
                                                        tzinfo=utc)
                        if (self._schedulable(td, now)):
                            t = Task(task_def=td, state='scheduled',
                                     start=now)
                            t.save()
                    total_sleep = 0

                for t in Task.objects.filter(state='scheduled'):
                    worker = TaskWorker(t)
                    self.workers[t.id] = worker
                    worker.daemon = True
                    worker.start()

                    if (worker.is_alive()):
                        t.state = 'running'
                    else:
                        t.state = 'error'
                        t.end = datetime.utcnow().replace(tzinfo=utc)
                    t.save()
            except DatabaseError as e:
                e_msg = ('Error getting the list of scheduled tasks. Moving'
                         ' on')
                logger.error(e_msg)
                logger.exception(e)
            finally:
                # The excerpt is cut off here; as in the other run() variants
                # in this collection, the loop presumably sleeps for a second
                # and increments total_sleep before the next iteration.
                time.sleep(1)
                total_sleep = total_sleep + 1
Code example #5
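A pool-scrub task script; the code is identical to example #6 below and differs from example #3 only in quoting and minor formatting.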
def main():
    tid = int(sys.argv[1])
    cwindow = sys.argv[2] if len(sys.argv) > 2 else '*-*-*-*-*-*'
    if (crontabwindow.crontab_range(cwindow)):
        # Performance note: immediately check task execution time/day window
        # range to avoid other calls
        tdo = TaskDefinition.objects.get(id=tid)
        if (tdo.task_type != 'scrub'):
            return logger.error('task_type(%s) is not scrub.' % tdo.task_type)
        meta = json.loads(tdo.json_meta)
        aw = APIWrapper()

        if (Task.objects.filter(task_def=tdo).exists()):
            ll = Task.objects.filter(task_def=tdo).order_by('-id')[0]
            if ll.state not in TERMINAL_SCRUB_STATES:
                logger.debug('Non terminal state(%s) for task(%d). Checking '
                             'again.' % (ll.state, tid))
                cur_state = update_state(ll, meta['pool'], aw)
                if cur_state not in TERMINAL_SCRUB_STATES:
                    return logger.debug('Non terminal state(%s) for task(%d). '
                                        'A new task will not be run.' %
                                        (cur_state, tid))

        now = datetime.utcnow().replace(second=0, microsecond=0, tzinfo=utc)
        t = Task(task_def=tdo, state='started', start=now)
        url = ('pools/%s/scrub' % meta['pool'])
        try:
            aw.api_call(url, data=None, calltype='post', save_error=False)
            logger.debug('Started scrub at %s' % url)
            t.state = 'running'
        except Exception as e:
            logger.error('Failed to start scrub at %s' % url)
            t.state = 'error'
            logger.exception(e)
        finally:
            t.save()

        while True:
            cur_state = update_state(t, meta['pool'], aw)
            if cur_state in TERMINAL_SCRUB_STATES:
                logger.debug('task(%d) finished with state(%s).' %
                             (tid, cur_state))
                t.end = datetime.utcnow().replace(tzinfo=utc)
                t.save()
                break
            logger.debug('pending state(%s) for scrub task(%d). Will check '
                         'again in 60 seconds.' % (cur_state, tid))
            time.sleep(60)
    else:
        logger.debug('Cron scheduled task not executed because outside '
                     'time/day window ranges')
Code example #6
File: pool_scrub.py Project: MFlyer/rockstor-core
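The pool_scrub.py script from the MFlyer fork of rockstor-core; the code matches example #5.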
def main():
    tid = int(sys.argv[1])
    cwindow = sys.argv[2] if len(sys.argv) > 2 else '*-*-*-*-*-*'
    if (crontabwindow.crontab_range(cwindow)):
        # Performance note: immediately check task execution time/day window
        # range to avoid other calls
        tdo = TaskDefinition.objects.get(id=tid)
        if (tdo.task_type != 'scrub'):
            return logger.error('task_type(%s) is not scrub.' % tdo.task_type)
        meta = json.loads(tdo.json_meta)
        aw = APIWrapper()

        if (Task.objects.filter(task_def=tdo).exists()):
            ll = Task.objects.filter(task_def=tdo).order_by('-id')[0]
            if ll.state not in TERMINAL_SCRUB_STATES:
                logger.debug('Non terminal state(%s) for task(%d). Checking '
                             'again.' % (ll.state, tid))
                cur_state = update_state(ll, meta['pool'], aw)
                if cur_state not in TERMINAL_SCRUB_STATES:
                    return logger.debug('Non terminal state(%s) for task(%d). '
                                        'A new task will not be run.' %
                                        (cur_state, tid))

        now = datetime.utcnow().replace(second=0, microsecond=0, tzinfo=utc)
        t = Task(task_def=tdo, state='started', start=now)
        url = ('pools/%s/scrub' % meta['pool'])
        try:
            aw.api_call(url, data=None, calltype='post', save_error=False)
            logger.debug('Started scrub at %s' % url)
            t.state = 'running'
        except Exception as e:
            logger.error('Failed to start scrub at %s' % url)
            t.state = 'error'
            logger.exception(e)
        finally:
            t.save()

        while True:
            cur_state = update_state(t, meta['pool'], aw)
            if cur_state in TERMINAL_SCRUB_STATES:
                logger.debug('task(%d) finished with state(%s).' %
                             (tid, cur_state))
                t.end = datetime.utcnow().replace(tzinfo=utc)
                t.save()
                break
            logger.debug('pending state(%s) for scrub task(%d). Will check '
                         'again in 60 seconds.' % (cur_state, tid))
            time.sleep(60)
    else:
        logger.debug('Cron scheduled task not executed because outside '
                     'time/day window ranges')
Code example #7
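The same run() loop as example #4, apart from minor formatting differences.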
    def run(self):
        total_sleep = 0
        while True:
            if (os.getppid() != self.ppid):
                for w in self.workers.keys():
                    worker = self.workers[w]
                    if (worker.is_alive()):
                        #@todo: signal worker to cleanup and exit.
                        worker.task['queue'].put('stop')
                break

            for w in self.workers.keys():
                if (not self.workers[w].is_alive()):
                    to = Task.objects.get(id=w)
                    if (self.workers[w].exitcode == 0):
                        to.state = 'finished'
                    else:
                        to.state = 'error'
                    to.end = datetime.utcnow().replace(tzinfo=utc)
                    to.save()
                    del (self.workers[w])

            try:
                if (total_sleep >= 60):
                    for td in TaskDefinition.objects.filter(enabled=True):
                        now = datetime.utcnow().replace(second=0,
                                                        microsecond=0,
                                                        tzinfo=utc)
                        if (self._schedulable(td, now)):
                            t = Task(task_def=td, state='scheduled', start=now)
                            t.save()
                    total_sleep = 0

                for t in Task.objects.filter(state='scheduled'):
                    worker = TaskWorker(t)
                    self.workers[t.id] = worker
                    worker.daemon = True
                    worker.start()

                    if (worker.is_alive()):
                        t.state = 'running'
                    else:
                        t.state = 'error'
                        t.end = datetime.utcnow().replace(tzinfo=utc)
                    t.save()
            except DatabaseError as e:
                e_msg = ('Error getting the list of scheduled tasks. Moving'
                         ' on')
                logger.error(e_msg)
                logger.exception(e)
            finally:
                # The excerpt is cut off here; as in the other run() variants
                # in this collection, the loop presumably sleeps for a second
                # and increments total_sleep before the next iteration.
                time.sleep(1)
                total_sleep = total_sleep + 1
Code example #8
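An all-in-one run() loop that schedules and executes tasks inline: it starts scrubs and creates snapshots directly through the REST API at https://localhost/api/ and prunes snapshots beyond max_count itself.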
    def run(self):
        running_tasks = {}
        baseurl = 'https://localhost/api/'
        while True:
            if (os.getppid() != self.ppid):
                break
            try:
                for td in TaskDefinition.objects.filter(enabled=True):
                    now = datetime.utcnow().replace(second=0,
                                                    microsecond=0,
                                                    tzinfo=utc)
                    if (self._schedulable(td, now)):
                        if (Task.objects.filter(
                                task_def=td,
                                state__regex=r'(scheduled|started|running)').
                                exists()):
                            logger.debug(
                                'there is already a task scheduled or running for this definition'
                            )
                        else:
                            t = Task(task_def=td, state='scheduled', start=now)
                            t.save()

                for t in Task.objects.filter(state='scheduled'):
                    meta = json.loads(t.task_def.json_meta)
                    if (t.task_def.task_type == 'scrub'):
                        url = ('%spools/%s/scrub' % (baseurl, meta['pool']))
                        try:
                            api_call(url, data=None, calltype='post')
                            t.state = 'running'
                        except:
                            t.state = 'error'
                        finally:
                            t.save()
                            if (t.state == 'running'):
                                running_tasks[t.id] = True
                    elif (t.task_def.task_type == 'snapshot'):
                        stype = 'task_scheduler'
                        try:
                            self._validate_snap_meta(meta)
                            name = ('%s_%s' %
                                    (meta['prefix'], datetime.utcnow().replace(
                                        tzinfo=utc).strftime(
                                            settings.SNAP_TS_FORMAT)))
                            url = ('%sshares/%s/snapshots/%s' %
                                   (baseurl, meta['share'], name))
                            data = {
                                'snap_type': stype,
                                'uvisible': meta['visible'],
                            }
                            headers = {'content-type': 'application/json'}
                            api_call(url,
                                     data=data,
                                     calltype='post',
                                     headers=headers)
                            t.state = 'finished'
                        except Exception as e:
                            t.state = 'error'
                            logger.exception(e)
                        finally:
                            t.end = datetime.utcnow().replace(tzinfo=utc)
                            t.save()

                        max_count = int(float(meta['max_count']))
                        share = Share.objects.get(name=meta['share'])
                        prefix = ('%s_' % meta['prefix'])
                        snapshots = Snapshot.objects.filter(
                            share=share,
                            snap_type=stype,
                            name__startswith=prefix).order_by('-id')
                        if (len(snapshots) > max_count):
                            for snap in snapshots[max_count:]:
                                url = ('%sshares/%s/snapshots/%s' %
                                       (baseurl, meta['share'], snap.name))
                                try:
                                    api_call(url, data=None, calltype='delete')
                                except Exception as e:
                                    logger.error('Failed to delete old '
                                                 'snapshot(%s)' % snap.name)
                                    logger.exception(e)
Code example #9
File: snapshot.py Project: MFlyer/rockstor-core
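A snapshot task script like example #2, extended with a backwards-compatibility step that accepts either a share name or a share id in the metadata and migrates it to the id form (slated for removal in #1854).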
def main():
    tid = int(sys.argv[1])
    cwindow = sys.argv[2] if len(sys.argv) > 2 else '*-*-*-*-*-*'
    if (crontabwindow.crontab_range(cwindow)):
        # Performance note: immediately check task execution time/day window
        # range to avoid other calls
        tdo = TaskDefinition.objects.get(id=tid)
        stype = 'task_scheduler'
        aw = APIWrapper()
        if (tdo.task_type != 'snapshot'):
            logger.error('task_type(%s) is not snapshot.' % tdo.task_type)
            return
        meta = json.loads(tdo.json_meta)
        validate_snap_meta(meta)

        # to keep backwards compatibility, allow for share to be either
        # name or id and migrate the metadata. To be removed in #1854
        try:
            share = Share.objects.get(id=meta['share'])
        except ValueError:
            share = Share.objects.get(name=meta['share'])
            meta['share'] = share.id
            tdo.json_meta = json.dumps(meta)
            tdo.save()

        max_count = int(float(meta['max_count']))
        prefix = ('%s_' % meta['prefix'])

        now = datetime.utcnow().replace(second=0, microsecond=0, tzinfo=utc)
        t = Task(task_def=tdo, state='started', start=now)

        snap_created = False
        t.state = 'error'
        try:
            name = ('%s_%s'
                    % (meta['prefix'],
                       datetime.now().strftime(settings.SNAP_TS_FORMAT)))
            url = ('shares/{}/snapshots/{}'.format(share.id, name))
            # only create a new snap if there's no overflow situation. This
            # prevents runaway snapshot creation beyond max_count+1.
            if(delete(aw, share, stype, prefix, max_count)):
                data = {'snap_type': stype,
                        'uvisible': meta['visible'],
                        'writable': meta['writable'], }
                headers = {'content-type': 'application/json'}
                aw.api_call(url, data=data, calltype='post', headers=headers,
                            save_error=False)
                logger.debug('created snapshot at %s' % url)
                t.state = 'finished'
                snap_created = True
        except Exception as e:
            logger.error('Failed to create snapshot at %s' % url)
            logger.exception(e)
        finally:
            t.end = datetime.utcnow().replace(tzinfo=utc)
            t.save()

        # best effort pruning without erroring out. If deletion fails, we'll
        # have max_count+1 number of snapshots and it would be dealt with on
        # the next round.
        if (snap_created):
            delete(aw, share, stype, prefix, max_count)
    else:
        logger.debug('Cron scheduled task not executed because outside '
                     'time/day window ranges')
Code example #10
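A fragment of a snapshot task script: it creates the snapshot via api_call(), records the outcome in a Task row, and exposes a main() entry point that takes the taskdef object id as its single command-line argument.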
    now = datetime.utcnow().replace(second=0, microsecond=0, tzinfo=utc)
    t = Task(task_def=tdo, state='started', start=now)
    try:
        name = ('%s_%s' % (meta['prefix'], datetime.utcnow().replace(
            tzinfo=utc).strftime(settings.SNAP_TS_FORMAT)))
        url = ('%sshares/%s/snapshots/%s' % (baseurl, meta['share'], name))
        data = {
            'snap_type': stype,
            'uvisible': meta['visible'],
        }
        headers = {'content-type': 'application/json'}
        api_call(url,
                 data=data,
                 calltype='post',
                 headers=headers,
                 save_error=False)
        logger.debug('created snapshot at %s' % url)
        t.state = 'finished'
    except Exception as e:
        logger.error('Failed to create snapshot at %s' % url)
        t.state = 'error'
        logger.exception(e)
    finally:
        t.end = datetime.utcnow().replace(tzinfo=utc)
        t.save()


if __name__ == '__main__':
    # Takes one argument: the taskdef object id.
    main()
Code example #11
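The same ZeroMQ-based run() loop as example #1, differing only in minor formatting.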
    def run(self):
        context = zmq.Context()
        sink_socket = context.socket(zmq.PUSH)
        sink_socket.connect('tcp://%s:%d' % settings.SPROBE_SINK)
        total_sleep = 0
        while True:
            if (os.getppid() != self.ppid):
                logger.info('ppids: %d, %d' % (os.getppid(), self.ppid))
                for w in self.workers.keys():
                    worker = self.workers[w]
                    if (worker.is_alive()):
                        #@todo: signal worker to cleanup and exit.
                        worker.task['queue'].put('stop')
                break

            for w in self.workers.keys():
                if (not self.workers[w].is_alive()):
                    to = Task.objects.get(id=w)
                    if (self.workers[w].exitcode == 0):
                        to.state = 'finished'
                    else:
                        to.state = 'error'
                    to.end = datetime.utcnow().replace(tzinfo=utc)
                    data = serialize("json", (to, ))
                    sink_socket.send_json(data)
                    del (self.workers[w])

            if (total_sleep == 60):
                for td in TaskDefinition.objects.all():
                    now = datetime.utcnow().replace(second=0,
                                                    microsecond=0,
                                                    tzinfo=utc)
                    if (self._schedulable(td, now)):
                        t = Task(name=td.name,
                                 json_meta=td.json_meta,
                                 state='scheduled',
                                 start=now)
                        data = serialize("json", (t, ))
                        sink_socket.send_json(data)
                total_sleep = 0

            for t in Task.objects.filter(state='scheduled'):
                worker = TaskWorker(t)
                self.workers[t.id] = worker
                worker.daemon = True
                worker.start()

                if (worker.is_alive()):
                    t.state = 'running'
                    data = serialize("json", (t, ))
                    sink_socket.send_json(data)
                else:
                    t.state = 'error'
                    t.end = datetime.utcnow().replace(tzinfo=utc)
                    data = serialize("json", (t, ))
                    sink_socket.send_json(data)
            time.sleep(1)
            total_sleep = total_sleep + 1

        sink_socket.close()
        context.term()
        logger.info('terminated context. exiting')
Code example #12
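Another snapshot task fragment: the tail of a pruning loop that deletes old snapshots via the API (aborting if a delete fails), followed by creation of the new snapshot and the usual Task bookkeeping.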
            try:
                aw.api_call(url, data=None, calltype='delete', save_error=False)
                logger.debug('deleted old snapshot at %s' % url)
            except Exception as e:
                logger.error('Failed to delete old snapshot at %s' % url)
                logger.exception(e)
                return

    now = datetime.utcnow().replace(second=0, microsecond=0, tzinfo=utc)
    t = Task(task_def=tdo, state='started', start=now)
    try:
        name = ('%s_%s' % (meta['prefix'], datetime.now().strftime(settings.SNAP_TS_FORMAT)))
        url = ('shares/%s/snapshots/%s' % (meta['share'], name))
        data = {'snap_type': stype,
                'uvisible': meta['visible'], }
        headers = {'content-type': 'application/json'}
        aw.api_call(url, data=data, calltype='post', headers=headers, save_error=False)
        logger.debug('created snapshot at %s' % url)
        t.state = 'finished'
    except Exception as e:
        logger.error('Failed to create snapshot at %s' % url)
        t.state = 'error'
        logger.exception(e)
    finally:
        t.end = datetime.utcnow().replace(tzinfo=utc)
        t.save()

if __name__ == '__main__':
    # Takes one argument: the taskdef object id.
    main()
Code example #13
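The same all-in-one run() loop as example #8, with slightly different line wrapping.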
    def run(self):
        running_tasks = {}
        baseurl = 'https://localhost/api/'
        while True:
            if (os.getppid() != self.ppid):
                break
            try:
                for td in TaskDefinition.objects.filter(enabled=True):
                    now = datetime.utcnow().replace(second=0,
                                                    microsecond=0,
                                                    tzinfo=utc)
                    if (self._schedulable(td, now)):
                        if (Task.objects.filter(
                                task_def=td,
                                state__regex=r'(scheduled|started|running)').exists()):
                            logger.debug('there is already a task scheduled or running for this definition')
                        else:
                            t = Task(task_def=td, state='scheduled',
                                     start=now)
                            t.save()

                for t in Task.objects.filter(state='scheduled'):
                    meta = json.loads(t.task_def.json_meta)
                    if (t.task_def.task_type == 'scrub'):
                        url = ('%spools/%s/scrub' % (baseurl, meta['pool']))
                        try:
                            api_call(url, data=None, calltype='post')
                            t.state = 'running'
                        except:
                            t.state = 'error'
                        finally:
                            t.save()
                            if (t.state == 'running'):
                                running_tasks[t.id] = True
                    elif (t.task_def.task_type == 'snapshot'):
                        stype = 'task_scheduler'
                        try:
                            self._validate_snap_meta(meta)
                            name = ('%s_%s' %
                                    (meta['prefix'],
                                     datetime.utcnow().replace(
                                         tzinfo=utc).strftime(
                                             settings.SNAP_TS_FORMAT)))
                            url = ('%sshares/%s/snapshots/%s' %
                                   (baseurl, meta['share'], name))
                            data = {'snap_type': stype,
                                    'uvisible': meta['visible'], }
                            headers = {'content-type': 'application/json'}
                            api_call(url, data=data, calltype='post',
                                     headers=headers)
                            t.state = 'finished'
                        except Exception as e:
                            t.state = 'error'
                            logger.exception(e)
                        finally:
                            t.end = datetime.utcnow().replace(tzinfo=utc)
                            t.save()

                        max_count = int(float(meta['max_count']))
                        share = Share.objects.get(name=meta['share'])
                        prefix = ('%s_' % meta['prefix'])
                        snapshots = Snapshot.objects.filter(
                            share=share, snap_type=stype,
                            name__startswith=prefix).order_by('-id')
                        if (len(snapshots) > max_count):
                            for snap in snapshots[max_count:]:
                                url = ('%sshares/%s/snapshots/%s' %
                                       (baseurl, meta['share'], snap.name))
                                try:
                                    api_call(url, data=None, calltype='delete')
                                except Exception as e:
                                    logger.error('Failed to delete old '
                                                 'snapshot(%s)' % snap.name)
                                    logger.exception(e)
Code example #14
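A snapshot task script equivalent to example #9, formatted with double quotes.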
def main():
    tid = int(sys.argv[1])
    cwindow = sys.argv[2] if len(sys.argv) > 2 else "*-*-*-*-*-*"
    if crontabwindow.crontab_range(cwindow):
        # Performance note: immediately check task execution time/day window
        # range to avoid other calls
        tdo = TaskDefinition.objects.get(id=tid)
        stype = "task_scheduler"
        aw = APIWrapper()
        if tdo.task_type != "snapshot":
            logger.error("task_type(%s) is not snapshot." % tdo.task_type)
            return
        meta = json.loads(tdo.json_meta)
        validate_snap_meta(meta)

        # to keep backwards compatibility, allow for share to be either
        # name or id and migrate the metadata. To be removed in #1854
        try:
            share = Share.objects.get(id=meta["share"])
        except ValueError:
            share = Share.objects.get(name=meta["share"])
            meta["share"] = share.id
            tdo.json_meta = json.dumps(meta)
            tdo.save()

        max_count = int(float(meta["max_count"]))
        prefix = "%s_" % meta["prefix"]

        now = datetime.utcnow().replace(second=0, microsecond=0, tzinfo=utc)
        t = Task(task_def=tdo, state="started", start=now)

        snap_created = False
        t.state = "error"
        try:
            name = "%s_%s" % (
                meta["prefix"],
                datetime.now().strftime(settings.SNAP_TS_FORMAT),
            )
            url = "shares/{}/snapshots/{}".format(share.id, name)
            # only create a new snap if there's no overflow situation. This
            # prevents runaway snapshot creation beyond max_count+1.
            if delete(aw, share, stype, prefix, max_count):
                data = {
                    "snap_type": stype,
                    "uvisible": meta["visible"],
                    "writable": meta["writable"],
                }
                headers = {"content-type": "application/json"}
                aw.api_call(url,
                            data=data,
                            calltype="post",
                            headers=headers,
                            save_error=False)
                logger.debug("created snapshot at %s" % url)
                t.state = "finished"
                snap_created = True
        except Exception as e:
            logger.error("Failed to create snapshot at %s" % url)
            logger.exception(e)
        finally:
            t.end = datetime.utcnow().replace(tzinfo=utc)
            t.save()

        # best effort pruning without erroring out. If deletion fails, we'll
        # have max_count+1 number of snapshots and it would be dealt with on
        # the next round.
        if snap_created:
            delete(aw, share, stype, prefix, max_count)
    else:
        logger.debug(
            "Cron scheduled task not executed because outside time/day window ranges"
        )