コード例 #1
0
def main():
    """Entry point for the scheduled pool-scrub task runner.

    argv[1] is the TaskDefinition id; optional argv[2] is a crontab-style
    execution window (defaults to the always-open '*-*-*-*-*-*').

    NOTE(review): this snippet appears truncated -- the trailing
    `finally:` clause has no body.
    """
    tid = int(sys.argv[1])
    cwindow = sys.argv[2] if len(sys.argv) > 2 else '*-*-*-*-*-*'
    # Performance note: immediately check the task execution time/day
    # window range to avoid any other calls when outside the window.
    if (crontabwindow.crontab_range(cwindow)):
        tdo = TaskDefinition.objects.get(id=tid)
        if (tdo.task_type != 'scrub'):
            # logger.error returns None, so this also ends main().
            return logger.error('task_type(%s) is not scrub.' % tdo.task_type)
        meta = json.loads(tdo.json_meta)
        aw = APIWrapper()

        # Only start a new scrub once the latest Task for this definition
        # has reached a terminal state ('error' or 'finished').
        if (Task.objects.filter(task_def=tdo).exists()):
            ll = Task.objects.filter(task_def=tdo).order_by('-id')[0]
            if (ll.state != 'error' and ll.state != 'finished'):
                logger.debug('Non terminal state(%s) for task(%d). Checking '
                             'again.' % (ll.state, tid))
                # Refresh the recorded state from the pool before deciding.
                cur_state = update_state(ll, meta['pool'], aw)
                if (cur_state != 'error' and cur_state != 'finished'):
                    return logger.debug('Non terminal state(%s) for task(%d). '
                                        'A new task will not be run.' % (cur_state, tid))

        now = datetime.utcnow().replace(second=0, microsecond=0, tzinfo=utc)
        t = Task(task_def=tdo, state='started', start=now)
        url = ('pools/%s/scrub' % meta['pool'])
        try:
            aw.api_call(url, data=None, calltype='post', save_error=False)
            logger.debug('Started scrub at %s' % url)
            t.state = 'running'
        except Exception, e:  # NOTE(review): Python 2-only except syntax.
            logger.error('Failed to start scrub at %s' % url)
            t.state = 'error'
            logger.exception(e)
        finally:
コード例 #2
0
def main():
    """Prune old task-scheduler snapshots for a snapshot TaskDefinition.

    argv[1] is the TaskDefinition id. Only 'snapshot' type definitions
    are processed: any snapshots with the configured prefix beyond
    meta['max_count'] (keeping the newest) are deleted via the API.
    Stops at the first failed deletion.
    """
    tid = int(sys.argv[1])
    tdo = TaskDefinition.objects.get(id=tid)
    stype = 'task_scheduler'
    aw = APIWrapper()
    if (tdo.task_type != 'snapshot'):
        logger.error('task_type(%s) is not snapshot.' % tdo.task_type)
        return
    meta = json.loads(tdo.json_meta)
    validate_snap_meta(meta)

    max_count = int(float(meta['max_count']))
    share = Share.objects.get(name=meta['share'])
    prefix = ('%s_' % meta['prefix'])
    # Newest first, so the slice past max_count selects the oldest ones.
    snapshots = Snapshot.objects.filter(
        share=share, snap_type=stype, name__startswith=prefix).order_by('-id')
    if (len(snapshots) > max_count):
        for snap in snapshots[max_count:]:
            url = ('shares/%s/snapshots/%s' % (meta['share'], snap.name))
            try:
                aw.api_call(url,
                            data=None,
                            calltype='delete',
                            save_error=False)
                logger.debug('deleted old snapshot at %s' % url)
            except Exception as e:  # was py2-only `except Exception, e:`
                logger.error('Failed to delete old snapshot at %s' % url)
                logger.exception(e)
                return
コード例 #3
0
File: snapshot.py  Project: sirio81/rockstor-core
def main():
    """Run a scheduled snapshot task; the TaskDefinition id is argv[1].

    An optional crontab-style window in argv[2] gates execution; the
    default window '*-*-*-*-*-*' is always open.
    """
    task_id = int(sys.argv[1])
    window = sys.argv[2] if len(sys.argv) > 2 else '*-*-*-*-*-*'
    # Performance note: check the time/day window first so nothing else
    # happens when we are outside of it.
    if not crontabwindow.crontab_range(window):
        logger.debug('Cron scheduled task not executed because outside '
                     'time/day window ranges')
        return

    task_def = TaskDefinition.objects.get(id=task_id)
    snap_type = 'task_scheduler'
    api = APIWrapper()
    if task_def.task_type != 'snapshot':
        logger.error('task_type(%s) is not snapshot.' % task_def.task_type)
        return
    meta = json.loads(task_def.json_meta)
    validate_snap_meta(meta)
    share = Share.objects.get(id=meta['share'])
    max_count = int(float(meta['max_count']))
    prefix = '%s_' % meta['prefix']

    begin = datetime.utcnow().replace(second=0, microsecond=0, tzinfo=utc)
    task = Task(task_def=task_def, state='started', start=begin)

    created = False
    task.state = 'error'
    try:
        snap_name = '%s_%s' % (
            meta['prefix'], datetime.now().strftime(settings.SNAP_TS_FORMAT))
        url = 'shares/{}/snapshots/{}'.format(share.id, snap_name)
        # only create a new snap if there's no overflow situation. This
        # prevents runaway snapshot creation beyond max_count+1.
        if delete(api, share, snap_type, prefix, max_count):
            payload = {
                'snap_type': snap_type,
                'uvisible': meta['visible'],
                'writable': meta['writable'],
            }
            api.api_call(url,
                         data=payload,
                         calltype='post',
                         headers={'content-type': 'application/json'},
                         save_error=False)
            logger.debug('created snapshot at %s' % url)
            task.state = 'finished'
            created = True
    except Exception as e:
        logger.error('Failed to create snapshot at %s' % url)
        logger.exception(e)
    finally:
        task.end = datetime.utcnow().replace(tzinfo=utc)
        task.save()

    # best effort pruning without erroring out. If deletion fails, we'll
    # have max_count+1 number of snapshots and it would be dealt with on
    # the next round.
    if created:
        delete(api, share, snap_type, prefix, max_count)
コード例 #4
0
    def on_connect(self, sid, environ):
        """Greet a newly connected pincard-manager client."""
        self.aw = APIWrapper()
        welcome = {
            'key': 'pincardManager:pincardwelcome',
            'data': 'Welcome to Rockstor PincardManager'
        }
        self.emit('pincardwelcome', welcome)
コード例 #5
0
def main():
    """Boot-time bootstrap: scan BTRFS devices, then bootstrap via the API.

    Exits with status 1 on a failed device scan or after too many failed
    bootstrap attempts; qgroup maintenance failures are logged only.
    """
    try:
        device_scan()
    except Exception as exc:
        print(
            "BTRFS device scan failed due to an exception. This indicates "
            "a serious problem. Aborting. Exception: %s" % str(exc)
        )
        sys.exit(1)
    print("BTRFS device scan complete")

    # if the appliance is not setup, there's nothing more to do beyond
    # device scan
    setup_row = Setup.objects.first()
    if setup_row is None or setup_row.setup_user is False:
        print("Appliance is not yet setup.")
        return

    attempts = 0
    while True:
        try:
            api = APIWrapper()
            time.sleep(2)
            api.api_call("network")
            api.api_call("commands/bootstrap", calltype="post")
            break
        except Exception as exc:
            # Retry on every exception, primarily because of django-oauth
            # related code behaving unpredictably while setting
            # tokens. Retrying is a decent workaround for now(11302015).
            if attempts > 15:
                print(
                    "Max attempts(15) reached. Connection errors persist. "
                    "Failed to bootstrap. Error: %s" % str(exc)
                )
                sys.exit(1)
            print(
                "Exception occured while bootstrapping. This could be "
                "because rockstor.service is still starting up. will "
                "wait 2 seconds and try again. Exception: %s" % str(exc)
            )
            time.sleep(2)
            attempts += 1
    print("Bootstrapping complete")

    try:
        print("Running qgroup cleanup. %s" % QGROUP_CLEAN)
        run_command([QGROUP_CLEAN])
    except Exception as exc:
        print("Exception while running %s: %s" % (QGROUP_CLEAN, str(exc)))

    try:
        print("Running qgroup limit maxout. %s" % QGROUP_MAXOUT_LIMIT)
        run_command([QGROUP_MAXOUT_LIMIT])
    except Exception as exc:
        print("Exception while running %s: %s" % (QGROUP_MAXOUT_LIMIT, str(exc)))
コード例 #6
0
    def on_connect(self, sid, environ):
        """Greet a newly connected pincard-manager client."""
        self.aw = APIWrapper()
        payload = {
            "key": "pincardManager:pincardwelcome",
            "data": "Welcome to Rockstor PincardManager",
        }
        self.emit("pincardwelcome", payload)
コード例 #7
0
File: pool_scrub.py  Project: zboy13/rockstor-core
def main():
    """Run a scheduled pool-scrub task (TaskDefinition id in argv[1]).

    argv[2] may supply a crontab-style execution window; the default
    "*-*-*-*-*-*" is always open. Polls scrub state every 60 seconds
    until it reaches a terminal state.
    """
    task_id = int(sys.argv[1])
    window = sys.argv[2] if len(sys.argv) > 2 else "*-*-*-*-*-*"
    # Performance note: check the execution time/day window up front so
    # no other work happens when we are outside of it.
    if not crontabwindow.crontab_range(window):
        logger.debug(
            "Cron scheduled task not executed because outside time/day window ranges"
        )
        return

    task_def = TaskDefinition.objects.get(id=task_id)
    if task_def.task_type != "scrub":
        return logger.error("task_type(%s) is not scrub." % task_def.task_type)
    meta = json.loads(task_def.json_meta)
    api = APIWrapper()

    # Refuse to start a new scrub while the previous one is still live.
    previous = Task.objects.filter(task_def=task_def).order_by("-id").first()
    if previous is not None and previous.state not in TERMINAL_SCRUB_STATES:
        logger.debug("Non terminal state(%s) for task(%d). Checking "
                     "again." % (previous.state, task_id))
        state_now = update_state(previous, meta["pool"], api)
        if state_now not in TERMINAL_SCRUB_STATES:
            return logger.debug("Non terminal state(%s) for task(%d). "
                                "A new task will not be run." %
                                (state_now, task_id))

    begin = datetime.utcnow().replace(second=0, microsecond=0, tzinfo=utc)
    task = Task(task_def=task_def, state="started", start=begin)
    url = "pools/%s/scrub" % meta["pool"]
    try:
        api.api_call(url, data=None, calltype="post", save_error=False)
        logger.debug("Started scrub at %s" % url)
        task.state = "running"
    except Exception as e:
        logger.error("Failed to start scrub at %s" % url)
        task.state = "error"
        logger.exception(e)
    finally:
        task.save()

    # Poll for completion once a minute.
    while True:
        state_now = update_state(task, meta["pool"], api)
        if state_now in TERMINAL_SCRUB_STATES:
            logger.debug("task(%d) finished with state(%s)." %
                         (task_id, state_now))
            task.end = datetime.utcnow().replace(tzinfo=utc)
            task.save()
            break
        logger.debug("pending state(%s) for scrub task(%d). Will check "
                     "again in 60 seconds." % (state_now, task_id))
        time.sleep(60)
コード例 #8
0
def main():
    """Scheduled pool-scrub runner; TaskDefinition id comes in argv[1].

    An optional crontab-style window string may be given as argv[2]
    (default '*-*-*-*-*-*', i.e. always run). Polls the scrub every 60
    seconds until it reaches a terminal state.
    """
    task_id = int(sys.argv[1])
    window = sys.argv[2] if len(sys.argv) > 2 else '*-*-*-*-*-*'
    if crontabwindow.crontab_range(window):
        # Performance note: immediately check task execution time/day
        # window range to avoid other calls
        td = TaskDefinition.objects.get(id=task_id)
        if td.task_type != 'scrub':
            return logger.error('task_type(%s) is not scrub.' % td.task_type)
        meta = json.loads(td.json_meta)
        client = APIWrapper()

        # Refuse to start a new scrub while the previous one is live.
        history = Task.objects.filter(task_def=td)
        if history.exists():
            last_task = history.order_by('-id')[0]
            if last_task.state not in TERMINAL_SCRUB_STATES:
                logger.debug('Non terminal state(%s) for task(%d). Checking '
                             'again.' % (last_task.state, task_id))
                state = update_state(last_task, meta['pool'], client)
                if state not in TERMINAL_SCRUB_STATES:
                    return logger.debug('Non terminal state(%s) for task(%d). '
                                        'A new task will not be run.' %
                                        (state, task_id))

        begin = datetime.utcnow().replace(second=0, microsecond=0, tzinfo=utc)
        scrub_task = Task(task_def=td, state='started', start=begin)
        url = 'pools/%s/scrub' % meta['pool']
        try:
            client.api_call(url, data=None, calltype='post', save_error=False)
            logger.debug('Started scrub at %s' % url)
            scrub_task.state = 'running'
        except Exception as e:
            logger.error('Failed to start scrub at %s' % url)
            scrub_task.state = 'error'
            logger.exception(e)
        finally:
            scrub_task.save()

        # Poll for completion once a minute.
        while True:
            state = update_state(scrub_task, meta['pool'], client)
            if state in TERMINAL_SCRUB_STATES:
                logger.debug('task(%d) finished with state(%s).' %
                             (task_id, state))
                scrub_task.end = datetime.utcnow().replace(tzinfo=utc)
                scrub_task.save()
                break
            logger.debug('pending state(%s) for scrub task(%d). Will check '
                         'again in 60 seconds.' % (state, task_id))
            time.sleep(60)
    else:
        logger.debug('Cron scheduled task not executed because outside '
                     'time/day window ranges')
コード例 #9
0
    def on_connect(self, sid, environ):
        """Announce the connection and start the periodic updater tasks."""
        self.aw = APIWrapper()
        self.emit('connected', {
            'key': 'sysinfo:connected',
            'data': 'connected'
        })
        self.start = True
        # Kick off every background updater for this client session.
        for updater in (self.update_storage_state,
                        self.update_check,
                        self.update_rockons,
                        self.send_kernel_info,
                        self.prune_logs,
                        self.send_localtime,
                        self.send_uptime):
            self.spawn(updater, sid)
コード例 #10
0
    def on_connect(self, sid, environ):
        """Announce the connection and start the periodic updater tasks."""
        self.aw = APIWrapper()
        self.emit("connected", {"key": "sysinfo:connected", "data": "connected"})
        self.start = True
        # Kick off every background updater for this client session.
        for updater in (self.update_storage_state,
                        self.update_check,
                        self.yum_updates,
                        self.send_kernel_info,
                        self.prune_logs,
                        self.send_localtime,
                        self.send_uptime,
                        self.send_distroinfo,
                        self.shutdown_status,
                        self.pool_degraded_status,
                        self.pool_dev_stats):
            self.spawn(updater, sid)
コード例 #11
0
def main():
    """Scheduled snapshot task runner (Python 2-era variant).

    argv[1] is the TaskDefinition id; argv[2] is a required crontab-style
    execution window.

    NOTE(review): this snippet appears truncated -- the trailing
    `finally:` clause has no body.
    """
    tid = int(sys.argv[1])
    cwindow = sys.argv[2]
    if (
            crontabwindow.crontab_range(cwindow)
    ):  # Performance note: immediately check task execution time/day window range to avoid other calls
        tdo = TaskDefinition.objects.get(id=tid)
        stype = 'task_scheduler'
        aw = APIWrapper()
        if (tdo.task_type != 'snapshot'):
            logger.error('task_type(%s) is not snapshot.' % tdo.task_type)
            return
        meta = json.loads(tdo.json_meta)
        validate_snap_meta(meta)
        share = Share.objects.get(name=meta['share'])
        max_count = int(float(meta['max_count']))
        prefix = ('%s_' % meta['prefix'])

        now = datetime.utcnow().replace(second=0, microsecond=0, tzinfo=utc)
        t = Task(task_def=tdo, state='started', start=now)

        # Assume failure until the snapshot is actually created.
        snap_created = False
        t.state = 'error'
        try:
            name = ('%s_%s' % (meta['prefix'], datetime.now().strftime(
                settings.SNAP_TS_FORMAT)))
            url = ('shares/%s/snapshots/%s' % (share.name, name))
            # only create a new snap if there's no overflow situation. This
            # prevents runaway snapshot creation beyond max_count+1.
            if (delete(aw, share, stype, prefix, max_count)):
                data = {
                    'snap_type': stype,
                    'uvisible': meta['visible'],
                }
                headers = {'content-type': 'application/json'}
                aw.api_call(url,
                            data=data,
                            calltype='post',
                            headers=headers,
                            save_error=False)
                logger.debug('created snapshot at %s' % url)
                t.state = 'finished'
                snap_created = True
        except Exception, e:  # NOTE(review): Python 2-only except syntax.
            logger.error('Failed to create snapshot at %s' % url)
            logger.exception(e)
        finally:
コード例 #12
0
 def get(self, *args, **kwargs):
     """Return the list of pool names on a remote appliance.

     The appliance is looked up by the `auuid` URL kwarg; failures are
     reported to the caller through handle_exception.
     """
     ao = None  # guard: referenced in the generic handler below
     try:
         auuid = self.kwargs.get("auuid", None)
         ao = Appliance.objects.get(uuid=auuid)
         url = "https://%s:%s" % (ao.ip, ao.mgmt_port)
         aw = APIWrapper(client_id=ao.client_id,
                         client_secret=ao.client_secret,
                         url=url)
         response = aw.api_call("pools")
         res = [p["name"] for p in response["results"]]
         return Response(res)
     except Appliance.DoesNotExist:
         msg = "Remote appliance with the given uuid(%s) does not exist." % auuid
         handle_exception(Exception(msg), self.request)
     except Exception as e:
         # ao can be None if the failure happened before the lookup
         # completed; fall back to the uuid so building the error message
         # never raises NameError and masks the real exception.
         target = ao.ip if ao is not None else auuid
         msg = ("Failed to retrieve list of Pools on the remote "
                "appliance(%s). Make sure it is running and try again. "
                "Here is the exact error: %s" % (target, e.__str__()))
         handle_exception(Exception(msg), self.request)
コード例 #13
0
def main():
    """Exercise qgroup accounting on a pool by cycling share resizes.

    Usage: <script> <pool name>. Creates a test share, then repeatedly
    fills, expands, prunes and shrinks it, and finally deletes it.
    """
    if (len(sys.argv) == 1):
        sys.exit('Usage: %s <pool name>' % sys.argv[0])
    pname = sys.argv[1]
    sname = 'qgroup-test-share1'
    size = 1024 * 1024  # 1 GiB (size unit appears to be KiB -- per original comment)
    aw = APIWrapper()
    # Return values of the API helpers are not needed here; the calls are
    # made for their side effects only (unused `res`/`res2` removed).
    create_share(aw, sname, pname, size)
    print('Share(%s) created. Size: %d' % (sname, size))

    fill_up_share(pname, sname)
    # expand Share and fill up. repeat 3 times
    for _ in range(3):
        size += (1024 * 512)
        resize_share(aw, sname, size)
        fill_up_share(pname, sname)

    # remove random files and fill up. repeat 3 times.
    for _ in range(3):
        # expand a bit so we can actually remove some files.
        size += (1024 * 128)
        resize_share(aw, sname, size)
        remove_random_files(pname, sname)
        fill_up_share(pname, sname)

    # remove random files, shrink the pool by half of the freed capacity,
    # fill up. repeat 3 times.
    for _ in range(3):
        # expand a bit so we can actually remove files.
        size += (1024 * 128)
        resize_share(aw, sname, size)
        remove_random_files(pname, sname)
        so = Share.objects.get(name=sname)
        rusage, eusage = share_usage(so.pool, so.qgroup)
        free_space = so.size - rusage
        print('Free space on Share(%s): %d' % (sname, free_space))
        size -= int(free_space / 2)
        resize_share(aw, sname, size)
        fill_up_share(pname, sname)

    aw.api_call('shares/%s' % sname,
                calltype='delete',
                save_error=False)
    print('Share(%s) deleted.' % sname)
コード例 #14
0
 def get(self, *args, **kwargs):
     """Return the list of pool names on a remote appliance.

     The appliance is looked up by the `auuid` URL kwarg; failures are
     reported to the caller through handle_exception.
     """
     ao = None  # guard: referenced in the generic handler below
     try:
         auuid = self.kwargs.get('auuid', None)
         ao = Appliance.objects.get(uuid=auuid)
         url = ('https://%s:%s' % (ao.ip, ao.mgmt_port))
         aw = APIWrapper(client_id=ao.client_id,
                         client_secret=ao.client_secret,
                         url=url)
         response = aw.api_call('pools')
         res = [p['name'] for p in response['results']]
         return Response(res)
     except Appliance.DoesNotExist:
         msg = ('Remote appliance with the given uuid(%s) does not exist.' %
                auuid)
         handle_exception(Exception(msg), self.request)
     except Exception as e:  # was py2-only `except Exception, e:`
         # ao may be None if the failure occurred before the lookup
         # completed; fall back to the uuid to avoid a NameError here.
         target = ao.ip if ao is not None else auuid
         msg = ('Failed to retrieve list of Pools on the remote '
                'appliance(%s). Make sure it is running and try again. '
                'Here is the exact error: %s' % (target, e.__str__()))
         handle_exception(Exception(msg), self.request)
コード例 #15
0
    def on_connect(self, sid, environ):
        """Announce the connection and launch all periodic updaters."""
        self.aw = APIWrapper()
        self.emit('connected', {
            'key': 'sysinfo:connected',
            'data': 'connected'
        })
        self.start = True
        updaters = (self.update_storage_state,
                    self.update_check,
                    self.yum_updates,
                    self.update_rockons,
                    self.send_kernel_info,
                    self.prune_logs,
                    self.send_localtime,
                    self.send_uptime,
                    self.send_distroinfo,
                    self.shutdown_status,
                    self.pool_degraded_status,
                    self.pool_dev_stats)
        for updater in updaters:
            self.spawn(updater, sid)
コード例 #16
0
def main():
    """Run a scheduled snapshot task; TaskDefinition id is argv[1].

    argv[2] may provide a crontab-style execution window (default
    "*-*-*-*-*-*", i.e. always open). Creates one new snapshot unless
    the share already holds max_count, then prunes the oldest extras.
    """
    task_id = int(sys.argv[1])
    window = sys.argv[2] if len(sys.argv) > 2 else "*-*-*-*-*-*"
    # Performance note: check the execution time/day window first so no
    # other calls are made outside of it.
    if not crontabwindow.crontab_range(window):
        logger.debug(
            "Cron scheduled task not executed because outside time/day window ranges"
        )
        return

    task_def = TaskDefinition.objects.get(id=task_id)
    snap_type = "task_scheduler"
    api = APIWrapper()
    if task_def.task_type != "snapshot":
        logger.error("task_type(%s) is not snapshot." % task_def.task_type)
        return
    meta = json.loads(task_def.json_meta)
    validate_snap_meta(meta)

    # to keep backwards compatibility, allow for share to be either
    # name or id and migrate the metadata. To be removed in #1854
    try:
        share = Share.objects.get(id=meta["share"])
    except ValueError:
        share = Share.objects.get(name=meta["share"])
        meta["share"] = share.id
        task_def.json_meta = json.dumps(meta)
        task_def.save()

    max_count = int(float(meta["max_count"]))
    prefix = "%s_" % meta["prefix"]

    begin = datetime.utcnow().replace(second=0, microsecond=0, tzinfo=utc)
    task = Task(task_def=task_def, state="started", start=begin)

    created = False
    task.state = "error"
    try:
        snap_name = "%s_%s" % (
            meta["prefix"],
            datetime.now().strftime(settings.SNAP_TS_FORMAT),
        )
        url = "shares/{}/snapshots/{}".format(share.id, snap_name)
        # only create a new snap if there's no overflow situation. This
        # prevents runaway snapshot creation beyond max_count+1.
        if delete(api, share, snap_type, prefix, max_count):
            payload = {
                "snap_type": snap_type,
                "uvisible": meta["visible"],
                "writable": meta["writable"],
            }
            api.api_call(url,
                         data=payload,
                         calltype="post",
                         headers={"content-type": "application/json"},
                         save_error=False)
            logger.debug("created snapshot at %s" % url)
            task.state = "finished"
            created = True
    except Exception as e:
        logger.error("Failed to create snapshot at %s" % url)
        logger.exception(e)
    finally:
        task.end = datetime.utcnow().replace(tzinfo=utc)
        task.save()

    # best effort pruning without erroring out. If deletion fails, we'll
    # have max_count+1 number of snapshots and it would be dealt with on
    # the next round.
    if created:
        delete(api, share, snap_type, prefix, max_count)
コード例 #17
0
from storageadmin.models import (RockOn, DContainer, DVolume, DPort,
                                 DCustomConfig, DContainerLink,
                                 ContainerOption, DContainerEnv,
                                 DContainerDevice, DContainerArgs)
from fs.btrfs import mount_share
from rockon_utils import container_status
import logging

DOCKER = '/usr/bin/docker'
ROCKON_URL = 'https://localhost/api/rockons'
DCMD = [DOCKER, 'run', ]
DCMD2 = list(DCMD) + ['-d', '--restart=unless-stopped', ]


logger = logging.getLogger(__name__)
aw = APIWrapper()


def docker_status():
    """Return True if the docker service is running, False otherwise."""
    # service_status returns (stdout, stderr, returncode); only the
    # return code matters here. The if/return-True/False chain is
    # replaced by the direct boolean expression.
    _, _, rc = service_status('docker')
    return rc == 0


def rockon_status(name):
    """Return the status of the named Rock-on.

    A module-level `<name>_status` function takes precedence when one
    exists; otherwise fall back to the status of the container with the
    highest launch order.
    """
    ro = RockOn.objects.get(name=name)
    custom = globals().get('%s_status' % ro.name.lower())
    if custom is not None:
        return custom(ro)
    top = DContainer.objects.filter(rockon=ro).order_by('-launch_order')[0]
    return container_status(top.name)
コード例 #18
0
def main():
    """Schedule a system reboot/shutdown/suspend task.

    argv[1] is the TaskDefinition id; optional argv[2] is a crontab-style
    execution window (defaults to the always-open "*-*-*-*-*-*").
    """
    tid = int(sys.argv[1])
    cwindow = sys.argv[2] if len(sys.argv) > 2 else "*-*-*-*-*-*"
    if crontabwindow.crontab_range(cwindow):
        # Performance note: immediately check task execution time/day window
        # range to avoid other calls
        tdo = TaskDefinition.objects.get(id=tid)
        aw = APIWrapper()
        if tdo.task_type not in ["reboot", "shutdown", "suspend"]:
            logger.error(
                "task_type(%s) is not a system reboot, "
                "shutdown or suspend." % tdo.task_type
            )
            return
        meta = json.loads(tdo.json_meta)
        validate_shutdown_meta(meta)

        if not run_conditions_met(meta):
            logger.debug(
                "Cron scheduled task not executed because the run conditions have not been met"
            )
            return

        # The task record spans now..now+3min; the actual power action is
        # delegated to the API call below.
        now = datetime.utcnow().replace(second=0, microsecond=0, tzinfo=utc)
        schedule = now + timedelta(minutes=3)
        t = Task(task_def=tdo, state="scheduled", start=now, end=schedule)

        try:
            # set default command url before checking if it's a shutdown
            # and if we have an rtc wake up
            url = "commands/%s" % tdo.task_type

            # if task_type is shutdown and rtc wake up true
            # parse crontab hour & minute vs rtc hour & minute to state
            # if wake will occur same day or next day, finally update
            # command url adding wake up epoch time
            if tdo.task_type in ["shutdown", "suspend"] and meta["wakeup"]:
                # assumes crontab field order "minute hour ..." -- fields[1]
                # is scaled by 60, i.e. minutes since midnight.
                crontab_fields = tdo.crontab.split()
                crontab_time = int(crontab_fields[1]) * 60 + int(crontab_fields[0])
                wakeup_time = meta["rtc_hour"] * 60 + meta["rtc_minute"]
                # rtc wake up requires UTC epoch, but users on WebUI set time
                # thinking to localtime, so first we set wake up time,
                # update it if wake up is on next day, finally move it to UTC
                # and get its epoch
                epoch = datetime.now().replace(
                    hour=int(meta["rtc_hour"]),
                    minute=int(meta["rtc_minute"]),
                    second=0,
                    microsecond=0,
                )
                # if wake up < crontab time wake up will run next day
                if crontab_time > wakeup_time:
                    epoch += timedelta(days=1)

                # "%s" is a platform-dependent strftime extension that
                # formats as seconds since the epoch.
                epoch = epoch.strftime("%s")
                url = "%s/%s" % (url, epoch)

            aw.api_call(url, data=None, calltype="post", save_error=False)
            logger.debug("System %s scheduled" % tdo.task_type)
            t.state = "finished"

        except Exception as e:
            t.state = "failed"
            logger.error("Failed to schedule system %s" % tdo.task_type)
            logger.exception(e)

        finally:
            # t.end = datetime.utcnow().replace(tzinfo=utc)
            t.save()

    else:
        logger.debug(
            "Cron scheduled task not executed because outside time/day window ranges"
        )
コード例 #19
0
 def initialize(self):
     """Create the API client used by this handler's background work."""
     self.aw = APIWrapper()
コード例 #20
0
def main():
    """Schedule a system reboot/shutdown/suspend task.

    argv[1] is the TaskDefinition id; optional argv[2] is a crontab-style
    execution window (defaults to the always-open '*-*-*-*-*-*').
    """
    tid = int(sys.argv[1])
    cwindow = sys.argv[2] if len(sys.argv) > 2 else '*-*-*-*-*-*'
    if (crontabwindow.crontab_range(cwindow)):
        # Performance note: immediately check task execution time/day window
        # range to avoid other calls
        tdo = TaskDefinition.objects.get(id=tid)
        aw = APIWrapper()
        if (tdo.task_type not in ['reboot', 'shutdown', 'suspend']):
            logger.error('task_type(%s) is not a system reboot, '
                         'shutdown or suspend.' % tdo.task_type)
            return
        meta = json.loads(tdo.json_meta)
        validate_shutdown_meta(meta)

        # The task record spans now..now+3min; the actual power action is
        # delegated to the API call below.
        now = datetime.utcnow().replace(second=0, microsecond=0, tzinfo=utc)
        schedule = now + timedelta(minutes=3)
        t = Task(task_def=tdo, state='scheduled', start=now, end=schedule)

        try:
            # set default command url before checking if it's a shutdown
            # and if we have an rtc wake up
            url = ('commands/%s' % tdo.task_type)

            # if task_type is shutdown and rtc wake up true
            # parse crontab hour & minute vs rtc hour & minute to state
            # if wake will occur same day or next day, finally update
            # command url adding wake up epoch time
            if (tdo.task_type in ['shutdown', 'suspend'] and meta['wakeup']):
                # assumes crontab field order 'minute hour ...' -- fields[1]
                # is scaled by 60, i.e. minutes since midnight.
                crontab_fields = tdo.crontab.split()
                crontab_time = (int(crontab_fields[1]) * 60 +
                                int(crontab_fields[0]))
                wakeup_time = meta['rtc_hour'] * 60 + meta['rtc_minute']
                # rtc wake up requires UTC epoch, but users on WebUI set time
                # thinking to localtime, so first we set wake up time,
                # update it if wake up is on next day, finally move it to UTC
                # and get its epoch
                epoch = datetime.now().replace(hour=int(meta['rtc_hour']),
                                               minute=int(meta['rtc_minute']),
                                               second=0,
                                               microsecond=0)
                # if wake up < crontab time wake up will run next day
                if (crontab_time > wakeup_time):
                    epoch += timedelta(days=1)

                # '%s' is a platform-dependent strftime extension that
                # formats as seconds since the epoch.
                epoch = epoch.strftime('%s')
                url = ('%s/%s' % (url, epoch))

            aw.api_call(url, data=None, calltype='post', save_error=False)
            logger.debug('System %s scheduled' % tdo.task_type)
            t.state = 'finished'

        except Exception as e:
            t.state = 'failed'
            logger.error('Failed to schedule system %s' % tdo.task_type)
            logger.exception(e)

        finally:
            # t.end = datetime.utcnow().replace(tzinfo=utc)
            t.save()

    else:
        logger.debug('Cron scheduled task not executed because outside '
                     'time/day window ranges')