Example #1
    def testQueue_Task(self):
        def isnotqueued(result):
            self.assertEqual(result.id, None)
            self.assertEqual(result.uuid, None)
            self.assertEqual(len(result.errors.keys()) > 0, True)

        def isqueued(result):
            self.assertNotEqual(result.id, None)
            self.assertNotEqual(result.uuid, None)
            self.assertEqual(len(result.errors.keys()), 0)

        s = Scheduler(self.db)
        fname = 'foo'
        watch = s.queue_task(fname, task_name='watch')
        # queuing a task returns id, errors, uuid
        self.assertEqual(set(watch.keys()), set(['id', 'uuid', 'errors']))
        # queueing nothing isn't allowed
        self.assertRaises(TypeError, s.queue_task, *[])
        # passing pargs and pvars wrongly
        # # pargs as dict
        isnotqueued(s.queue_task(fname, dict(a=1), dict(b=1)))
        # # pvars as list
        isnotqueued(s.queue_task(fname, ['foo', 'bar'], ['foo', 'bar']))
        # two tasks with the same uuid won't be there
        isqueued(s.queue_task(fname, uuid='a'))
        isnotqueued(s.queue_task(fname, uuid='a'))
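
The helper assertions above pin down queue_task's contract: it always returns a row with exactly id, uuid, and errors, and on a validation failure (pargs not a list, pvars not a dict, a reused uuid) id and uuid are None while errors is populated. A minimal usage sketch built on that contract (names mirror the test; 'foo' must exist in the scheduler's task environment):

    r = s.queue_task('foo', pargs=['a', 'b'], pvars=dict(c=1), uuid='report-1')
    if r.id is None:
        # not queued: errors maps field names to validation messages
        print(dict(r.errors))
    else:
        db.commit()  # workers only see the task once the insert is committed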
Example #2
    def testNoReturn_and_Timeout_and_Progress(self):
        s = Scheduler(self.db)
        noret1 = s.queue_task('demo5')
        noret2 = s.queue_task('demo3')
        timeout1 = s.queue_task('demo4', timeout=5)
        timeout2 = s.queue_task('demo4')
        progress = s.queue_task('demo6', sync_output=2)
        self.db.commit()
        self.writefunction(r"""
def demo3():
    time.sleep(15)
    print(1/0)
    return None

def demo4():
    time.sleep(15)
    print("I'm printing something")
    return dict(a=1, b=2)

def demo5():
    time.sleep(15)
    print("I'm printing something")
    rtn = dict(a=1, b=2)

def demo6():
    time.sleep(5)
    print('50%')
    time.sleep(5)
    print('!clear!100%')
    return 1
""")
        ret = self.exec_sched()
        self.assertEqual(ret, 0)
        # noreturn check
        task1, task_run1 = self.fetch_results(s, noret1)
        task2, task_run2 = self.fetch_results(s, noret2)
        res = [
            ("tasks no_returns1 completed", task1.status == 'COMPLETED'),
            ("tasks no_returns2 failed", task2.status == 'FAILED'),
            ("no_returns1 doesn't have a scheduler_run record", len(task_run1) == 0),
            ("no_returns2 has a scheduler_run record FAILED", (len(task_run2) == 1 and task_run2[0].status == 'FAILED')),
        ]
        self.exec_asserts(res, 'NO_RETURN')

        # timeout check
        task1 = s.task_status(timeout1.id, output=True)
        task2 = s.task_status(timeout2.id, output=True)
        res = [
            ("tasks timeouts1 timeoutted", task1.scheduler_task.status == 'TIMEOUT'),
            ("tasks timeouts2 completed", task2.scheduler_task.status == 'COMPLETED')
        ]
        self.exec_asserts(res, 'TIMEOUT')

        # progress check
        task1 = s.task_status(progress.id, output=True)
        res = [
            ("tasks percentages completed", task1.scheduler_task.status == 'COMPLETED'),
            ("output contains only 100%", task1.scheduler_run.run_output.strip() == "100%")
        ]
        self.exec_asserts(res, 'PROGRESS')
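
The PROGRESS check hinges on two features used above: sync_output=2 makes the worker flush the task's captured stdout into scheduler_run.run_output every 2 seconds, and a !clear! marker in that output discards everything captured before it, which is why only "100%" survives. A sketch of polling that output from the main application while the task runs (the loop is illustrative, not part of the test harness):

    import time

    progress = s.queue_task('demo6', sync_output=2)
    db.commit()  # db is the connection handed to Scheduler(db)
    while True:
        status = s.task_status(progress.id, output=True)
        if status.scheduler_task.status in ('COMPLETED', 'FAILED', 'TIMEOUT'):
            break
        if status.scheduler_run:
            print(status.scheduler_run.run_output)  # "50%", then "100%" after !clear!
        time.sleep(2)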
Example #4
    def testRegressions(self):
        s = Scheduler(self.db)
        huge_result = s.queue_task('demo10', retry_failed=1, period=1)
        issue_1485 = s.queue_task('issue_1485')
        termination = s.queue_task('termination')
        self.db.commit()
        self.writefunction(r"""
def demo10():
    res = 'a' * 99999
    return dict(res=res)

def issue_1485():
    return response.render('issue_1485.html', dict(variable='abc'))
""")
        self.writeview(r"""<span>{{=variable}}</span>""", 'issue_1485.html')
        ret = self.exec_sched()
        # process finished just fine
        self.assertEqual(ret, 0)
        # huge_result - checks
        task_huge = s.task_status(huge_result.id, output=True)
        res = [
            ("task status completed", task_huge.scheduler_task.status == 'COMPLETED'),
            ("task times_run is 1", task_huge.scheduler_task.times_run == 1),
            ("result is the correct one", task_huge.result == dict(res='a' * 99999))
        ]
        self.exec_asserts(res, 'HUGE_RESULT')

        task_issue_1485 = s.task_status(issue_1485.id, output=True)
        res = [
            ("task status completed", task_issue_1485.scheduler_task.status == 'COMPLETED'),
            ("task times_run is 1", task_issue_1485.scheduler_task.times_run == 1),
            ("result is the correct one", task_issue_1485.result == '<span>abc</span>')
        ]
        self.exec_asserts(res, 'issue_1485')
Example #6
    def testRetryFailed(self):
        s = Scheduler(self.db)
        failed = s.queue_task('demo2', retry_failed=1, period=1)
        failed_consecutive = s.queue_task('demo8',
                                          retry_failed=2,
                                          repeats=2,
                                          period=1)
        self.db.commit()
        self.writefunction(r"""
def demo2():
    1/0

def demo8():
    placeholder = os.path.join(request.folder, 'private', 'demo8.pholder')
    with open(placeholder, 'a') as g:
        g.write('\nplaceholder for demo8 created')
    num_of_lines = 0
    with open(placeholder) as f:
        num_of_lines = len([a for a in f.read().split('\n') if a])
    print('number of lines', num_of_lines)
    if num_of_lines <= 2:
       1/0
    else:
        os.unlink(placeholder)
    return 1
""")
        ret = self.exec_sched()
        # process finished just fine
        self.assertEqual(ret, 0)
        # failed - checks
        task, task_run = self.fetch_results(s, failed)
        res = [("task status failed", task.status == 'FAILED'),
               ("task times_run is 0", task.times_run == 0),
               ("task times_failed is 2", task.times_failed == 2),
               ("task ran 2 times only", len(task_run) == 2),
               ("scheduler_run records are FAILED",
                (task_run[0].status == task_run[1].status == 'FAILED')),
               ("period is respected",
                (task_run[1].start_time > task_run[0].start_time +
                 datetime.timedelta(seconds=task.period)))]
        self.exec_asserts(res, 'FAILED')

        # failed consecutive - checks
        task, task_run = self.fetch_results(s, failed_consecutive)
        res = [
            ("task status completed", task.status == 'COMPLETED'),
            ("task times_run is 2", task.times_run == 2),
            ("task times_failed is 0", task.times_failed == 0),
            ("task ran 6 times", len(task_run) == 6),
            ("scheduler_run records for COMPLETED is 2",
             len([run.status for run in task_run
                  if run.status == 'COMPLETED']) == 2),
            ("scheduler_run records for FAILED is 4",
             len([run.status for run in task_run
                  if run.status == 'FAILED']) == 4),
        ]
        self.exec_asserts(res, 'FAILED_CONSECUTIVE')
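
The FAILED_CONSECUTIVE numbers follow from how retry_failed interacts with repeats: each scheduled repeat gets up to retry_failed extra attempts after its first failure, and demo8 only succeeds once its placeholder file holds three lines, i.e. on the third attempt of each cycle. A sketch of the arithmetic behind the assertions:

    retry_failed, repeats = 2, 2
    attempts_per_cycle = 1 + retry_failed      # first try plus two retries = 3
    total_runs = attempts_per_cycle * repeats  # 6 scheduler_run records
    completed = repeats                        # third attempt of each cycle -> 2
    failed = total_runs - completed            # 4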
Example #7
    def testRetryFailed(self):
        s = Scheduler(self.db)
        failed = s.queue_task('demo2', retry_failed=1, period=5)
        failed_consecutive = s.queue_task('demo8', retry_failed=2, repeats=2, period=5)
        self.db.commit()
        self.writefunction(r"""

def demo2():
    1/0

def demo8():
    placeholder = os.path.join(request.folder, 'private', 'demo8.pholder')
    with open(placeholder, 'a') as g:
        g.write('\nplaceholder for demo8 created')
    num_of_lines = 0
    with open(placeholder) as f:
        num_of_lines = len([a for a in f.read().split('\n') if a])
    print('number of lines', num_of_lines)
    if num_of_lines <= 2:
       1/0
    else:
        os.unlink(placeholder)
    return 1
""")
        ret = self.exec_sched()
        # process finished just fine
        self.assertEqual(ret, 0)
        # failed - checks
        info = s.task_status(failed.id)
        task_runs = self.db(self.db.scheduler_run.task_id == info.id).select()
        res = [
            ("task status failed", info.status == 'FAILED'),
            ("task times_run is 0", info.times_run == 0),
            ("task times_failed is 2", info.times_failed == 2),
            ("task ran 2 times only", len(task_runs) == 2),
            ("scheduler_run records are FAILED", (task_runs[0].status == task_runs[1].status == 'FAILED')),
            ("period is respected", (task_runs[1].start_time > task_runs[0].start_time + datetime.timedelta(seconds=info.period)))
        ]
        for a in res:
            self.assertEqual(a[1], True, msg=a[0])

        # failed consecutive - checks
        info = s.task_status(failed_consecutive.id)
        task_runs = self.db(self.db.scheduler_run.task_id == info.id).select()
        res = [
            ("task status completed", info.status == 'COMPLETED'),
            ("task times_run is 2", info.times_run == 2),
            ("task times_failed is 0", info.times_failed == 0),
            ("task ran 6 times", len(task_runs) == 6),
            ("scheduler_run records for COMPLETED is 2", len([run.status for run in task_runs if run.status == 'COMPLETED']) == 2),
            ("scheduler_run records for FAILED is 4", len([run.status for run in task_runs if run.status == 'FAILED']) == 4),
        ]
        for a in res:
            self.assertEqual(a[1], True, msg=a[0])
Example #8
    def testRepeats_and_Expired_and_Prio(self):
        s = Scheduler(self.db)
        repeats = s.queue_task('demo1', ['a', 'b'],
                               dict(c=1, d=2),
                               repeats=2,
                               period=5)
        a_while_ago = datetime.datetime.now() - datetime.timedelta(seconds=60)
        expired = s.queue_task('demo4', stop_time=a_while_ago)
        prio1 = s.queue_task('demo1', ['scheduled_first'])
        prio2 = s.queue_task('demo1', ['scheduled_second'],
                             next_run_time=a_while_ago)
        self.db.commit()
        self.writefunction(r"""
def demo1(*args,**vars):
    print('you passed args=%s and vars=%s' % (args, vars))
    return args[0]

def demo4():
    time.sleep(15)
    print("I'm printing something")
    return dict(a=1, b=2)
""")
        ret = self.exec_sched()
        self.assertEqual(ret, 0)
        # repeats check
        task, task_run = self.fetch_results(s, repeats)
        res = [("task status completed", task.status == 'COMPLETED'),
               ("task times_run is 2", task.times_run == 2),
               ("task ran 2 times only", len(task_run) == 2),
               ("scheduler_run records are COMPLETED ",
                (task_run[0].status == task_run[1].status == 'COMPLETED')),
               ("period is respected",
                (task_run[1].start_time > task_run[0].start_time +
                 datetime.timedelta(seconds=task.period)))]
        self.exec_asserts(res, 'REPEATS')

        # expired check
        task, task_run = self.fetch_results(s, expired)
        res = [("task status expired", task.status == 'EXPIRED'),
               ("task times_run is 0", task.times_run == 0),
               ("task didn't run at all", len(task_run) == 0)]
        self.exec_asserts(res, 'EXPIRATION')

        # prio check
        task1 = s.task_status(prio1.id, output=True)
        task2 = s.task_status(prio2.id, output=True)
        res = [("tasks status completed", task1.scheduler_task.status ==
                task2.scheduler_task.status == 'COMPLETED'),
               ("priority2 was executed before priority1",
                task1.scheduler_run.id > task2.scheduler_run.id)]
        self.exec_asserts(res, 'PRIORITY')
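
Two knobs drive the EXPIRATION and PRIORITY checks above: a stop_time already in the past means the task is never picked up and flips to EXPIRED, while a next_run_time in the past moves a task ahead of ones queued earlier. Condensed into a sketch (reusing the test's names):

    a_while_ago = datetime.datetime.now() - datetime.timedelta(seconds=60)
    never = s.queue_task('demo4', stop_time=a_while_ago)              # EXPIRED, 0 runs
    urgent = s.queue_task('demo1', ['x'], next_run_time=a_while_ago)  # executed first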
Example #9
    def testDrift_and_env_and_immediate(self):
        s = Scheduler(self.db)
        immediate = s.queue_task('demo1', ['a', 'b'],
                                 dict(c=1, d=2),
                                 immediate=True)
        env = s.queue_task('demo7')
        drift = s.queue_task('demo1', ['a', 'b'],
                             dict(c=1, d=2),
                             period=93,
                             prevent_drift=True)
        termination = s.queue_task('termination')
        self.db.commit()
        self.writefunction(r"""
def demo1(*args,**vars):
    print('you passed args=%s and vars=%s' % (args, vars))
    return args[0]
import random
def demo7():
    time.sleep(random.randint(1,5))
    print(W2P_TASK, request.now)
    return W2P_TASK.id, W2P_TASK.uuid, W2P_TASK.run_id
""")
        ret = self.exec_sched()
        self.assertEqual(ret, 0)
        # immediate check, can only check that nothing breaks
        task1 = s.task_status(immediate.id)
        res = [
            ("tasks status completed", task1.status == 'COMPLETED'),
        ]
        self.exec_asserts(res, 'IMMEDIATE')

        # drift check
        task, task_run = self.fetch_results(s, drift)
        res = [("task status completed", task.status == 'COMPLETED'),
               ("next_run_time is exactly start_time + period",
                (task.next_run_time == task.start_time +
                 datetime.timedelta(seconds=task.period)))]
        self.exec_asserts(res, 'DRIFT')

        # env check
        task1 = s.task_status(env.id, output=True)
        res = [
            ("task %s returned W2P_TASK correctly" % (task1.scheduler_task.id),
             task1.result == [
                 task1.scheduler_task.id, task1.scheduler_task.uuid,
                 task1.scheduler_run.id
             ]),
        ]
        self.exec_asserts(res, 'ENV')
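
demo7 works because the worker injects a W2P_TASK Storage into every task's execution environment, carrying the task's id and uuid plus the run_id of the current run. A minimal sketch of a task that uses it (my_task is a hypothetical task function; the body is illustrative):

    def my_task():
        # W2P_TASK is injected by the scheduler worker; it is not an import
        print('task %s (uuid %s), run %s'
              % (W2P_TASK.id, W2P_TASK.uuid, W2P_TASK.run_id))
        return W2P_TASK.run_id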
Example #12
    def testJobGraph(self):
        s = Scheduler(self.db)
        myjob = JobGraph(self.db, 'job_1')
        fname = 'foo'
        # We have a few items to wear, and there's an "order" to respect...
        # Items are: watch, jacket, shirt, tie, pants, undershorts, belt, shoes, socks
        # Now, we can't put on the tie without wearing the shirt first, etc...
        watch = s.queue_task(fname, task_name='watch')
        jacket = s.queue_task(fname, task_name='jacket')
        shirt = s.queue_task(fname, task_name='shirt')
        tie = s.queue_task(fname, task_name='tie')
        pants = s.queue_task(fname, task_name='pants')
        undershorts = s.queue_task(fname, task_name='undershorts')
        belt = s.queue_task(fname, task_name='belt')
        shoes = s.queue_task(fname, task_name='shoes')
        socks = s.queue_task(fname, task_name='socks')
        # before the tie, comes the shirt
        myjob.add_deps(tie.id, shirt.id)
        # before the belt too comes the shirt
        myjob.add_deps(belt.id, shirt.id)
        # before the jacket, comes the tie
        myjob.add_deps(jacket.id, tie.id)
        # before the belt, come the pants
        myjob.add_deps(belt.id, pants.id)
        # before the shoes, comes the pants
        myjob.add_deps(shoes.id, pants.id)
        # before the pants, comes the undershorts
        myjob.add_deps(pants.id, undershorts.id)
        # before the shoes, comes the undershorts
        myjob.add_deps(shoes.id, undershorts.id)
        # before the jacket, comes the belt
        myjob.add_deps(jacket.id, belt.id)
        # before the shoes, comes the socks
        myjob.add_deps(shoes.id, socks.id)

        ## results in the following topological sort
        # 9,3,6 --> 4,5 --> 8,7 --> 2
        # socks, shirt, undershorts
        # tie, pants
        # shoes, belt
        # jacket
        known_toposort = [
            set([socks.id, shirt.id, undershorts.id]),
            set([tie.id, pants.id]),
            set([shoes.id, belt.id]),
            set([jacket.id])
        ]
        toposort = myjob.validate('job_1')
        self.assertEqual(toposort, known_toposort)
        # add a cyclic dependency, jacket to undershorts
        myjob.add_deps(undershorts.id, jacket.id)
        # no exceptions raised, but result None
        self.assertEqual(myjob.validate('job_1'), None)
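
JobGraph's API is small: add_deps(dependent_id, dependency_id) records one edge, and validate(job_name) either returns the topological sort as a list of sets (tasks in the same set may run in parallel) or None when a cycle makes the job impossible. A minimal sketch with two tasks, in the same test idiom ('nightly', 'extract', and 'load' are illustrative names):

    graph = JobGraph(self.db, 'nightly')
    extract = s.queue_task(fname, task_name='extract')
    load = s.queue_task(fname, task_name='load')
    # load depends on extract, same argument order as add_deps above
    graph.add_deps(load.id, extract.id)
    self.assertEqual(graph.validate('nightly'),
                     [set([extract.id]), set([load.id])])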
Example #16
    def testBasic(self):
        s = Scheduler(self.db)
        foo = s.queue_task('foo')
        self.db.commit()
        self.writefunction(r"""
def foo():
    return 'a'
""")
        ret = self.exec_sched()
        # process finished just fine
        self.assertEqual(ret, 0)
        info = s.task_status(foo.id, output=True)
        self.assertEqual(info.result, 'a')
Example #17
 def testJobGraphDifferentJobs(self):
     s = Scheduler(self.db)
     myjob1 = JobGraph(self.db, 'job_1')
     myjob2 = JobGraph(self.db, 'job_2')
     fname = 'foo'
     # We have a few items to wear, and there's an "order" to respect...
     # Items are: watch, jacket, shirt, tie, pants, undershorts, belt, shoes, socks
     # Now, we can't put on the tie without wearing the shirt first, etc...
     watch = s.queue_task(fname, task_name='watch')
     jacket = s.queue_task(fname, task_name='jacket')
     shirt = s.queue_task(fname, task_name='shirt')
     tie = s.queue_task(fname, task_name='tie')
     pants = s.queue_task(fname, task_name='pants')
     undershorts = s.queue_task(fname, task_name='undershorts')
     belt = s.queue_task(fname, task_name='belt')
     shoes = s.queue_task(fname, task_name='shoes')
     socks = s.queue_task(fname, task_name='socks')
     # before the tie, comes the shirt
     myjob1.add_deps(tie.id, shirt.id)
     # before the belt too comes the shirt
     myjob1.add_deps(belt.id, shirt.id)
     # before the jacket, comes the tie
     myjob1.add_deps(jacket.id, tie.id)
     # before the belt, come the pants
     myjob1.add_deps(belt.id, pants.id)
     # before the shoes, comes the pants
     myjob2.add_deps(shoes.id, pants.id)
     # before the pants, comes the undershorts
     myjob2.add_deps(pants.id, undershorts.id)
     # before the shoes, comes the undershorts
     myjob2.add_deps(shoes.id, undershorts.id)
     # before the jacket, comes the belt
     myjob2.add_deps(jacket.id, belt.id)
     # before the shoes, comes the socks
     myjob2.add_deps(shoes.id, socks.id)
     # every job by itself can be completed
     self.assertNotEqual(myjob1.validate('job_1'), None)
     self.assertNotEqual(myjob1.validate('job_2'), None)
     # and, implicitly, every queued task can be too
     self.assertNotEqual(myjob1.validate(), None)
     # add a cyclic dependency, jacket to undershorts
     myjob2.add_deps(undershorts.id, jacket.id)
     # every job can still be completed by itself
     self.assertNotEqual(myjob1.validate('job_1'), None)
     self.assertNotEqual(myjob1.validate('job_2'), None)
     # but trying to see if every task will ever be completed fails
     self.assertEqual(myjob2.validate(), None)
Example #19
 def testTask_Status(self):
     s = Scheduler(self.db)
     fname = 'foo'
     watch = s.queue_task(fname, task_name='watch')
     # fetch status by id
     by_id = s.task_status(watch.id)
     # fetch status by uuid
     by_uuid = s.task_status(watch.uuid)
     # fetch status by query
     by_query = s.task_status(self.db.scheduler_task.function_name == 'foo')
     self.assertEqual(by_id, by_uuid)
     self.assertEqual(by_id, by_query)
     # fetch status by anything else throws
     self.assertRaises(SyntaxError, s.task_status, *[[1, 2]])
     # adding output returns the joined set, plus "result"
     rtn = s.task_status(watch.id, output=True)
     self.assertEqual(set(rtn.keys()), set(['scheduler_run', 'scheduler_task', 'result']))
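
task_status accepts three reference types, as exercised above: an integer id, a string uuid, or a DAL query; anything else raises SyntaxError. With output=True it joins in the latest scheduler_run row and decodes the task's result. A short status-checking sketch (illustrative only):

    rtn = s.task_status(watch.uuid, output=True)
    if rtn.scheduler_task.status == 'COMPLETED':
        print(rtn.result)                    # decoded return value of the task
    elif rtn.scheduler_run:
        print(rtn.scheduler_run.run_output)  # captured stdout so far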
Example #21
 def testJobGraphFailing(self):
     s = Scheduler(self.db)
     myjob = JobGraph(self.db, 'job_1')
     fname = 'foo'
     # We have a few items to wear, and there's an "order" to respect...
     # Items are: watch, jacket, shirt, tie, pants, undershorts, belt, shoes, socks
     # Now, we can't put on the tie without wearing the shirt first, etc...
     watch = s.queue_task(fname, task_name='watch')
     jacket = s.queue_task(fname, task_name='jacket')
     shirt = s.queue_task(fname, task_name='shirt')
     tie = s.queue_task(fname, task_name='tie')
     pants = s.queue_task(fname, task_name='pants')
     undershorts = s.queue_task(fname, task_name='undershorts')
     belt = s.queue_task(fname, task_name='belt')
     shoes = s.queue_task(fname, task_name='shoes')
     socks = s.queue_task(fname, task_name='socks')
     # before the tie, comes the shirt
     myjob.add_deps(tie.id, shirt.id)
     # before the belt too comes the shirt
     myjob.add_deps(belt.id, shirt.id)
     # before the jacket, comes the tie
     myjob.add_deps(jacket.id, tie.id)
     # before the belt, come the pants
     myjob.add_deps(belt.id, pants.id)
     # before the shoes, comes the pants
     myjob.add_deps(shoes.id, pants.id)
     # before the pants, comes the undershorts
     myjob.add_deps(pants.id, undershorts.id)
     # before the shoes, comes the undershorts
     myjob.add_deps(shoes.id, undershorts.id)
     # before the jacket, comes the belt
     myjob.add_deps(jacket.id, belt.id)
     # before the shoes, comes the socks
     myjob.add_deps(shoes.id, socks.id)
     # add a cyclic dependency, jacket to undershorts
     myjob.add_deps(undershorts.id, jacket.id)
     # no exceptions raised, but result None
     self.assertEqual(myjob.validate('job_1'), None)
     # and no deps added
     deps_inserted = self.db(self.db.scheduler_task_deps.id > 0).count()
     self.assertEqual(deps_inserted, 0)
Example #23
    def testHugeResult(self):
        s = Scheduler(self.db)
        huge_result = s.queue_task('demo10', retry_failed=1, period=1)
        self.db.commit()
        self.writefunction(r"""
def demo10():
    res = 'a' * 99999
    return dict(res=res)
""")
        ret = self.exec_sched()
        # process finished just fine
        self.assertEqual(ret, 0)
        # huge_result - checks
        task = s.task_status(huge_result.id, output=True)
        res = [("task status completed",
                task.scheduler_task.status == 'COMPLETED'),
               ("task times_run is 1", task.scheduler_task.times_run == 1),
               ("result is the correct one",
                task.result == dict(res='a' * 99999))]
        self.exec_asserts(res, 'HUGE_RESULT')
Example #26
    aggregateRows = aggregateRows.astype(float)
    # print("computing knn")
    knnOut = aggregateRows[:, 0]
    knnIn = aggregateRows[:, 1:]
    knn = neighbors.KNeighborsClassifier(weights='distance', metric='minkowski')
    knn.fit(knnIn, knnOut)
    for row in db(db.user_interests.user_id == userid).select():
        aRow = []
        for field in db.knnRef.fields[2:]:
            aRow.append(row[field])
    # print(aRow)
    # print(len(aRow))
    aRow = np.array(aRow[1:])
    aRow = aRow.astype(float)
    # scikit-learn expects a 2D array of shape (n_samples, n_features)
    clusterId = knn.predict(aRow.reshape(1, -1))
    nRow = db(db.user_clusters.user_id == userid).count()
    if nRow:
        temp = int(clusterId[0])
        db(db.user_clusters.user_id == userid).update(cluster_id=temp)
    else:
        db.user_clusters.insert(user_id=userid, cluster_id=int(clusterId[0]))
    return 0


#knnSelect(8)
#knnMake()
#make_clusters()
from gluon.scheduler import Scheduler
scheduler = Scheduler(db)
scheduler.queue_task(make_clusters,
                     repeats=0,
                     start_time=datetime.datetime.now(),
                     period=86400)
Example #27
    p = subprocess.Popen(['open', 'http://localhost:8088/fsr/newsAPI/index'])
    time.sleep(1800)
    p.kill()
    return "Finished"


from gluon.scheduler import Scheduler
Now = datetime.datetime.now()
start_time = datetime.datetime.strftime(Now, '%Y-%m-%d 15:00:00')
start_time1 = datetime.datetime.strftime(Now, '%Y-%m-%d %H:%M:%S')
start_time = datetime.datetime.strptime(start_time, '%Y-%m-%d %H:%M:%S')
start_time2 = datetime.datetime.strptime(
    start_time1, '%Y-%m-%d %H:%M:%S') + timedelta(minutes=5)
start_time3 = datetime.datetime.strptime(
    start_time1, '%Y-%m-%d %H:%M:%S') + timedelta(minutes=35)

scheduler = Scheduler(fbdb)

scheduler.queue_task('DailyUpdater',
                     repeats=0,
                     period=3600 * 24,
                     start_time=start_time)
scheduler.queue_task('HourlyUpdater',
                     repeats=0,
                     period=3600,
                     start_time=start_time2)
scheduler.queue_task('NewsUpdater',
                     repeats=0,
                     period=3600,
                     start_time=start_time3)
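
The strftime/strptime round-trips above pin the daily job to 15:00 today and offset the hourly ones from now. A sketch of an equivalent using datetime.replace and plain timedelta arithmetic (equivalent up to sub-second precision, since strftime drops microseconds):

    now = datetime.datetime.now()
    daily_start = now.replace(hour=15, minute=0, second=0, microsecond=0)
    hourly_start = now + timedelta(minutes=5)
    news_start = now + timedelta(minutes=35)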
Example #28
current.scheduler = scheduler

# Make sure to run the ad login refresh every hour or so
refresh_ad_login = current.cache.ram('refresh_ad_login',
                                     lambda: True,
                                     time_expire=60 * 60)
if refresh_ad_login is True and request.is_scheduler is not True:
    # Set the current value to false so we don't need to refresh for a while
    current.cache.ram('refresh_ad_login', lambda: False, time_expire=-1)
    # Update the last login value for all users (students and faculty)
    AD.Init()  # Make sure AD settings are loaded
    print("Queueing up refresh_all_ad_logins...")
    # print(str(request.is_scheduler))
    # print(str(request))
    if AD._ldap_enabled is not True:
        # Not enabled, skip
        print("AD Not enabled, skipping refresh_all_ad_logins...")
    else:
        # Schedule the process
        result = scheduler.queue_task('refresh_all_ad_logins',
                                      timeout=1200,
                                      sync_output=5,
                                      group_name="misc",
                                      repeats=1,
                                      period=0,
                                      pvars=dict(run_from='x_scheduler.py'))

    # Make sure to start the scheduler process
    # cmd = "/usr/bin/nohup /usr/bin/python " + os.path.join(request.folder, 'static/scheduler/start_misc_scheduler.py') + " > /dev/null 2>&1 &"
    # p = subprocess.Popen(cmd, shell=True, close_fds=True)
Example #29
            actor = actor,
            edApp = edApp,
            group = organization,
            object = resource,
            eventTime = datetime.now().isoformat(),
            action = "NavigatedTo"
            )

    # Once built, you can use your sensor to describe one or more often used
    # entities; suppose for example, you'll be sending a number of events
    # that all have the same actor

    ret = the_sensor.describe(the_event.actor)

    # The return structure from the sensor will be a dictionary of lists: each
    # item in the dictionary has a key corresponding to a client key,
    # so ret['default'] fetches back the list of URIs of all the @ids of
    # the fully described Caliper objects you have sent with that describe call.
    #
    # Now you can use this list with event sendings to send only the identifiers
    # of already-described entities, and not their full forms:
    # print(the_sensor.send(the_event, described_objects=ret['default']))

    # You can also just send the event in its full form, with all fleshed out
    # entities:
    the_sensor.send(the_event)

    rslogger.info("Event sent!")
# Period set to 30 for testing; this should be configurable and a lot longer
scheduler.queue_task(send_events_to_caliper, period=30, repeats=0)
Example #30
class S3Task(object):
    """ Asynchronous Task Execution """

    TASK_TABLENAME = "scheduler_task"

    # -------------------------------------------------------------------------
    def __init__(self):

        migrate = current.deployment_settings.get_base_migrate()
        tasks = current.response.s3.tasks

        # Instantiate Scheduler
        try:
            from gluon.scheduler import Scheduler
        except ImportError:
            # Warning should already have been given by eden_update_check.py
            self.scheduler = None
        else:
            self.scheduler = Scheduler(
                current.db,
                tasks,
                migrate=migrate,
                #use_spawn = True # Possible subprocess method with Py3
            )

    # -------------------------------------------------------------------------
    def configure_tasktable_crud(
        self,
        task=None,
        function=None,
        args=None,
        vars=None,
        period=3600,  # seconds, so 1 hour
        status_writable=False,
    ):
        """
            Configure the task table for interactive CRUD,
            setting defaults, widgets and hiding unnecessary fields

            @param task: the task name (will use a UUID if omitted)
            @param function: the function name (won't hide if omitted)
            @param args: the function position arguments
            @param vars: the function named arguments
            @param period: the default period for tasks
            @param status_writable: make status and next run time editable
        """

        T = current.T
        NONE = current.messages["NONE"]
        UNLIMITED = T("unlimited")

        tablename = self.TASK_TABLENAME
        table = current.db[tablename]

        # Configure start/stop time fields
        for fn in ("start_time", "stop_time"):
            field = table[fn]
            field.represent = lambda dt: \
                            S3DateTime.datetime_represent(dt, utc=True)
            set_min = set_max = None
            if fn == "start_time":
                field.requires = IS_UTC_DATETIME()
                set_min = "#scheduler_task_stop_time"
            elif fn == "stop_time":
                field.requires = IS_EMPTY_OR(IS_UTC_DATETIME())
                set_max = "#scheduler_task_start_time"
            field.widget = S3CalendarWidget(
                past=0,
                set_min=set_min,
                set_max=set_max,
                timepicker=True,
            )

        # Task name (default use UUID)
        if task is None:
            from uuid import uuid4
            task = str(uuid4())
        field = table.task_name
        field.default = task
        field.readable = field.writable = False

        # Function (default+hide if specified as parameter)
        if function:
            field = table.function_name
            field.default = function
            field.readable = field.writable = False

        # Args and vars
        if isinstance(args, list):
            field = table.args
            field.default = json.dumps(args)
            field.readable = field.writable = False
        else:
            table.args.default = "[]"
        if isinstance(vars, dict):
            field = table.vars
            field.default = json.dumps(vars)
            field.readable = field.writable = False
        else:
            table.vars.default = "{}"

        # Fields which are always editable
        field = table.repeats
        field.label = T("Repeat")
        field.comment = T("times (0 = unlimited)")
        field.default = 0
        field.represent = lambda opt: \
            opt and "%s %s" % (opt, T("times")) or \
            opt == 0 and UNLIMITED or \
            NONE

        field = table.period
        field.label = T("Run every")
        field.default = period
        field.widget = S3TimeIntervalWidget.widget
        field.requires = IS_INT_IN_RANGE(0, None)
        field.represent = S3TimeIntervalWidget.represent
        field.comment = None

        table.timeout.default = 600
        table.timeout.represent = lambda opt: \
                                    opt and "%s %s" % (opt, T("seconds")) or \
                                    opt == 0 and UNLIMITED or \
                                    NONE

        # Always use "default" controller (web2py uses current controller),
        # otherwise the anonymous worker does not pass the controller
        # permission check and gets redirected to login before it reaches
        # the task function which does the s3_impersonate
        field = table.application_name
        field.default = "%s/default" % current.request.application
        field.readable = field.writable = False

        # Hidden fields
        hidden = (
            "uuid",
            "broadcast",
            "group_name",
            "times_run",
            "assigned_worker_name",
            "sync_output",
            "times_failed",
            "cronline",
        )
        for fn in hidden:
            table[fn].readable = table[fn].writable = False

        # Optionally editable fields
        fields = ("next_run_time", "status", "prevent_drift")
        for fn in fields:
            table[fn].readable = table[fn].writable = status_writable

        list_fields = [
            "id", "enabled", "start_time", "repeats", "period",
            (T("Last run"), "last_run_time"), (T("Last status"), "status"),
            (T("Next run"), "next_run_time"), "stop_time"
        ]
        if not function:
            list_fields[1:1] = ["task_name", "function_name"]

        current.s3db.configure(
            tablename,
            list_fields=list_fields,
        )

        response = current.response
        if response:
            response.s3.crud_strings[tablename] = Storage(
                label_create=T("Create Job"),
                title_display=T("Job Details"),
                title_list=T("Job Schedule"),
                title_update=T("Edit Job"),
                label_list_button=T("List Jobs"),
                msg_record_created=T("Job added"),
                msg_record_modified=T("Job updated"),
                msg_record_deleted=T("Job deleted"),
                msg_list_empty=T("No jobs configured yet"),
                msg_no_match=T("No jobs configured"))

    # -------------------------------------------------------------------------
    # API Function run within the main flow of the application
    # -------------------------------------------------------------------------
    def run_async(self, task, args=None, vars=None, timeout=300):
        """
            Wrapper to call an asynchronous task.
            - run from the main request

            @param task: The function which should be run
                         - async if a worker is alive
            @param args: The list of unnamed args to send to the function
            @param vars: The list of named vars to send to the function
            @param timeout: The length of time available for the task to complete
                            - default 300s (5 mins)
        """

        if args is None:
            args = []
        if vars is None:
            vars = {}

        # Check that task is defined (and callable)
        tasks = current.response.s3.tasks
        if not tasks or not callable(tasks.get(task)):
            return False

        # Check that args/vars are JSON-serializable
        try:
            json.dumps(args)
        except (ValueError, TypeError):
            msg = "S3Task.run_async args not JSON-serializable: %s" % args
            current.log.error(msg)
            raise
        try:
            json.dumps(vars)
        except (ValueError, TypeError):
            msg = "S3Task.run_async vars not JSON-serializable: %s" % vars
            current.log.error(msg)
            raise

        # Run synchronously if scheduler not running
        if not self._is_alive():
            tasks[task](*args, **vars)
            return None  # No task ID in this case

        # Queue the task (async)
        try:
            # Add the current user to the vars
            vars["user_id"] = current.auth.user.id
        except AttributeError:
            pass
        queued = self.scheduler.queue_task(task,
                                           pargs = args,
                                           pvars = vars,
                                           application_name = "%s/default" % \
                                                              current.request.application,
                                           function_name = task,
                                           timeout = timeout,
                                           )

        # Return task ID so that status can be polled
        return queued.id

    # -------------------------------------------------------------------------
    def schedule_task(
            self,
            task,
            args=None,  # args to pass to the task
            vars=None,  # vars to pass to the task
            function_name=None,
            start_time=None,
            next_run_time=None,
            stop_time=None,
            repeats=None,
            retry_failed=None,
            period=None,
            timeout=None,
            enabled=None,  # None = Enabled
            group_name=None,
            ignore_duplicate=False,
            sync_output=0):
        """
            Schedule a task in web2py Scheduler

            @param task: name of the function/task to be scheduled
            @param args: args to be passed to the scheduled task
            @param vars: vars to be passed to the scheduled task
            @param function_name: function name (if different from task name)
            @param start_time: start_time for the scheduled task
            @param next_run_time: next_run_time for the scheduled task
            @param stop_time: stop_time for the scheduled task
            @param repeats: number of times the task to be repeated (0=unlimited)
            @param retry_failed: number of times the task to be retried (-1=unlimited)
            @param period: time period between two consecutive runs (seconds)
            @param timeout: set timeout for a running task
            @param enabled: enabled flag for the scheduled task
            @param group_name: group_name for the scheduled task
            @param ignore_duplicate: disable or enable duplicate checking
            @param sync_output: sync output every n seconds (0 = disable sync)
        """

        if args is None:
            args = []
        if vars is None:
            vars = {}

        if not ignore_duplicate and self._duplicate_task_exists(
                task, args, vars):
            # if duplicate task exists, do not insert a new one
            current.log.warning("Duplicate Task, Not Inserted", value=task)
            return False

        kwargs = {}

        if function_name is None:
            function_name = task

        # storing valid keyword arguments only if they are provided
        if start_time:
            kwargs["start_time"] = start_time

        if next_run_time:
            kwargs["next_run_time"] = next_run_time
        elif start_time:
            # default it to start_time
            kwargs["next_run_time"] = start_time

        if stop_time:
            kwargs["stop_time"] = stop_time
        elif start_time:
            # default it to one day ahead of given start_time
            if not isinstance(start_time, datetime.datetime):
                start_time = datetime.datetime.strptime(
                    start_time, "%Y-%m-%d %H:%M:%S")
            kwargs["stop_time"] = start_time + datetime.timedelta(days=1)

        if repeats is not None:
            kwargs["repeats"] = repeats

        if retry_failed is not None:
            kwargs["retry_failed"] = retry_failed

        if period:
            kwargs["period"] = period

        if timeout:
            kwargs["timeout"] = timeout

        if enabled is not None:
            # NB None => enabled
            kwargs["enabled"] = enabled

        if group_name:
            kwargs["group_name"] = group_name

        if sync_output != 0:
            kwargs["sync_output"] = sync_output

        auth = current.auth
        if auth.is_logged_in():
            # Add the current user to the vars
            vars["user_id"] = auth.user.id

        # Add to DB for pickup by Scheduler task
        # @ToDo: Switch to API: self.scheduler.queue_task()
        task_id = current.db.scheduler_task.insert(application_name = "%s/default" % \
                                                   current.request.application,
                                                   task_name = task,
                                                   function_name = function_name,
                                                   args = json.dumps(args),
                                                   vars = json.dumps(vars),
                                                   **kwargs)
        return task_id

    # -------------------------------------------------------------------------
    @staticmethod
    def _duplicate_task_exists(task, args, vars):
        """
            Checks whether the given task is already in the Scheduler with the
            same args and vars (i.e. currently queued, allocated or running)

            @param task: name of the task function
            @param args: the job position arguments (list)
            @param vars: the job named arguments (dict)
        """

        db = current.db
        ttable = db.scheduler_task

        args_json = json.dumps(args)

        query = ((ttable.function_name == task) & \
                 (ttable.args == args_json) & \
                 (ttable.status.belongs(["RUNNING", "QUEUED", "ALLOCATED"])))
        jobs = db(query).select(ttable.vars)
        for job in jobs:
            job_vars = json.loads(job.vars)
            if job_vars == vars:
                return True
        return False
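
    # Sketch: with duplicate checking enabled, re-queuing an identical call
    # while the first is still queued or running returns False instead of
    # inserting (hypothetical task name/vars):
    #     s3task.schedule_task("send_reminders", vars={"batch": 10})
    #     s3task.schedule_task("send_reminders", vars={"batch": 10})  # -> False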

    # -------------------------------------------------------------------------
    @staticmethod
    def _is_alive():
        """
            Returns True if there is at least 1 active worker to run scheduled tasks
            - run from the main request

            NB Can't run this 1/request at the beginning since the tables
               only get defined in zz_last
        """

        #if self.scheduler:
        #    return self.scheduler.is_alive()
        #else:
        #    return False

        db = current.db
        table = db.scheduler_worker

        now = datetime.datetime.now()
        offset = datetime.timedelta(minutes=1)

        query = (table.last_heartbeat > (now - offset))
        cache = current.response.s3.cache
        worker_alive = db(query).select(
            table.id,
            limitby=(0, 1),
            cache=cache,
        ).first()

        return worker_alive is not None

    # -------------------------------------------------------------------------
    @staticmethod
    def reset(task_id):
        """
            Reset the status of a task to QUEUED after FAILED

            @param task_id: the task record ID
        """

        db = current.db
        ttable = db.scheduler_task

        query = (ttable.id == task_id) & (ttable.status == "FAILED")
        task = db(query).select(ttable.id, limitby=(0, 1)).first()
        if task:
            task.update_record(status="QUEUED")

    # =========================================================================
    # Functions run within the Task itself
    # =========================================================================
    @staticmethod
    def authenticate(user_id):
        """
            Activate the authentication passed from the caller to this new request
            - run from within the task

            NB This is so simple that we don't normally run via this API
               - this is just kept as an example of what needs to happen within the task
        """

        current.auth.s3_impersonate(user_id)
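
        # Task-body sketch: a function queued with user_id in its vars (added
        # automatically above for logged-in users) would typically start like
        # this (hypothetical task):
        #     def my_task(user_id=None):
        #         current.auth.s3_impersonate(user_id)
        #         ...  # proceed with that user's permissions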
Example #31
            eCPC = spend / clicks
            eCPI = spend / install
            conditions = val.campaign_rule.conditions

            try:
                ans = eval(conditions)
                tempList.append(val.campaign_rule.id)
                if ans:
                    db(db.campaign_rule.id == val.campaign.id).update(
                        status='deactivate',
                        next_run_time=datetime.datetime.now().date() +
                        datetime.timedelta(days=1))

                    db.commit()
                    mail.send(to=['*****@*****.**'],
                              subject='hello',
                              reply_to='*****@*****.**',
                              message=' Stop campaign')
            except Exception:
                # rule evaluation failed; skip this campaign
                pass

    return 1


scheduler.queue_task(checkCampaign,
                     prevent_drift=True,
                     start_time=datetime.datetime.now(),
                     timeout=60,
                     period=9000,
                     repeats=1)
Example #32
            timeline_table = db(db.timeline.user_extra_id==user['id']).select()
            now_time = datetime.now()
            if status and len(user_exist):
                if not len(timeline_table) or timeline_table[-1]['end_time']:  # no timeline rows yet, or the last one is already closed
                    logger.debug('Insert')
                    db.timeline.insert(week_day=now_time.strftime('%A %d %b'),
                                       user_extra_id=user['id'],
                                       start_time=now_time.isoformat())
                    db.commit()
                else:
                    continue
            elif len(user_exist):
                if (len(timeline_table) and
                        timeline_table[-1]['start_time'] and
                        not timeline_table[-1]['end_time']):
                    logger.debug('Update')
                    timeline_table[-1].end_time = now_time.isoformat()
                    timeline_table[-1].update_record()
        elif type == 'facebook':
            pass

    return True

# def write_to_db():
#     if check_online(user_id):
from gluon.scheduler import Scheduler
scheduler = Scheduler(db)

# scheduler.queue_task(task_add,pvars=dict(a=1,b=2))
scheduler.queue_task(check_online)  # pass the function itself, not the result of calling it
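
# Hypothetical sketch of queueing the same task with arguments via pvars
# (check_online's real signature is not shown in this snippet):
#     scheduler.queue_task(check_online, pvars=dict(type='facebook'))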
Example #33
    db_scheduler,
    discard_results=True,
    heartbeat=settings.scheduler['heartbeat'],
)

# check if the set_failed task is there, else insert it
sched_failed = cache.ram(
    'sched_failed',
    lambda: db_scheduler(db_scheduler.scheduler_task.task_name == "set_failed"
                         ).select().first(),
    time_expire=60)
if not sched_failed:
    scheduler.queue_task(
        set_failed,
        task_name="set_failed",
        timeout=30,
        retry_failed=-1,
        period=60,
        repeats=0,
    )

# check if the clean_imagedir task is there, else insert it
sched_clean_imagedir = cache.ram(
    'sched_clean_imagedir',
    lambda: db_scheduler(db_scheduler.scheduler_task.task_name ==
                         "clean_imagedir").select().first(),
    time_expire=60)
if not sched_clean_imagedir:
    scheduler.queue_task(
        clean_imagedir,
        task_name="clean_imagedir",
        timeout=300,
Example #34
    heartbeat=settings.scheduler['heartbeat'],
)

# check if the set_failed task is there, else insert it
sched_failed = cache.ram(
    'sched_failed',
    lambda: db_scheduler(
        db_scheduler.scheduler_task.task_name == "set_failed"
    ).select().first(),
    time_expire=60
)
if not sched_failed:
    scheduler.queue_task(
        set_failed,
        task_name="set_failed",
        timeout=30,
        retry_failed=-1,
        period=60,
        repeats=0,
    )

# check if the clean_imagedir task is there, else insert it
sched_clean_imagedir = cache.ram(
    'sched_clean_imagedir',
    lambda: db_scheduler(
        db_scheduler.scheduler_task.task_name == "clean_imagedir"
    ).select().first(),
    time_expire=60
)
if not sched_clean_imagedir:
    scheduler.queue_task(
        clean_imagedir,
Example #35
else:
    sch_db = db


def check_classes_status():
    classes = db(Class.status != 2).select()
    log_in_file("Checking for open classes...", path="/tmp/scheduler.log")
    for course_class in classes:
        if int(course_class.status) == 1 and course_class.end_date < request.now.date():
            log_in_file("Course %s being closed." % course_class.course.id, path="/tmp/scheduler.log")
            course_class.update_record(status=2)
        elif int(course_class.status) == 3 and course_class.start_date <= request.now.date():
            log_in_file("Course %s in progress." % course_class.course.id, path="/tmp/scheduler.log")
            course_class.update_record(status=1)
    db.commit()
    log_in_file("All status updated!", path="/tmp/scheduler.log")


scheduler = Scheduler(sch_db, tasks=dict(check_classes_status=check_classes_status))

if sch_db(sch_db.scheduler_task).count() == 0:
    ## run check_classes_status once per day (86400 seconds = 24 hours)
    ## repeats = 0 means it will repeat forever
    ## it starts at midnight after you have created it
    import datetime

    today_midnight = datetime.datetime.now().replace(hour=0, minute=0, second=0, microsecond=0)
    start_time = today_midnight + datetime.timedelta(days=1)
    sched = scheduler.queue_task("check_classes_status", start_time=start_time, period=86400, repeats=0)
    log_in_file("New scheduler created: ID %d" % sched.id, path="/tmp/scheduler.log")
Example #36
from gluon.scheduler import Scheduler

scheduler = Scheduler(db)


def send_email():
    # rows = db.courier().select(orderby=~db.courier.arrival_date)
    # temp = []
    # for row in rows:
    #      if row.taken == False:
    #          temp.append(row)
    # print temp[0]
    hisemail = "*****@*****.**"  ###############change it to the mail of user

    sub = "New parcel.IIIT courier portal"
    msg = "You have new Parcels . Collect it form Nilgiri"
    if mail:
        if mail.send(to=[hisemail], subject=sub, message=msg):
            response.flash = 'email sent successfully.'
        else:
            response.flash = 'failed to send email, sorry!'
    else:
        response.flash = 'Unable to send the email : email parameters not defined'


scheduler.queue_task(send_email)
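
# Sketch of the mail setup send_email assumes (hypothetical SMTP values):
#     from gluon.tools import Mail
#     mail = Mail()
#     mail.settings.server = 'smtp.example.com:587'
#     mail.settings.sender = 'noreply@example.com'
#     mail.settings.login = 'user:password'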
Example #37
                                        vm_util_rrd=vm_utilization_rrd,
                                        vm_daily_checks=process_vmdaily_checks,
                                        vm_garbage_collector=process_unusedvm,
                                        memory_overload=overload_memory,
                                        networking_host=host_networking,
                                        rrd_task=task_rrd,
                                        vm_loadbalance=process_loadbalancer),
                             group_names=['vm_task', 'vm_sanity', 'host_task', 'vm_rrd', 'snapshot_task', 'host_network'])


midnight_time = request.now.replace(hour=23, minute=59, second=59)

vm_scheduler.queue_task(TASK_SNAPSHOT, 
                    pvars = dict(snapshot_type = SNAPSHOT_DAILY),
                    repeats = 0, # run indefinitely
                    start_time = midnight_time, 
                    period = 24 * HOURS, # every 24h
                    timeout = 5 * MINUTES,
                    uuid = UUID_SNAPSHOT_DAILY,
                    group_name = 'snapshot_task')

vm_scheduler.queue_task(TASK_SNAPSHOT, 
                    pvars = dict(snapshot_type = SNAPSHOT_WEEKLY),
                    repeats = 0, # run indefinitely
                    start_time = midnight_time, 
                    period = 7 * DAYS, # every 7 days
                    timeout = 5 * MINUTES,
                    uuid = UUID_SNAPSHOT_WEEKLY,
                    group_name = 'snapshot_task')

vm_scheduler.queue_task(TASK_SNAPSHOT, 
                    pvars = dict(snapshot_type = SNAPSHOT_MONTHLY),
Example #38
from gluon.scheduler import Scheduler
from datetime import datetime, timedelta
from signal import SIGKILL
import os

def build_monitor():
    running_builds = db(db.current_builds.finished == False).select()
    for build in running_builds:
        # check which of the running builds are running too long
        if (build.start_time + timedelta(seconds=MAX_BUILD_TIME)) < datetime.now():
            if build.PID is None:
                print('The build with the id', build.id, 'has not started yet but has already timed out. It is probably garbage.')
            else:
                # kill the build. the build module will resume and take care of cleanup, etc.
                print('The build', build.id, 'has timed out!')
                os.kill(build.PID, SIGKILL)

    return True

TASK_UUID = '29aa3d33-1f7b-4d11-a589-75afa399a4e9'

# initiate scheduler
scheduler = Scheduler(db, discard_results=False, heartbeat=1)


# build_monitor task - drop and reinsert to avoid time stamp conflicts
scheduler.queue_task('build_monitor', task_name='build_monitor', repeats=0, period=2, timeout=2, uuid=TASK_UUID, retry_failed=-1)
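
# The "drop" half of the comment above is not shown in this snippet; a sketch
# of what it could look like (assumption, using the default scheduler table):
#     db(db.scheduler_task.uuid == TASK_UUID).delete()
#     db.commit()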
Example #39
    def testNoReturn_and_Timeout_and_Progress(self):
        s = Scheduler(self.db)
        noret1 = s.queue_task('demo5')
        noret2 = s.queue_task('demo3')
        timeout1 = s.queue_task('demo4', timeout=5)
        timeout2 = s.queue_task('demo4')
        progress = s.queue_task('demo6', sync_output=2)
        termination = s.queue_task('termination')
        self.db.commit()
        self.writefunction(r"""
def demo3():
    time.sleep(3)
    print(1/0)
    return None

def demo4():
    time.sleep(15)
    print("I'm printing something")
    return dict(a=1, b=2)

def demo5():
    time.sleep(3)
    print("I'm printing something")
    rtn = dict(a=1, b=2)

def demo6():
    time.sleep(5)
    print('50%')
    time.sleep(5)
    print('!clear!100%')
    return 1
""")
        ret = self.exec_sched()
        self.assertEqual(ret, 0)
        # noreturn check
        task1, task_run1 = self.fetch_results(s, noret1)
        task2, task_run2 = self.fetch_results(s, noret2)
        res = [
            ("tasks no_returns1 completed", task1.status == 'COMPLETED'),
            ("tasks no_returns2 failed", task2.status == 'FAILED'),
            ("no_returns1 doesn't have a scheduler_run record",
             len(task_run1) == 0),
            ("no_returns2 has a scheduler_run record FAILED",
             (len(task_run2) == 1 and task_run2[0].status == 'FAILED')),
        ]
        self.exec_asserts(res, 'NO_RETURN')

        # timeout check
        task1 = s.task_status(timeout1.id, output=True)
        task2 = s.task_status(timeout2.id, output=True)
        res = [("tasks timeouts1 timeoutted",
                task1.scheduler_task.status == 'TIMEOUT'),
               ("tasks timeouts2 completed",
                task2.scheduler_task.status == 'COMPLETED')]
        self.exec_asserts(res, 'TIMEOUT')

        # progress check
        task1 = s.task_status(progress.id, output=True)
        res = [("tasks percentages completed",
                task1.scheduler_task.status == 'COMPLETED'),
               ("output contains only 100%",
                task1.scheduler_run.run_output.strip() == "100%")]
        self.exec_asserts(res, 'PROGRESS')
Example #40
            log_in_file('Course %s being closed.' % course_class.course.id,
                        path='/tmp/scheduler.log')
            course_class.update_record(status=2)
        elif (int(course_class.status) == 3 and
              course_class.start_date <= request.now.date()):
            log_in_file('Course %s in progress.' % course_class.course.id,
                        path='/tmp/scheduler.log')
            course_class.update_record(status=1)
    db.commit()
    log_in_file('All status updated!', path='/tmp/scheduler.log')


scheduler = Scheduler(sch_db,
                      tasks=dict(check_classes_status=check_classes_status))

if sch_db(sch_db.scheduler_task).count() == 0:
    ## run check_classes_status once per day (86400 seconds = 24 hours)
    ## repeats = 0 means it will repeat forever
    ## it starts at midnight after you have created it
    import datetime
    today_midnight = datetime.datetime.now().replace(hour=0,
                                                     minute=0,
                                                     second=0,
                                                     microsecond=0)
    start_time = today_midnight + datetime.timedelta(days=1)
    sched = scheduler.queue_task('check_classes_status',
                                 start_time=start_time,
                                 period=86400,
                                 repeats=0)
    log_in_file('New scheduler created: ID %d' % sched.id,
                path='/tmp/scheduler.log')
Example #41
            db.export_to_csv_file(backup)
    except Exception as error:
        logger.exception(str(error))
        raise
    logger.info('Backup completed')
    return True


def delete_sessions():
    single_loop(auth.settings.expiration)


scheduler = Scheduler(db)

# schedule the backup
if not db((db.scheduler_task.task_name == 'db_export')).select():
    scheduler.queue_task(db_export,
                         pvars={},
                         timeout=60 * 10,
                         period=((60*60) * backup_hours),
                         repeats=0,
                         retry_failed=5)

# schedule the cleaning of the sessions
if not db((db.scheduler_task.task_name == 'delete_sessions')).select():
    scheduler.queue_task(delete_sessions,
                         pvars={},
                         timeout=60 * 10,
                         period=((60*60) * sessions_clean_hours),
                         repeats=0,
                         retry_failed=5)
Example #42
            now_time = datetime.now()
            if status and len(user_exist):
                if not len(timeline_table) or timeline_table[-1][
                        'end_time']:  # no timeline rows yet, or the last one is already closed
                    logger.debug('Insert')
                    db.timeline.insert(week_day=now_time.strftime('%A %d %b'),
                                       user_extra_id=user['id'],
                                       start_time=now_time.isoformat())
                    db.commit()
                else:
                    continue
            elif len(user_exist):
                if (len(timeline_table) and timeline_table[-1]['start_time']
                        and not timeline_table[-1]['end_time']):
                    logger.debug('Update')
                    timeline_table[-1].end_time = now_time.isoformat()
                    timeline_table[-1].update_record()
        elif type == 'facebook':
            pass

    return True


# def write_to_db():
#     if check_online(user_id):
from gluon.scheduler import Scheduler
scheduler = Scheduler(db)

# scheduler.queue_task(task_add,pvars=dict(a=1,b=2))
scheduler.queue_task(check_online)  # pass the function itself, not the result of calling it
Example #43
class S3Task(object):
    """ Asynchronous Task Execution """

    TASK_TABLENAME = "scheduler_task"

    # -------------------------------------------------------------------------
    def __init__(self):

        migrate = current.deployment_settings.get_base_migrate()
        tasks = current.response.s3.tasks

        # Instantiate Scheduler
        try:
            from gluon.scheduler import Scheduler
        except ImportError:
            # Warning should already have been given by eden_update_check.py
            self.scheduler = None
        else:
            self.scheduler = Scheduler(current.db, tasks, migrate=migrate)

    # -------------------------------------------------------------------------
    def configure_tasktable_crud(
            self,
            task=None,
            function=None,
            args=None,
            vars=None,
            period=3600,  # seconds, so 1 hour
    ):
        """
            Configure the task table for interactive CRUD,
            setting defaults, widgets and hiding unnecessary fields

            @param task: the task name (will use a UUID if omitted)
            @param function: the function name (won't hide if omitted)
            @param args: the function position arguments
            @param vars: the function named arguments
        """

        if args is None:
            args = []
        if vars is None:
            vars = {}

        T = current.T
        NONE = current.messages["NONE"]
        UNLIMITED = T("unlimited")

        tablename = self.TASK_TABLENAME
        table = current.db[tablename]

        table.uuid.readable = table.uuid.writable = False

        table.prevent_drift.readable = table.prevent_drift.writable = False

        table.sync_output.readable = table.sync_output.writable = False

        table.times_failed.readable = False

        field = table.start_time
        field.represent = lambda dt: \
            S3DateTime.datetime_represent(dt, utc=True)
        field.widget = S3DateTimeWidget(past=0)
        field.requires = IS_UTC_DATETIME(
            format=current.deployment_settings.get_L10n_datetime_format())

        field = table.stop_time
        field.represent = lambda dt: \
            S3DateTime.datetime_represent(dt, utc=True)
        field.widget = S3DateTimeWidget(past=0)
        field.requires = IS_EMPTY_OR(
            IS_UTC_DATETIME(
                format=current.deployment_settings.get_L10n_datetime_format()))

        if not task:
            import uuid
            task = str(uuid.uuid4())
        field = table.task_name
        field.default = task
        field.readable = False
        field.writable = False

        if function:
            field = table.function_name
            field.default = function
            field.readable = False
            field.writable = False

        field = table.args
        field.default = json.dumps(args)
        field.readable = False
        field.writable = False

        field = table.repeats
        field.label = T("Repeat")
        field.comment = T("times (0 = unlimited)")
        field.default = 0
        field.represent = lambda opt: \
            opt and "%s %s" % (opt, T("times")) or \
            opt == 0 and UNLIMITED or \
            NONE

        field = table.period
        field.label = T("Run every")
        field.default = period
        field.widget = S3TimeIntervalWidget.widget
        field.requires = IS_TIME_INTERVAL_WIDGET(table.period)
        field.represent = S3TimeIntervalWidget.represent
        field.comment = None

        table.timeout.default = 600
        table.timeout.represent = lambda opt: \
            opt and "%s %s" % (opt, T("seconds")) or \
            opt == 0 and UNLIMITED or \
            NONE

        field = table.vars
        field.default = json.dumps(vars)
        field.readable = field.writable = False

        # Always use "default" controller (web2py uses current controller),
        # otherwise the anonymous worker does not pass the controller
        # permission check and gets redirected to login before it reaches
        # the task function which does the s3_impersonate
        field = table.application_name
        field.default = "%s/default" % current.request.application
        field.readable = field.writable = False
        table.group_name.readable = table.group_name.writable = False
        table.status.readable = table.status.writable = False
        table.next_run_time.readable = table.next_run_time.writable = False
        table.times_run.readable = table.times_run.writable = False
        table.assigned_worker_name.readable = \
            table.assigned_worker_name.writable = False

        current.s3db.configure(
            tablename,
            list_fields=[
                "id", "enabled", "start_time", "repeats", "period",
                (T("Last run"), "last_run_time"), (T("Last status"), "status"),
                (T("Next run"), "next_run_time"), "stop_time"
            ],
        )

        response = current.response
        if response:
            response.s3.crud_strings[tablename] = Storage(
                label_create=T("Create Job"),
                title_display=T("Scheduled Jobs"),
                title_list=T("Job Schedule"),
                title_update=T("Edit Job"),
                label_list_button=T("List Jobs"),
                msg_record_created=T("Job added"),
                msg_record_modified=T("Job updated"),
                msg_record_deleted=T("Job deleted"),
                msg_list_empty=T("No jobs configured yet"),
                msg_no_match=T("No jobs configured"))

        return

    # -------------------------------------------------------------------------
    # API Function run within the main flow of the application
    # -------------------------------------------------------------------------
    # NB named run_async because "async" is a reserved word in Python 3
    def run_async(self, task, args=None, vars=None, timeout=300):
        """
            Wrapper to call an asynchronous task.
            - run from the main request

            @param task: The function which should be run
                         - async if a worker is alive
            @param args: The list of unnamed args to send to the function
            @param vars: The list of named vars to send to the function
            @param timeout: The length of time available for the task to complete
                            - default 300s (5 mins)
        """

        if args is None:
            args = []
        if vars is None:
            vars = {}

        # Check that task is defined
        tasks = current.response.s3.tasks
        if not tasks:
            return False
        if task not in tasks:
            return False

        # Check that worker is alive
        if not self._is_alive():
            # Run the task synchronously
            _args = []
            for arg in args:
                if isinstance(arg, (int, float)):
                    _args.append(str(arg))
                elif isinstance(arg, str):
                    _args.append(json.dumps(arg))
                else:
                    error = "Unhandled arg type: %s" % arg
                    current.log.error(error)
                    raise HTTP(501, error)
            args = ",".join(_args)
            _vars = ",".join(
                ["%s=%s" % (str(var), str(vars[var])) for var in vars])
            if args:
                statement = "tasks['%s'](%s,%s)" % (task, args, _vars)
            else:
                statement = "tasks['%s'](%s)" % (task, _vars)
            # Handle JSON
            null = None
            exec(statement)
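            # NB a direct call would be equivalent and avoids building source
            # text (sketch, assuming the original args/vars values were kept):
            #     tasks[task](*args, **vars)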
            return None

        auth = current.auth
        if auth.is_logged_in():
            # Add the current user to the vars
            vars["user_id"] = auth.user.id

        # Run the task asynchronously
        record = current.db.scheduler_task.insert(
            application_name="%s/default" % current.request.application,
            task_name=task,
            function_name=task,
            args=json.dumps(args),
            vars=json.dumps(vars),
            timeout=timeout)

        # Return record so that status can be polled
        return record

    # -------------------------------------------------------------------------
    def schedule_task(
            self,
            task,
            args=None,  # args to pass to the task
            vars=None,  # vars to pass to the task
            function_name=None,
            start_time=None,
            next_run_time=None,
            stop_time=None,
            repeats=None,
            period=None,
            timeout=None,
            enabled=None,  # None = Enabled
            group_name=None,
            ignore_duplicate=False,
            sync_output=0,
            report_progress=False):
        """
            Schedule a task in web2py Scheduler

            @param task: name of the function/task to be scheduled
            @param args: args to be passed to the scheduled task
            @param vars: vars to be passed to the scheduled task
            @param function_name: function name (if different from task name)
            @param start_time: start_time for the scheduled task
            @param next_run_time: next_run_time for the scheduled task
            @param stop_time: stop_time for the scheduled task
            @param repeats: number of times the task is to be repeated (0=unlimited)
            @param period: time period between two consecutive runs (seconds)
            @param timeout: set timeout for a running task
            @param enabled: enabled flag for the scheduled task
            @param group_name: group_name for the scheduled task
            @param ignore_duplicate: disable or enable duplicate checking
            @param sync_output: sync output every n seconds (0 = disable sync)
            @param report_progress: poll the task status every second for a
                                    short period and log it (see check_status)
        """

        if args is None:
            args = []
        if vars is None:
            vars = {}

        kwargs = {}

        if function_name is None:
            function_name = task

        # storing valid keyword arguments only if they are provided
        if start_time:
            kwargs["start_time"] = start_time

        if next_run_time:
            kwargs["next_run_time"] = next_run_time
        elif start_time:
            # default it to start_time
            kwargs["next_run_time"] = start_time

        if stop_time:
            kwargs["stop_time"] = stop_time
        elif start_time:
            # default it to one day ahead of given start_time
            if not isinstance(start_time, datetime.datetime):
                start_time = datetime.datetime.strptime(
                    start_time, "%Y-%m-%d %H:%M:%S")
            kwargs["stop_time"] = start_time + datetime.timedelta(days=1)

        if repeats is not None:
            kwargs["repeats"] = repeats

        if period:
            kwargs["period"] = period

        if timeout:
            kwargs["timeout"] = timeout

        if enabled is not None:
            # NB None => enabled
            kwargs["enabled"] = enabled

        if group_name:
            kwargs["group_name"] = group_name

        if not ignore_duplicate and self._duplicate_task_exists(
                task, args, vars):
            # if duplicate task exists, do not insert a new one
            current.log.warning("Duplicate Task, Not Inserted", value=task)
            return False

        if sync_output != 0:
            kwargs["sync_output"] = sync_output

        auth = current.auth
        if auth.is_logged_in():
            # Add the current user to the vars
            vars["user_id"] = auth.user.id

        # Add to DB for pickup by Scheduler task
        record = self.scheduler.queue_task(task,
                                           args,
                                           vars,
                                           **kwargs)
        if report_progress:
            log_name = datetime.datetime.now() \
                .strftime("%y-%m-%d-%H-%M") + "_" + task + ".txt"
            from time import sleep
            rt = RepeatedTimer(1, self.check_status, log_name, record.id,
                               self.scheduler, task, current.request.folder)
            try:
                '''
                While the task is running..? What if it
                never gets out of a QUEUED state?
                Every second for 15 seconds, check
                the task's status
                '''
                sleep(15)
            finally:
                rt.stop()
        return record
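
    # RepeatedTimer is not defined in this snippet; a minimal sketch of the
    # helper assumed above (calls function(*args) every interval seconds
    # until stop()):
    #     from threading import Timer
    #     class RepeatedTimer(object):
    #         def __init__(self, interval, function, *args):
    #             self.interval = interval
    #             self.function = function
    #             self.args = args
    #             self.running = False
    #             self.start()
    #         def _run(self):
    #             self.running = False
    #             self.start()
    #             self.function(*self.args)
    #         def start(self):
    #             if not self.running:
    #                 self.timer = Timer(self.interval, self._run)
    #                 self.timer.start()
    #                 self.running = True
    #         def stop(self):
    #             self.timer.cancel()
    #             self.running = False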

    # -------------------------------------------------------------------------
    def check_status(self, log_name, task_id, scheduler, task_name, folder):
        import os
        log_path = os.path.join(folder, "logs", "tasks")
        from gluon import DAL, Field
        '''
        If we use current.db here instead of getting a
        new handle to the db, the task that we
        previously queued won't get inserted into the db
        so every call we make in this method to check
        on the task's status will always result in the task being in
        the 'QUEUED' state.
        '''
        db = DAL('sqlite://storage.db',
                 folder='applications/eden/databases',
                 auto_import=True)
        table = db.scheduler_task
        query = (table.id == task_id)
        task_status = None
        try:
            task_status = db(query).select(table.status).first().status
        except AttributeError:
            task_status = 'Unknown (task not yet in db)'
        '''
        This is the preferred way to check a task's status since
        it's using the web2py API, but we can't use this
        because the scheduler is pointing to
        current.db (see above comment):
        task_status = scheduler.task_status(task_id, output=True)
        print task_status.scheduler_task.status
        print task_status.result
        print task_status.scheduler_run.run_output
        '''

        if not os.path.exists(log_path):
            os.makedirs(log_path)

        with open(os.path.join(log_path, log_name), "a+") as log:
            log.write('<%s>: %s is currently in the %s state\n' %
                      (datetime.datetime.now(), task_name, task_status))

    # -------------------------------------------------------------------------
    def _duplicate_task_exists(self, task, args, vars):
        """
            Checks whether the given task is already in the Scheduler with the
            same args and vars (i.e. currently queued, allocated or running)

            @param task: name of the task function
            @param args: the job position arguments (list)
            @param vars: the job named arguments (dict)
        """

        db = current.db
        ttable = db.scheduler_task

        _args = json.dumps(args)

        query = ((ttable.function_name == task) & \
                 (ttable.args == _args) & \
                 (ttable.status.belongs(["RUNNING", "QUEUED", "ALLOCATED"])))
        jobs = db(query).select(ttable.vars)
        for job in jobs:
            job_vars = json.loads(job.vars)
            if job_vars == vars:
                return True
        return False

    # -------------------------------------------------------------------------
    def _is_alive(self):
        """
            Returns True if there is at least 1 active worker to run scheduled tasks
            - run from the main request

            NB Can't run this 1/request at the beginning since the tables
               only get defined in zz_last
        """

        #if self.scheduler:
        #    return self.scheduler.is_alive()
        #else:
        #    return False

        db = current.db
        cache = current.response.s3.cache
        now = datetime.datetime.now()

        offset = datetime.timedelta(minutes=1)
        table = db.scheduler_worker
        query = (table.last_heartbeat > (now - offset))
        worker_alive = db(query).select(table.id, limitby=(0, 1),
                                        cache=cache).first()
        return worker_alive is not None

    # -------------------------------------------------------------------------
    @staticmethod
    def reset(task_id):
        """
            Reset the status of a task to QUEUED after FAILED

            @param task_id: the task record ID
        """

        db = current.db
        ttable = db.scheduler_task

        query = (ttable.id == task_id) & (ttable.status == "FAILED")
        task = db(query).select(ttable.id, limitby=(0, 1)).first()
        if task:
            task.update_record(status="QUEUED")

    # =========================================================================
    # Functions run within the Task itself
    # =========================================================================
    def authenticate(self, user_id):
        """
            Activate the authentication passed from the caller to this new request
            - run from within the task

            NB This is so simple that we don't normally run via this API
               - this is just kept as an example of what needs to happen within the task
        """

        current.auth.s3_impersonate(user_id)
Example #44
                                        vnc_access=check_vnc_access,
                                        host_sanity=host_sanity_check,
                                        vm_util_rrd=vm_utilization_rrd,
                                        vm_daily_checks=process_vmdaily_checks,
                                        vm_purge_unused=process_unusedvm_purge,
                                        memory_overload=overload_memory,
                                        networking_host=host_networking),
                             group_names=['vm_task', 'vm_sanity', 'host_task', 'vm_rrd', 'snapshot_task'])


midnight_time = request.now.replace(hour=23, minute=59, second=59)

vm_scheduler.queue_task(TASK_SNAPSHOT, 
                    pvars = dict(snapshot_type = SNAPSHOT_DAILY),
                    repeats = 0, # run indefinitely
                    start_time = midnight_time, 
                    period = 24 * HOURS, # every 24h
                    timeout = 5 * MINUTES,
                    uuid = UUID_SNAPSHOT_DAILY,
                    group_name = 'snapshot_task')

vm_scheduler.queue_task(TASK_SNAPSHOT, 
                    pvars = dict(snapshot_type = SNAPSHOT_WEEKLY),
                    repeats = 0, # run indefinitely
                    start_time = midnight_time, 
                    period = 7 * DAYS, # every 7 days
                    timeout = 5 * MINUTES,
                    uuid = UUID_SNAPSHOT_WEEKLY,
                    group_name = 'snapshot_task')

vm_scheduler.queue_task(TASK_SNAPSHOT, 
                    pvars = dict(snapshot_type = SNAPSHOT_MONTHLY),