    def testBasic(self):
        s = Scheduler(self.db)
        foo = s.queue_task('foo')
        self.db.commit()
        self.writefunction(r"""
def foo():
    return 'a'
""")
        ret = self.exec_sched()
        # process finished just fine
        self.assertEqual(ret, 0)
        info = s.task_status(foo.id, output=True)
        self.assertEqual(info.result, 'a')
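A note on the cycle this test exercises: queue a task by function name, commit so a worker process can see it, then read the status back. A minimal sketch of the same flow outside the test harness, assuming a web2py db and a separately started worker:

from gluon.scheduler import Scheduler

s = Scheduler(db)
task = s.queue_task('foo')   # queue by function name
db.commit()                  # workers only see committed tasks
# ... once a worker has picked the task up and run it ...
info = s.task_status(task.id, output=True)
print(info.scheduler_task.status, info.result)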
Example #2
    def testRetryFailed(self):
        s = Scheduler(self.db)
        failed = s.queue_task('demo2', retry_failed=1, period=1)
        failed_consecutive = s.queue_task('demo8', retry_failed=2, repeats=2, period=1)
        self.db.commit()
        self.writefunction(r"""
def demo2():
    1/0

def demo8():
    placeholder = os.path.join(request.folder, 'private', 'demo8.pholder')
    with open(placeholder, 'a') as g:
        g.write('\nplaceholder for demo8 created')
    num_of_lines = 0
    with open(placeholder) as f:
        num_of_lines = len([a for a in f.read().split('\n') if a])
    print('number of lines', num_of_lines)
    if num_of_lines <= 2:
       1/0
    else:
        os.unlink(placeholder)
    return 1
""")
        ret = self.exec_sched()
        # process finished just fine
        self.assertEqual(ret, 0)
        # failed - checks
        task, task_run = self.fetch_results(s, failed)
        res = [
            ("task status failed", task.status == 'FAILED'),
            ("task times_run is 0", task.times_run == 0),
            ("task times_failed is 2", task.times_failed == 2),
            ("task ran 2 times only", len(task_run) == 2),
            ("scheduler_run records are FAILED", (task_run[0].status == task_run[1].status == 'FAILED')),
            ("period is respected", (task_run[1].start_time > task_run[0].start_time + datetime.timedelta(seconds=task.period)))
        ]
        self.exec_asserts(res, 'FAILED')

        # failed consecutive - checks
        task, task_run = self.fetch_results(s, failed_consecutive)
        res = [
            ("task status completed", task.status == 'COMPLETED'),
            ("task times_run is 2", task.times_run == 2),
            ("task times_failed is 0", task.times_failed == 0),
            ("task ran 6 times", len(task_run) == 6),
            ("scheduler_run records for COMPLETED is 2", len([run.status for run in task_run if run.status == 'COMPLETED']) == 2),
            ("scheduler_run records for FAILED is 4", len([run.status for run in task_run if run.status == 'FAILED']) == 4),
        ]
        self.exec_asserts(res, 'FAILED_CONSECUTIVE')
Example #4
    def testJobGraph(self):
        s = Scheduler(self.db)
        myjob = JobGraph(self.db, 'job_1')
        fname = 'foo'
        # We have a few items to wear, and there's an "order" to respect...
        # Items are: watch, jacket, shirt, tie, pants, undershorts, belt, shoes, socks
        # Now, we can't put on the tie without wearing the shirt first, etc...
        watch = s.queue_task(fname, task_name='watch')
        jacket = s.queue_task(fname, task_name='jacket')
        shirt = s.queue_task(fname, task_name='shirt')
        tie = s.queue_task(fname, task_name='tie')
        pants = s.queue_task(fname, task_name='pants')
        undershorts = s.queue_task(fname, task_name='undershorts')
        belt = s.queue_task(fname, task_name='belt')
        shoes = s.queue_task(fname, task_name='shoes')
        socks = s.queue_task(fname, task_name='socks')
        # before the tie, comes the shirt
        myjob.add_deps(tie.id, shirt.id)
        # before the belt too comes the shirt
        myjob.add_deps(belt.id, shirt.id)
        # before the jacket, comes the tie
        myjob.add_deps(jacket.id, tie.id)
        # before the belt, come the pants
        myjob.add_deps(belt.id, pants.id)
        # before the shoes, comes the pants
        myjob.add_deps(shoes.id, pants.id)
        # before the pants, comes the undershorts
        myjob.add_deps(pants.id, undershorts.id)
        # before the shoes, comes the undershorts
        myjob.add_deps(shoes.id, undershorts.id)
        # before the jacket, comes the belt
        myjob.add_deps(jacket.id, belt.id)
        # before the shoes, comes the socks
        myjob.add_deps(shoes.id, socks.id)

        ## results in the following topological sort
        # 9,3,6 --> 4,5 --> 8,7 --> 2
        # socks, shirt, undershorts
        # tie, pants
        # shoes, belt
        # jacket
        known_toposort = [
            set([socks.id, shirt.id, undershorts.id]),
            set([tie.id, pants.id]),
            set([shoes.id, belt.id]),
            set([jacket.id])
        ]
        toposort = myjob.validate('job_1')
        self.assertEqual(toposort, known_toposort)
        # add a cyclic dependency, jacket to undershorts
        myjob.add_deps(undershorts.id, jacket.id)
        # no exceptions raised, but result None
        self.assertEqual(myjob.validate('job_1'), None)
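In short, add_deps(a, b) records that task a must wait for task b, and validate() returns the topological order as a list of sets of task ids, or None when the graph is cyclic. A minimal sketch of the same API (assuming JobGraph is importable from gluon.scheduler alongside Scheduler, as these tests imply):

from gluon.scheduler import Scheduler, JobGraph

s = Scheduler(db)
job = JobGraph(db, 'minimal_job')
first = s.queue_task('foo', task_name='first')
second = s.queue_task('foo', task_name='second')
# before second, comes first
job.add_deps(second.id, first.id)
# prerequisites come first in the returned ordering
assert job.validate('minimal_job') == [set([first.id]), set([second.id])]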
Example #5
 def testJobGraphDifferentJobs(self):
     s = Scheduler(self.db)
     myjob1 = JobGraph(self.db, 'job_1')
     myjob2 = JobGraph(self.db, 'job_2')
     fname = 'foo'
     # We have a few items to wear, and there's an "order" to respect...
     # Items are: watch, jacket, shirt, tie, pants, undershorts, belt, shoes, socks
     # Now, we can't put on the tie without wearing the shirt first, etc...
     watch = s.queue_task(fname, task_name='watch')
     jacket = s.queue_task(fname, task_name='jacket')
     shirt = s.queue_task(fname, task_name='shirt')
     tie = s.queue_task(fname, task_name='tie')
     pants = s.queue_task(fname, task_name='pants')
     undershorts = s.queue_task(fname, task_name='undershorts')
     belt = s.queue_task(fname, task_name='belt')
     shoes = s.queue_task(fname, task_name='shoes')
     socks = s.queue_task(fname, task_name='socks')
     # before the tie, comes the shirt
     myjob1.add_deps(tie.id, shirt.id)
     # before the belt too comes the shirt
     myjob1.add_deps(belt.id, shirt.id)
     # before the jacket, comes the tie
     myjob1.add_deps(jacket.id, tie.id)
     # before the belt, come the pants
     myjob1.add_deps(belt.id, pants.id)
     # before the shoes, comes the pants
     myjob2.add_deps(shoes.id, pants.id)
     # before the pants, comes the undershorts
     myjob2.add_deps(pants.id, undershorts.id)
     # before the shoes, comes the undershorts
     myjob2.add_deps(shoes.id, undershorts.id)
     # before the jacket, comes the belt
     myjob2.add_deps(jacket.id, belt.id)
     # before the shoes, comes the socks
     myjob2.add_deps(shoes.id, socks.id)
     # every job by itself can be completed
     self.assertNotEqual(myjob1.validate('job_1'), None)
     self.assertNotEqual(myjob1.validate('job_2'), None)
     # and, implicitly, every queued task can be too
     self.assertNotEqual(myjob1.validate(), None)
     # add a cyclic dependency, jacket to undershorts
     myjob2.add_deps(undershorts.id, jacket.id)
     # every job can still be completed by itself
     self.assertNotEqual(myjob1.validate('job_1'), None)
     self.assertNotEqual(myjob1.validate('job_2'), None)
     # but trying to see if every task will ever be completed fails
     self.assertEqual(myjob2.validate(), None)
Example #6
    def __init__(self):

        migrate = current.deployment_settings.get_base_migrate()
        tasks = current.response.s3.tasks

        # Instantiate Scheduler
        try:
            from gluon.scheduler import Scheduler
        except ImportError:
            # Warning should already have been given by eden_update_check.py
            self.scheduler = None
        else:
            self.scheduler = Scheduler(current.db,
                                       tasks,
                                       migrate = migrate,
                                       #use_spawn = True # Possible subprocess method with Py3
                                       )
Example #7
    def testQueue_Task(self):
        def isnotqueued(result):
            self.assertEqual(result.id, None)
            self.assertEqual(result.uuid, None)
            self.assertEqual(len(result.errors.keys()) > 0, True)

        def isqueued(result):
            self.assertNotEqual(result.id, None)
            self.assertNotEqual(result.uuid, None)
            self.assertEqual(len(result.errors.keys()), 0)

        s = Scheduler(self.db)
        fname = 'foo'
        watch = s.queue_task(fname, task_name='watch')
        # queuing a task returns id, errors, uuid
        self.assertEqual(set(watch.keys()), set(['id', 'uuid', 'errors']))
        # queueing nothing isn't allowed
        self.assertRaises(TypeError, s.queue_task, *[])
        # passing pargs and pvars wrongly
        # # pargs as dict
        isnotqueued(s.queue_task(fname, dict(a=1), dict(b=1)))
        # # pvars as list
        isnotqueued(s.queue_task(fname, ['foo', 'bar'], ['foo', 'bar']))
        # two tasks with the same uuid won't be there
        isqueued(s.queue_task(fname, uuid='a'))
        isnotqueued(s.queue_task(fname, uuid='a'))
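For contrast with the negative checks above, the accepted shapes are a list of positional arguments and a dict of keyword arguments. A short sketch (the pargs/pvars keyword names are taken from the web2py API used elsewhere in these examples; other tests pass them positionally):

# positional args as a list, keyword args as a dict
s.queue_task('foo', ['a', 'b'], dict(c=1))
# equivalently, by keyword
s.queue_task('foo', pargs=['a', 'b'], pvars=dict(c=1))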
Example #8
    def testNoReturn_and_Timeout_and_Progress(self):
        s = Scheduler(self.db)
        noret1 = s.queue_task('demo5')
        noret2 = s.queue_task('demo3')
        timeout1 = s.queue_task('demo4', timeout=5)
        timeout2 = s.queue_task('demo4')
        progress = s.queue_task('demo6', sync_output=2)
        self.db.commit()
        self.writefunction(r"""
def demo3():
    time.sleep(15)
    print(1/0)
    return None

def demo4():
    time.sleep(15)
    print("I'm printing something")
    return dict(a=1, b=2)

def demo5():
    time.sleep(15)
    print("I'm printing something")
    rtn = dict(a=1, b=2)

def demo6():
    time.sleep(5)
    print('50%')
    time.sleep(5)
    print('!clear!100%')
    return 1
""")
        ret = self.exec_sched()
        self.assertEqual(ret, 0)
        # noreturn check
        task1, task_run1 = self.fetch_results(s, noret1)
        task2, task_run2 = self.fetch_results(s, noret2)
        res = [
            ("tasks no_returns1 completed", task1.status == 'COMPLETED'),
            ("tasks no_returns2 failed", task2.status == 'FAILED'),
            ("no_returns1 doesn't have a scheduler_run record", len(task_run1) == 0),
            ("no_returns2 has a scheduler_run record FAILED", (len(task_run2) == 1 and task_run2[0].status == 'FAILED')),
        ]
        self.exec_asserts(res, 'NO_RETURN')

        # timeout check
        task1 = s.task_status(timeout1.id, output=True)
        task2 = s.task_status(timeout2.id, output=True)
        res = [
            ("tasks timeouts1 timeoutted", task1.scheduler_task.status == 'TIMEOUT'),
            ("tasks timeouts2 completed", task2.scheduler_task.status == 'COMPLETED')
        ]
        self.exec_asserts(res, 'TIMEOUT')

        # progress check
        task1 = s.task_status(progress.id, output=True)
        res = [
            ("tasks percentages completed", task1.scheduler_task.status == 'COMPLETED'),
            ("output contains only 100%", task1.scheduler_run.run_output.strip() == "100%")
        ]
        self.exec_asserts(res, 'PROGRESS')
Example #9
    def testRegressions(self):
        s = Scheduler(self.db)
        huge_result = s.queue_task('demo10', retry_failed=1, period=1)
        issue_1485 = s.queue_task('issue_1485')
        termination = s.queue_task('termination')
        self.db.commit()
        self.writefunction(r"""
def demo10():
    res = 'a' * 99999
    return dict(res=res)

def issue_1485():
    return response.render('issue_1485.html', dict(variable='abc'))
""")
        self.writeview(r"""<span>{{=variable}}</span>""", 'issue_1485.html')
        ret = self.exec_sched()
        # process finished just fine
        self.assertEqual(ret, 0)
        # huge_result - checks
        task_huge = s.task_status(huge_result.id, output=True)
        res = [
            ("task status completed", task_huge.scheduler_task.status == 'COMPLETED'),
            ("task times_run is 1", task_huge.scheduler_task.times_run == 1),
            ("result is the correct one", task_huge.result == dict(res='a' * 99999))
        ]
        self.exec_asserts(res, 'HUGE_RESULT')

        task_issue_1485 = s.task_status(issue_1485.id, output=True)
        res = [
            ("task status completed", task_issue_1485.scheduler_task.status == 'COMPLETED'),
            ("task times_run is 1", task_issue_1485.scheduler_task.times_run == 1),
            ("result is the correct one", task_issue_1485.result == '<span>abc</span>')
        ]
        self.exec_asserts(res, 'issue_1485')
Example #11
    def testHugeResult(self):
        s = Scheduler(self.db)
        huge_result = s.queue_task('demo10', retry_failed=1, period=1)
        self.db.commit()
        self.writefunction(r"""
def demo10():
    res = 'a' * 99999
    return dict(res=res)
""")
        ret = self.exec_sched()
        # process finished just fine
        self.assertEqual(ret, 0)
        # huge_result - checks
        task = s.task_status(huge_result.id, output=True)
        res = [
            ("task status completed", task.scheduler_task.status == 'COMPLETED'),
            ("task times_run is 1", task.scheduler_task.times_run == 1),
            ("result is the correct one", task.result == dict(res='a' * 99999))
        ]
        self.exec_asserts(res, 'HUGE_RESULT')
Example #13
 def testJobGraphFailing(self):
     s = Scheduler(self.db)
     myjob = JobGraph(self.db, 'job_1')
     fname = 'foo'
     # We have a few items to wear, and there's an "order" to respect...
     # Items are: watch, jacket, shirt, tie, pants, undershorts, belt, shoes, socks
     # Now, we can't put on the tie without wearing the shirt first, etc...
     watch = s.queue_task(fname, task_name='watch')
     jacket = s.queue_task(fname, task_name='jacket')
     shirt = s.queue_task(fname, task_name='shirt')
     tie = s.queue_task(fname, task_name='tie')
     pants = s.queue_task(fname, task_name='pants')
     undershorts = s.queue_task(fname, task_name='undershorts')
     belt = s.queue_task(fname, task_name='belt')
     shoes = s.queue_task(fname, task_name='shoes')
     socks = s.queue_task(fname, task_name='socks')
     # before the tie, comes the shirt
     myjob.add_deps(tie.id, shirt.id)
     # before the belt too comes the shirt
     myjob.add_deps(belt.id, shirt.id)
     # before the jacket, comes the tie
     myjob.add_deps(jacket.id, tie.id)
     # before the belt, come the pants
     myjob.add_deps(belt.id, pants.id)
     # before the shoes, comes the pants
     myjob.add_deps(shoes.id, pants.id)
     # before the pants, comes the undershorts
     myjob.add_deps(pants.id, undershorts.id)
     # before the shoes, comes the undershorts
     myjob.add_deps(shoes.id, undershorts.id)
     # before the jacket, comes the belt
     myjob.add_deps(jacket.id, belt.id)
     # before the shoes, comes the socks
     myjob.add_deps(shoes.id, socks.id)
     # add a cyclic dependency, jacket to undershorts
     myjob.add_deps(undershorts.id, jacket.id)
     # no exceptions raised, but result None
     self.assertEqual(myjob.validate('job_1'), None)
     # and no deps added
     deps_inserted = self.db(self.db.scheduler_task_deps.id > 0).count()
     self.assertEqual(deps_inserted, 0)
Example #17
    def testDrift_and_env_and_immediate(self):
        s = Scheduler(self.db)
        immediate = s.queue_task('demo1', ['a', 'b'],
                                 dict(c=1, d=2),
                                 immediate=True)
        env = s.queue_task('demo7')
        drift = s.queue_task('demo1', ['a', 'b'],
                             dict(c=1, d=2),
                             period=93,
                             prevent_drift=True)
        termination = s.queue_task('termination')
        self.db.commit()
        self.writefunction(r"""
def demo1(*args,**vars):
    print('you passed args=%s and vars=%s' % (args, vars))
    return args[0]
import random
def demo7():
    time.sleep(random.randint(1,5))
    print(W2P_TASK, request.now)
    return W2P_TASK.id, W2P_TASK.uuid, W2P_TASK.run_id
""")
        ret = self.exec_sched()
        self.assertEqual(ret, 0)
        # immediate check, can only check that nothing breaks
        task1 = s.task_status(immediate.id)
        res = [
            ("tasks status completed", task1.status == 'COMPLETED'),
        ]
        self.exec_asserts(res, 'IMMEDIATE')

        # drift check
        task, task_run = self.fetch_results(s, drift)
        res = [("task status completed", task.status == 'COMPLETED'),
               ("next_run_time is exactly start_time + period",
                (task.next_run_time == task.start_time +
                 datetime.timedelta(seconds=task.period)))]
        self.exec_asserts(res, 'DRIFT')

        # env check
        task1 = s.task_status(env.id, output=True)
        res = [
            ("task %s returned W2P_TASK correctly" % (task1.scheduler_task.id),
             task1.result == [
                 task1.scheduler_task.id, task1.scheduler_task.uuid,
                 task1.scheduler_run.id
             ]),
        ]
        self.exec_asserts(res, 'ENV')
Example #18
    def testRepeats_and_Expired_and_Prio(self):
        s = Scheduler(self.db)
        repeats = s.queue_task('demo1', ['a', 'b'],
                               dict(c=1, d=2),
                               repeats=2,
                               period=5)
        a_while_ago = datetime.datetime.now() - datetime.timedelta(seconds=60)
        expired = s.queue_task('demo4', stop_time=a_while_ago)
        prio1 = s.queue_task('demo1', ['scheduled_first'])
        prio2 = s.queue_task('demo1', ['scheduled_second'],
                             next_run_time=a_while_ago)
        self.db.commit()
        self.writefunction(r"""
def demo1(*args,**vars):
    print('you passed args=%s and vars=%s' % (args, vars))
    return args[0]

def demo4():
    time.sleep(15)
    print("I'm printing something")
    return dict(a=1, b=2)
""")
        ret = self.exec_sched()
        self.assertEqual(ret, 0)
        # repeats check
        task, task_run = self.fetch_results(s, repeats)
        res = [("task status completed", task.status == 'COMPLETED'),
               ("task times_run is 2", task.times_run == 2),
               ("task ran 2 times only", len(task_run) == 2),
               ("scheduler_run records are COMPLETED ",
                (task_run[0].status == task_run[1].status == 'COMPLETED')),
               ("period is respected",
                (task_run[1].start_time > task_run[0].start_time +
                 datetime.timedelta(seconds=task.period)))]
        self.exec_asserts(res, 'REPEATS')

        # expired check
        task, task_run = self.fetch_results(s, expired)
        res = [("task status expired", task.status == 'EXPIRED'),
               ("task times_run is 0", task.times_run == 0),
               ("task didn't run at all", len(task_run) == 0)]
        self.exec_asserts(res, 'EXPIRATION')

        # prio check
        task1 = s.task_status(prio1.id, output=True)
        task2 = s.task_status(prio2.id, output=True)
        res = [("tasks status completed", task1.scheduler_task.status ==
                task2.scheduler_task.status == 'COMPLETED'),
               ("priority2 was executed before priority1",
                task1.scheduler_run.id > task2.scheduler_run.id)]
        self.exec_asserts(res, 'PRIORITY')
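The priority check above relies on workers assigning tasks in next_run_time order, so backdating next_run_time is the idiomatic way to move a task ahead of already-queued ones. A minimal sketch of that trick:

import datetime

# backdate next_run_time so this task is assigned before older queued ones
a_while_ago = datetime.datetime.now() - datetime.timedelta(seconds=60)
urgent = s.queue_task('demo1', ['urgent'], next_run_time=a_while_ago)
db.commit()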
Example #20
    def testRetryFailed(self):
        s = Scheduler(self.db)
        failed = s.queue_task('demo2', retry_failed=1, period=5)
        failed_consecutive = s.queue_task('demo8', retry_failed=2, repeats=2, period=5)
        self.db.commit()
        self.writefunction(r"""

def demo2():
    1/0

def demo8():
    placeholder = os.path.join(request.folder, 'private', 'demo8.pholder')
    with open(placeholder, 'a') as g:
        g.write('\nplaceholder for demo8 created')
    num_of_lines = 0
    with open(placeholder) as f:
        num_of_lines = len([a for a in f.read().split('\n') if a])
    print('number of lines', num_of_lines)
    if num_of_lines <= 2:
       1/0
    else:
        os.unlink(placeholder)
    return 1
""")
        ret = self.exec_sched()
        # process finished just fine
        self.assertEqual(ret, 0)
        # failed - checks
        info = s.task_status(failed.id)
        task_runs = self.db(self.db.scheduler_run.task_id == info.id).select()
        res = [
            ("task status failed", info.status == 'FAILED'),
            ("task times_run is 0", info.times_run == 0),
            ("task times_failed is 2", info.times_failed == 2),
            ("task ran 2 times only", len(task_runs) == 2),
            ("scheduler_run records are FAILED", (task_runs[0].status == task_runs[1].status == 'FAILED')),
            ("period is respected", (task_runs[1].start_time > task_runs[0].start_time + datetime.timedelta(seconds=info.period)))
        ]
        for a in res:
            self.assertEqual(a[1], True, msg=a[0])

        # failed consecutive - checks
        info = s.task_status(failed_consecutive.id)
        task_runs = self.db(self.db.scheduler_run.task_id == info.id).select()
        res = [
            ("task status completed", info.status == 'COMPLETED'),
            ("task times_run is 2", info.times_run == 2),
            ("task times_failed is 0", info.times_failed == 0),
            ("task ran 6 times", len(task_runs) == 6),
            ("scheduler_run records for COMPLETED is 2", len([run.status for run in task_runs if run.status == 'COMPLETED']) == 2),
            ("scheduler_run records for FAILED is 4", len([run.status for run in task_runs if run.status == 'FAILED']) == 4),
        ]
        for a in res:
            self.assertEqual(a[1], True, msg=a[0])
Example #21
 def testTask_Status(self):
     s = Scheduler(self.db)
     fname = 'foo'
     watch = s.queue_task(fname, task_name='watch')
     # fetch status by id
     by_id = s.task_status(watch.id)
     # fetch status by uuid
     by_uuid = s.task_status(watch.uuid)
     # fetch status by query
     by_query = s.task_status(self.db.scheduler_task.function_name == 'foo')
     self.assertEqual(by_id, by_uuid)
     self.assertEqual(by_id, by_query)
     # fetch status by anything else throws
     self.assertRaises(SyntaxError, s.task_status, *[[1, 2]])
     # adding output returns the joined set, plus "result"
     rtn = s.task_status(watch.id, output=True)
     self.assertEqual(set(rtn.keys()), set(['scheduler_run', 'scheduler_task', 'result']))
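With output=True the returned row joins scheduler_task and scheduler_run and adds the decoded result, so fields can be read directly. A sketch, assuming (since this test never runs a worker) that the run-related fields are simply empty until a run exists:

rtn = s.task_status(watch.id, output=True)
print(rtn.scheduler_task.status)  # the task record, e.g. 'QUEUED'
print(rtn.scheduler_run)          # the run record; presumably None until a worker ran it
print(rtn.result)                 # decoded return value; None until then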
Example #25
# Enable the scheduler
from gluon.scheduler import Scheduler

scheduler = Scheduler(
    db_scheduler,
    max_empty_runs=0,
    heartbeat=3,
    group_names=[
        'process_videos', 'create_home_directory', 'wamap_delete',
        'wamap_videos', 'misc', "download_videos"
    ],
    tasks=dict(
        process_media_file=process_media_file,
        process_wamap_video_links=process_wamap_video_links,
        create_home_directory=create_home_directory,
        remove_old_wamap_video_files=remove_old_wamap_video_files,
        download_wamap_qimages=download_wamap_qimages,
        refresh_all_ad_logins=refresh_all_ad_logins,
        update_media_database_from_json_files=
        update_media_database_from_json_files,
        pull_youtube_video=pull_youtube_video,
        update_document_database_from_json_files=
        update_document_database_from_json_files,
        flush_redis_keys=flush_redis_keys,
        pull_youtube_caption=pull_youtube_caption,
    ))
current.scheduler = scheduler

# Make sure to run the ad login refresh every hour or so
refresh_ad_login = current.cache.ram('refresh_ad_login',
Example #26
import os
from os import path  # the code below uses bare path.join
import sys
import re
import json
import logging
from datetime import datetime

from paver.easy import sh
from pkg_resources import resource_string, resource_filename

import caliper
import requests

rslogger = logging.getLogger(settings.sched_logger)
rslogger.setLevel(settings.log_level)

scheduler = Scheduler(db, migrate='runestone_')

################
## This task will run as a scheduled task using the web2py scheduler.
## It's dispatched from build() and build_custom() in controllers/designer.py
################
def run_sphinx(rvars=None, folder=None, application=None, http_host=None, base_course=None):
    # workingdir is the application folder
    workingdir = folder
    # sourcedir temporarily holds all the sources
    sourcedir = path.join(workingdir, 'build', rvars['projectname'])

    rslogger.debug("Starting to build {}".format(rvars['projectname']))

    # create the custom_courses dir if it doesn't already exist
    if not os.path.exists(path.join(workingdir, 'custom_courses')):
Example #27
    def testNoReturn_and_Timeout_and_Progress(self):
        s = Scheduler(self.db)
        noret1 = s.queue_task('demo5')
        noret2 = s.queue_task('demo3')
        timeout1 = s.queue_task('demo4', timeout=5)
        timeout2 = s.queue_task('demo4')
        progress = s.queue_task('demo6', sync_output=2)
        termination = s.queue_task('termination')
        self.db.commit()
        self.writefunction(r"""
def demo3():
    time.sleep(3)
    print(1/0)
    return None

def demo4():
    time.sleep(15)
    print("I'm printing something")
    return dict(a=1, b=2)

def demo5():
    time.sleep(3)
    print("I'm printing something")
    rtn = dict(a=1, b=2)

def demo6():
    time.sleep(5)
    print('50%')
    time.sleep(5)
    print('!clear!100%')
    return 1
""")
        ret = self.exec_sched()
        self.assertEqual(ret, 0)
        # noreturn check
        task1, task_run1 = self.fetch_results(s, noret1)
        task2, task_run2 = self.fetch_results(s, noret2)
        res = [
            ("tasks no_returns1 completed", task1.status == 'COMPLETED'),
            ("tasks no_returns2 failed", task2.status == 'FAILED'),
            ("no_returns1 doesn't have a scheduler_run record",
             len(task_run1) == 0),
            ("no_returns2 has a scheduler_run record FAILED",
             (len(task_run2) == 1 and task_run2[0].status == 'FAILED')),
        ]
        self.exec_asserts(res, 'NO_RETURN')

        # timeout check
        task1 = s.task_status(timeout1.id, output=True)
        task2 = s.task_status(timeout2.id, output=True)
        res = [("tasks timeouts1 timeoutted",
                task1.scheduler_task.status == 'TIMEOUT'),
               ("tasks timeouts2 completed",
                task2.scheduler_task.status == 'COMPLETED')]
        self.exec_asserts(res, 'TIMEOUT')

        # progress check
        task1 = s.task_status(progress.id, output=True)
        res = [("tasks percentages completed",
                task1.scheduler_task.status == 'COMPLETED'),
               ("output contains only 100%",
                task1.scheduler_run.run_output.strip() == "100%")]
        self.exec_asserts(res, 'PROGRESS')
Example #28
    aggregateRows = aggregateRows.astype(float)
    # fit a k-nearest-neighbours classifier on the aggregated rows
    knnOut = aggregateRows[:, 0]
    knnIn = aggregateRows[:, 1:]
    knn = neighbors.KNeighborsClassifier(weights='distance', metric='minkowski')
    knn.fit(knnIn, knnOut)
    # build the feature row for this user (one row expected per user)
    aRow = []
    for row in db(db.user_interests.user_id == userid).select():
        aRow = []
        for field in db.knnRef.fields[2:]:
            aRow.append(row[field])
    aRow = np.array(aRow[1:]).astype(float)
    # recent scikit-learn versions expect a 2-D array for predict()
    clusterId = knn.predict(aRow.reshape(1, -1))
    nRow = db(db.user_clusters.user_id == userid).count()
    if nRow:
        temp = int(clusterId[0])
        db(db.user_clusters.user_id == userid).update(cluster_id=temp)
    else:
        db.user_clusters.insert(user_id=userid, cluster_id=int(clusterId[0]))
    return 0
    
    
#knnSelect(8)
#knnMake()
#make_clusters()
from gluon.scheduler import Scheduler
scheduler = Scheduler(db)
scheduler.queue_task(make_clusters, repeats=0, start_time=datetime.datetime.now(), period=86400)
Example #29
    for i in range(0, active_host_no):
        host_ip_list.append(active_host_list[i].private_ip)
        host_name_list.append(active_host_name[i].host_name)
    logger.debug(host_ip_list)
    logger.debug(host_name_list)
    collect_data_from_host(host_ip_list, host_name_list)
    logger.debug("collected host networking data")

# Defining scheduler tasks
from gluon.scheduler import Scheduler
vm_scheduler = Scheduler(db, tasks=dict(vm_task=process_task_queue,
                                        clone_task=process_clone_task,
                                        snapshot_vm=process_snapshot_vm,
                                        vm_sanity=vm_sanity_check,
                                        vnc_access=check_vnc_access,
                                        host_sanity=host_sanity_check,
                                        vm_util_rrd=vm_utilization_rrd,
                                        vm_daily_checks=process_vmdaily_checks,
                                        vm_purge_unused=process_unusedvm_purge,
                                        memory_overload=overload_memory,
                                        networking_host=host_networking),
                         group_names=['vm_task', 'vm_sanity', 'host_task', 'vm_rrd', 'snapshot_task'])


midnight_time = request.now.replace(hour=23, minute=59, second=59)

vm_scheduler.queue_task(TASK_SNAPSHOT, 
                    pvars = dict(snapshot_type = SNAPSHOT_DAILY),
                    repeats = 0, # run indefinitely
                    start_time = midnight_time, 
                    period = 24 * HOURS, # every 24h
                    timeout = 5 * MINUTES,
Example #30
# License Content: Creative Commons Attribution 3.0
#
# Also visit: www.web2py.com
# or Groups: http://groups.google.com/group/web2py
# For details on the web framework used for this development
#
# With thanks to Guido, Massimo and many other that make this sort of thing
# much easier than it used to be

from ndsfunctions import score_question, resulthtml, truncquest, getrundates
import datetime
from ndspermt import get_exclude_groups

from gluon.scheduler import Scheduler

scheduler = Scheduler(db, heartbeat=15)


def activity(id=0,
             resend=False,
             period='Week',
             format='html',
             source='default'):
    # This is triggered from the runactivity function below, which figures out
    # whether it needs to run and, on success, rolls the run date forward to the
    # next period. This function just formats the message for sending via email.

    db = current.db

    if id > 0:
        rows = db(db.email_runs.id == id).select()
Example #31

def import_all_nexpose_vulndata(overwrite=False, nexpose_server={}):
    """
    Import all vulnerability data from Nexpose
    """
    from skaldship.nexpose import import_all_vulndata

    import_all_vulndata(overwrite=overwrite, nexpose_server=nexpose_server)
    return True


##-------------------------------------------------------
def connect_exploits():
    """
    Process Nexpose exploits.xml file into the database
    """
    from skaldship.exploits import connect_exploits

    connect_exploits()
    return True


##----------------------------------------------------------------------------

scheduler = Scheduler(
    db=db,
    migrate=settings.migrate,
    group_names=[settings.scheduler_group_name],
)
Example #32
# -*- coding: utf-8 -*-
from gluon.scheduler import Scheduler

scheduler = Scheduler(db, heartbeat=int(myconf.get('scheduler.heartbeat')))


def __schedule_daemon_tasks():
    for t in DAEMON_TASKS:
        __schedule_daemon_task(t)


def __midnight(dt):
    from dateutil.relativedelta import relativedelta
    return (dt + relativedelta(days=1)).replace(hour=0,
                                                minute=0,
                                                second=0,
                                                microsecond=0)


def __schedule_daemon_task(task_tuple):
    task_name = task_tuple[0]
    task_period = task_tuple[1]
    tasks = db(db.scheduler_task.function_name == task_name).count()
    if not tasks:
        now = datetime.datetime.now()
        ttime = __midnight(now) if (task_name in MIDNIGHT_TASKS) else now
        t = scheduler.queue_task(
            task_name,
            start_time=ttime,
            next_run_time=ttime,
            pvars={},
Example #33
from gluon.scheduler import Scheduler

scheduler = Scheduler(db)


def send_email():
    # rows = db.courier().select(orderby=~db.courier.arrival_date)
    # temp = []
    # for row in rows:
    #      if row.taken == False:
    #          temp.append(row)
    # print temp[0]
    hisemail = "*****@*****.**"  # change this to the recipient's email address

    sub = "New parcel.IIIT courier portal"
    msg = "You have new Parcels . Collect it form Nilgiri"
    if mail:
        if mail.send(to=[hisemail], subject=sub, message=msg):
            response.flash = 'email sent successfully.'
        else:
            response.flash = 'fail to send email sorry!'
    else:
        response.flash = 'Unable to send the email : email parameters not defined'


scheduler.queue_task(send_email)
Example #34
from gluon.scheduler import Scheduler
from datetime import datetime, timedelta
from signal import SIGKILL
import os

def build_monitor():
    running_builds = db(db.current_builds.finished == False).select()
    for build in running_builds:
        # check which of the running builds have been running too long
        if (build.start_time + timedelta(seconds=MAX_BUILD_TIME)) < datetime.now():
            if build.PID is None:
                print('The build with id', build.id, 'has not started yet but has already timed out. It is probably garbage.')
            else:
                # kill the build; the build module will resume and take care of cleanup, etc.
                print('The build', build.id, 'has timed out!')
                os.kill(build.PID, SIGKILL)

    return True

TASK_UUID = '29aa3d33-1f7b-4d11-a589-75afa399a4e9'

# initiate scheduler
scheduler = Scheduler(db, discard_results=False, heartbeat=1)


# build_monitor task - drop and reinsert to avoid time stamp conflicts
scheduler.queue_task('build_monitor', task_name='build_monitor', repeats=0, period=2, timeout=2, uuid=TASK_UUID, retry_failed=-1)
Example #35
    # here: don't. It will prevent the job from completing; this is probably a
    # scheduler bug. See https://www.pythonanywhere.com/forums/topic/744/
    return ret


def clean_imagedir():
    # cleans images that are older than config.keep_images hours
    from clean_imagedir import cleanup
    ret = cleanup()
    return ret


# start the scheduler
scheduler = Scheduler(
    db_scheduler,
    discard_results=True,
    heartbeat=settings.scheduler['heartbeat'],
)

# check if the set_failed task is there, else insert it
sched_failed = cache.ram(
    'sched_failed',
    lambda: db_scheduler(
        db_scheduler.scheduler_task.task_name == "set_failed"
    ).select().first(),
    time_expire=60
)
if not sched_failed:
    scheduler.queue_task(
        set_failed,
        task_name="set_failed",
Example #36
response.meta.author = configuration.get('app.author')
response.meta.description = configuration.get('app.description')
response.meta.keywords = configuration.get('app.keywords')
response.meta.generator = configuration.get('app.generator')

# -------------------------------------------------------------------------
# your http://google.com/analytics id
# -------------------------------------------------------------------------
response.google_analytics_id = configuration.get('google.analytics_id')

# -------------------------------------------------------------------------
# maybe use the scheduler
# -------------------------------------------------------------------------

from gluon.scheduler import Scheduler
scheduler = Scheduler(db)

# -------------------------------------------------------------------------
# Define your tables below (or better in another model file) for example
#
# >>> db.define_table('mytable', Field('myfield', 'string'))
#
# Fields can be 'string','text','password','integer','double','boolean'
#       'date','time','datetime','blob','upload', 'reference TABLENAME'
# There is an implicit 'id integer autoincrement' field
# Consult manual for more options, validators, etc.
#
# More API examples for controllers:
#
# >>> db.mytable.insert(myfield='value')
# >>> rows = db(db.mytable.myfield == 'value').select(db.mytable.ALL)
Example #37
    ## if NOT running on Google App Engine use SQLite or other DB
    db = DAL('sqlite://storage.sqlite',
             pool_size=1,
             lazy_tables=True,
             migrate=True)
    db_sched = DAL('sqlite://storage_scheduler.sqlite')
else:
    ## connect to Google BigTable (optional 'google:datastore://namespace')
    db = DAL('google:datastore')
    ## store sessions and tickets there
    ## or store session in Memcache, Redis, etc.
    ## from gluon.contrib.memdb import MEMDB
    ## from google.appengine.api.memcache import Client
    ## session.connect(request, response, db = MEMDB(Client()))

scheduler = Scheduler(db_sched)

session.connect(request, response, db=db)

## by default give a view/generic.extension to all actions from localhost
## none otherwise. a pattern can be 'controller/function.extension'
# response.generic_patterns = ['*'] if request.is_local else ['*.rss']
response.generic_patterns = ['*.rss']
## (optional) optimize handling of static files
#response.optimize_css = 'concat,minify,inline'
#response.optimize_js = 'concat,minify,inline'

from gluon.tools import Auth, Crud, Service, PluginManager, prettydate

auth = Auth(db)
crud, service, plugins = Crud(db), Service(), PluginManager()
Example #38
response.meta.keywords = configuration.get("app.keywords")
response.meta.generator = configuration.get("app.generator")
response.show_toolbar = configuration.get("app.toolbar")

# -------------------------------------------------------------------------
# your http://google.com/analytics id
# -------------------------------------------------------------------------
response.google_analytics_id = configuration.get("google.analytics_id")

# -------------------------------------------------------------------------
# maybe use the scheduler
# -------------------------------------------------------------------------
if configuration.get("scheduler.enabled"):
    from gluon.scheduler import Scheduler

    scheduler = Scheduler(db, heartbeat=configuration.get("scheduler.heartbeat"))

# -------------------------------------------------------------------------
# Define your tables below (or better in another model file) for example
#
# >>> db.define_table('mytable', Field('myfield', 'string'))
#
# Fields can be 'string','text','password','integer','double','boolean'
#       'date','time','datetime','blob','upload', 'reference TABLENAME'
# There is an implicit 'id integer autoincrement' field
# Consult manual for more options, validators, etc.
#
# More API examples for controllers:
#
# >>> db.mytable.insert(myfield='value')
# >>> rows = db(db.mytable.myfield == 'value').select(db.mytable.ALL)
Example #39
from gluon.scheduler import Scheduler
from module_admin_functions import scan_box

# The scheduler is loaded and defined in a model, so that it can register the
# required tables with the database. The functions are defined in separate modules.

# Load the scheduler and set the task names. With only daily tasks, a slower
# heartbeat would be fine, but dataset checking needs a snappier response,
# so the default 3 second heartbeat is used. Note that individual queued tasks
# can set immediate=True to be run more promptly, but even then a task might
# wait for one or two heartbeats before it actually runs.

scheduler = Scheduler(db, tasks=dict(scan_box=scan_box))

# These tasks then need to be queued using scheduler.queue_task or manually via
# the appadmin interface. Don't do it here, as they would be queued every time
# the model runs, which is basically every time a webpage is loaded! So,
# programmatically, they can go in a controller which an admin runs once to
# get a defined set of queues going, as in the sketch below.
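A hedged sketch of such a one-off controller action (the existence check mirrors the function_name query used in Example #32; the action name is illustrative):

def setup_scheduler_queues():
    # run once by an admin to seed the recurring scan_box task
    if db(db.scheduler_task.function_name == 'scan_box').count() == 0:
        scheduler.queue_task('scan_box', repeats=0, period=86400)
        db.commit()
    return 'scheduler queue initialised'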
Example #40
from gluon.scheduler import Scheduler
scheduler = Scheduler(db, migrate=False)

from applications.vtraffic.modules.tools import EPOCH_M
from datetime import timedelta


def test_write():
	db.define_table('test_write',
		Field('value', 'integer' ),
		migrate=True
	)
	insert_id = db.test_write.insert(value=123)
	n_insert  = db(db.test_write).count()
	db.commit()
	return (insert_id, n_insert)

## For each possible origin/destination pair, find the matches
def run_all():
	print('start')
	stations = db(db.station.id).select(db.station.id, orderby=db.station.id)
	total = 0
	for o in stations:
		for d in stations:
			if o.id != d.id:
				matches = find_matches(o.id, d.id)
				__save_match(matches)
				total   += len(matches)
				query = (db.match.station_id_orig == o.id) & (db.match.station_id_dest == d.id)
				#__get_blocks_scheduler(query, 900, reset_cache=True)
Example #41
            user_exist = db(db.user_extra.auth_id == user['id']).select()  # all existing auth_user records in the user_extra table
            timeline_table = db(db.timeline.user_extra_id==user['id']).select()
            now_time = datetime.now()
            if status and len(user_exist):
                if not len(timeline_table) or timeline_table[-1]['end_time']:  # no open interval: no rows yet, or the last row already has an end_time
                    logger.debug('Insert')
                    db.timeline.insert(week_day=now_time.strftime('%A %d %b'),
                                       user_extra_id=user['id'],
                                       start_time=now_time.isoformat())
                    db.commit()
                else:
                    continue
            elif len(user_exist):
                if (len(timeline_table) and
                        timeline_table[-1]['start_time'] and
                        not timeline_table[-1]['end_time']):
                    logger.debug('Update')
                    timeline_table[-1].end_time=now_time.isoformat()
                    timeline_table[-1].update_record()
        elif type == 'facebook':
            pass

    return True

# def write_to_db():
#     if check_online(user_id):
from gluon.scheduler import Scheduler
scheduler = Scheduler(db)

# scheduler.queue_task(task_add,pvars=dict(a=1,b=2))
scheduler.queue_task(check_online)
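
# NB queue_task expects the function object (or its name as a string);
# calling the function, as in queue_task(check_online()), would execute it
# immediately at model load instead of queueing it. A hedged polling sketch
# (capture the returned row to get the task id):
#
# task = scheduler.queue_task(check_online)
# db.commit()
# scheduler.task_status(task.id, output=True)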
Example #42
0
        rvars.get('downloads_enabled', 'false'),
        'enable_chatcodes':
        'false',
    }
    if 'loginreq' in rvars:
        opts['login_req'] = 'true'
    else:
        opts['login_req'] = 'false'
    if 'python3' in rvars:
        opts['python3'] = 'true'
    else:
        opts['python3'] = 'false'
    if 'allowpairs' in rvars:
        opts['allow_pairs'] = 'true'
    else:
        opts['allow_pairs'] = 'false'
    if 'short_name' in rvars:
        opts['short_name'] = rvars['short_name'].replace(' ', '\\ ')
    else:
        opts['short_name'] = rvars['projectname']

    opts['dest'] = '../../static'

    paver_stuff = paver_stuff % opts
    with open(path.join(sourcedir, 'pavement.py'), 'w') as fp:
        fp.write(paver_stuff)


if settings.academy_mode:
    scheduler = Scheduler(db, migrate='runestone_')
Example #43
0
            db2(tasks_to_delete).delete()
            db(db.global_settings.kkey == 'operation_key').delete()
            db.commit()
            db2.commit()
        except:
            rtn.append('exception')
            db.rollback()
            db2.rollback()

    return rtn


myscheduler = Scheduler(db2,
                        dict(check_season=check_season,
                             create_path=create_path,
                             add_series=add_series,
                             update_single_series=update_single_series,
                             bit_actualizer=bit_actualizer,
                             check_subs=check_season_subs,
                             down_epbanners=down_epbanners,
                             down_sebanners=down_sebanners,
                             update=update,
                             maintenance=maintenance,
                             ep_metadata=ep_metadata,
                             queue_torrents=queue_torrents,
                             down_torrents=down_torrents,
                             scoop_season=scoop_season,
                             series_metadata=series_metadata,
                             the_boss=the_boss),
                        migrate=MIGRATE)
else:
    sch_db = db


def check_classes_status():
    classes = db(Class.status != 2).select()
    log_in_file("Checking for open classes...", path="/tmp/scheduler.log")
    for course_class in classes:
        if int(course_class.status) == 1 and course_class.end_date < request.now.date():
            log_in_file("Course %s being closed." % course_class.course.id, path="/tmp/scheduler.log")
            course_class.update_record(status=2)
        elif int(course_class.status) == 3 and course_class.start_date <= request.now.date():
            log_in_file("Course %s in progress." % course_class.course.id, path="/tmp/scheduler.log")
            course_class.update_record(status=1)
    db.commit()
    log_in_file("All status updated!", path="/tmp/scheduler.log")


scheduler = Scheduler(sch_db, tasks=dict(check_classes_status=check_classes_status))

if sch_db(sch_db.scheduler_task).count() == 0:
    ## run check_classes_status once per day (86400 seconds = 24 hours)
    ## repeats = 0 means it will repeat forever
    ## it starts at the first midnight after it is created
    import datetime

    today_midnight = datetime.datetime.now().replace(hour=0, minute=0, second=0, microsecond=0)
    start_time = today_midnight + datetime.timedelta(days=1)
    sched = scheduler.queue_task("check_classes_status", start_time=start_time, period=86400, repeats=0)
    log_in_file("New scheduler created: ID %d" % sched.id, path="/tmp/scheduler.log")
Example #45
0
response.meta.description = configuration.get('app.description')
response.meta.keywords = configuration.get('app.keywords')
response.meta.generator = configuration.get('app.generator')
response.show_toolbar = configuration.get('app.toolbar')

# -------------------------------------------------------------------------
# your http://google.com/analytics id
# -------------------------------------------------------------------------
response.google_analytics_id = configuration.get('google.analytics_id')

# -------------------------------------------------------------------------
# maybe use the scheduler
# -------------------------------------------------------------------------
if configuration.get('scheduler.enabled'):
    from gluon.scheduler import Scheduler
    scheduler = Scheduler(db,
                          heartbeat=configuration.get('scheduler.heartbeat'))
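
    # A minimal sketch of the configuration this expects (assumed keys in
    # private/appconfig.ini; the values shown are illustrative):
    #   [scheduler]
    #   enabled = true
    #   heartbeat = 3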

# -------------------------------------------------------------------------
# Define your tables below (or better in another model file) for example
#
# >>> db.define_table('mytable', Field('myfield', 'string'))
#
# Fields can be 'string','text','password','integer','double','boolean'
#       'date','time','datetime','blob','upload', 'reference TABLENAME'
# There is an implicit 'id integer autoincrement' field
# Consult manual for more options, validators, etc.
#
# More API examples for controllers:
#
# >>> db.mytable.insert(myfield='value')
# >>> rows = db(db.mytable.myfield == 'value').select(db.mytable.ALL)
    try:
        with gzip.open(backup_file, 'wb') as backup:
            db.export_to_csv_file(backup)
    except Exception as error:
        logger.exception(str(error))
        raise
    logger.info('Backup completed')
    return True


def delete_sessions():
    single_loop(auth.settings.expiration)


scheduler = Scheduler(db)

# schedule the backup
if not db((db.scheduler_task.task_name == 'db_export')).select():
    scheduler.queue_task(db_export,
                         pvars={},
                         timeout=60 * 10,
                         period=((60*60) * backup_hours),
                         repeats=0,
                         retry_failed=5)

# schedule the cleaning of the sessions
if not db((db.scheduler_task.task_name == 'delete_sessions')).select():
    scheduler.queue_task(delete_sessions,
                         pvars={},
                         timeout=60 * 10,
Example #47
0
            ) + " change from when you last checked. \n Here are the closing prices of the stocks you are currently following: \n"
            for x in follow_list:
                follow_string += "Ticker: " + x[0] + " Closing Price: " + str(
                    x[1]) + "\n"
            mail = auth.settings.mailer
            mail.settings.server = 'smtp.gmail.com:587'
            mail.settings.sender = '*****@*****.**'
            mail.settings.login = '******'
            mail.send(
                to=user.email,
                subject='Your Daily Stock Information Courtesy of SlugStock',
                message=(follow_string))


def nextday():
    day = db(db.day.day != None).select().first()
    day.update_record(day=(day.day + 1))
    db(db.recent.day <= (day.day - 5)).delete()
    db.commit()


from gluon.scheduler import Scheduler
scheduler = Scheduler(db,
                      tasks=dict(email=email_daily,
                                 updatePrices=updateYahooPrices,
                                 emergency_email=emergency_email,
                                 clear=remove_completed_tasks,
                                 nextday=nextday,
                                 csv_read=csv_read,
                                 csv_daily=csv_daily))
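
# With the tasks registered above, at least one background worker must be
# running for anything to execute; web2py workers are usually started from
# the command line (the app name here is illustrative):
#   python web2py.py -K myapp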
Example #48
0
class S3Task(object):
    """ Asynchronous Task Execution """

    TASK_TABLENAME = "scheduler_task"

    # -------------------------------------------------------------------------
    def __init__(self):

        migrate = current.deployment_settings.get_base_migrate()
        tasks = current.response.s3.tasks

        # Instantiate Scheduler
        try:
            from gluon.scheduler import Scheduler
        except ImportError:
            # Warning should already have been given by eden_update_check.py
            self.scheduler = None
        else:
            self.scheduler = Scheduler(
                current.db,
                tasks,
                migrate=migrate,
                #use_spawn = True # Possible subprocess method with Py3
            )

    # -------------------------------------------------------------------------
    def configure_tasktable_crud(
        self,
        task=None,
        function=None,
        args=None,
        vars=None,
        period=3600,  # seconds, so 1 hour
        status_writable=False,
    ):
        """
            Configure the task table for interactive CRUD,
            setting defaults, widgets and hiding unnecessary fields

            @param task: the task name (will use a UUID if omitted)
            @param function: the function name (won't hide if omitted)
            @param args: the function position arguments
            @param vars: the function named arguments
            @param period: the default period for tasks
            @param status_writable: make status and next run time editable
        """

        T = current.T
        NONE = current.messages["NONE"]
        UNLIMITED = T("unlimited")

        tablename = self.TASK_TABLENAME
        table = current.db[tablename]

        # Configure start/stop time fields
        for fn in ("start_time", "stop_time"):
            field = table[fn]
            field.represent = lambda dt: \
                            S3DateTime.datetime_represent(dt, utc=True)
            set_min = set_max = None
            if fn == "start_time":
                field.requires = IS_UTC_DATETIME()
                set_min = "#scheduler_task_stop_time"
            elif fn == "stop_time":
                field.requires = IS_EMPTY_OR(IS_UTC_DATETIME())
                set_max = "#scheduler_task_start_time"
            field.widget = S3CalendarWidget(
                past=0,
                set_min=set_min,
                set_max=set_max,
                timepicker=True,
            )

        # Task name (default use UUID)
        if task is None:
            from uuid import uuid4
            task = str(uuid4())
        field = table.task_name
        field.default = task
        field.readable = field.writable = False

        # Function (default+hide if specified as parameter)
        if function:
            field = table.function_name
            field.default = function
            field.readable = field.writable = False

        # Args and vars (defaults stored as JSON strings)
        field = table.args
        if isinstance(args, list):
            field.default = json.dumps(args)
            field.readable = field.writable = False
        else:
            field.default = "[]"
        field = table.vars
        if isinstance(vars, dict):
            field.default = json.dumps(vars)
            field.readable = field.writable = False
        else:
            field.default = "{}"

        # Fields which are always editable
        field = table.repeats
        field.label = T("Repeat")
        field.comment = T("times (0 = unlimited)")
        field.default = 0
        field.represent = lambda opt: \
            opt and "%s %s" % (opt, T("times")) or \
            opt == 0 and UNLIMITED or \
            NONE

        field = table.period
        field.label = T("Run every")
        field.default = period
        field.widget = S3TimeIntervalWidget.widget
        field.requires = IS_INT_IN_RANGE(0, None)
        field.represent = S3TimeIntervalWidget.represent
        field.comment = None

        table.timeout.default = 600
        table.timeout.represent = lambda opt: \
                                    opt and "%s %s" % (opt, T("seconds")) or \
                                    opt == 0 and UNLIMITED or \
                                    NONE

        # Always use "default" controller (web2py uses current controller),
        # otherwise the anonymous worker does not pass the controller
        # permission check and gets redirected to login before it reaches
        # the task function which does the s3_impersonate
        field = table.application_name
        field.default = "%s/default" % current.request.application
        field.readable = field.writable = False

        # Hidden fields
        hidden = (
            "uuid",
            "broadcast",
            "group_name",
            "times_run",
            "assigned_worker_name",
            "sync_output",
            "times_failed",
            "cronline",
        )
        for fn in hidden:
            table[fn].readable = table[fn].writable = False

        # Optionally editable fields
        fields = ("next_run_time", "status", "prevent_drift")
        for fn in fields:
            table[fn].readable = table[fn].writable = status_writable

        list_fields = [
            "id", "enabled", "start_time", "repeats", "period",
            (T("Last run"), "last_run_time"), (T("Last status"), "status"),
            (T("Next run"), "next_run_time"), "stop_time"
        ]
        if not function:
            list_fields[1:1] = ["task_name", "function_name"]

        current.s3db.configure(
            tablename,
            list_fields=list_fields,
        )

        response = current.response
        if response:
            response.s3.crud_strings[tablename] = Storage(
                label_create=T("Create Job"),
                title_display=T("Job Details"),
                title_list=T("Job Schedule"),
                title_update=T("Edit Job"),
                label_list_button=T("List Jobs"),
                msg_record_created=T("Job added"),
                msg_record_modified=T("Job updated"),
                msg_record_deleted=T("Job deleted"),
                msg_list_empty=T("No jobs configured yet"),
                msg_no_match=T("No jobs configured"))

    # -------------------------------------------------------------------------
    # API Function run within the main flow of the application
    # -------------------------------------------------------------------------
    def run_async(self, task, args=None, vars=None, timeout=300):
        """
            Wrapper to call an asynchronous task.
            - run from the main request

            @param task: The function which should be run
                         - async if a worker is alive
            @param args: The list of unnamed args to send to the function
            @param vars: The list of named vars to send to the function
            @param timeout: The length of time available for the task to complete
                            - default 300s (5 mins)
        """

        if args is None:
            args = []
        if vars is None:
            vars = {}

        # Check that task is defined (and callable)
        tasks = current.response.s3.tasks
        if not tasks or not callable(tasks.get(task)):
            return False

        # Check that args/vars are JSON-serializable
        try:
            json.dumps(args)
        except (ValueError, TypeError):
            msg = "S3Task.run_async args not JSON-serializable: %s" % args
            current.log.error(msg)
            raise
        try:
            json.dumps(vars)
        except (ValueError, TypeError):
            msg = "S3Task.run_async vars not JSON-serializable: %s" % vars
            current.log.error(msg)
            raise

        # Run synchronously if scheduler not running
        if not self._is_alive():
            tasks[task](*args, **vars)
            return None  # No task ID in this case

        # Queue the task (async)
        try:
            # Add the current user to the vars
            vars["user_id"] = current.auth.user.id
        except AttributeError:
            pass
        queued = self.scheduler.queue_task(task,
                                           pargs = args,
                                           pvars = vars,
                                           application_name = "%s/default" % \
                                                              current.request.application,
                                           function_name = task,
                                           timeout = timeout,
                                           )

        # Return task ID so that status can be polled
        return queued.id

    # -------------------------------------------------------------------------
    def schedule_task(
            self,
            task,
            args=None,  # args to pass to the task
            vars=None,  # vars to pass to the task
            function_name=None,
            start_time=None,
            next_run_time=None,
            stop_time=None,
            repeats=None,
            retry_failed=None,
            period=None,
            timeout=None,
            enabled=None,  # None = Enabled
            group_name=None,
            ignore_duplicate=False,
            sync_output=0):
        """
            Schedule a task in web2py Scheduler

            @param task: name of the function/task to be scheduled
            @param args: args to be passed to the scheduled task
            @param vars: vars to be passed to the scheduled task
            @param function_name: function name (if different from task name)
            @param start_time: start_time for the scheduled task
            @param next_run_time: next_run_time for the scheduled task
            @param stop_time: stop_time for the scheduled task
            @param repeats: number of times the task to be repeated (0=unlimited)
            @param retry_failed: number of times the task to be retried (-1=unlimited)
            @param period: time period between two consecutive runs (seconds)
            @param timeout: set timeout for a running task
            @param enabled: enabled flag for the scheduled task
            @param group_name: group_name for the scheduled task
            @param ignore_duplicate: disable or enable duplicate checking
            @param sync_output: sync output every n seconds (0 = disable sync)
        """

        if args is None:
            args = []
        if vars is None:
            vars = {}

        if not ignore_duplicate and self._duplicate_task_exists(
                task, args, vars):
            # if duplicate task exists, do not insert a new one
            current.log.warning("Duplicate Task, Not Inserted", value=task)
            return False

        kwargs = {}

        if function_name is None:
            function_name = task

        # storing valid keyword arguments only if they are provided
        if start_time:
            kwargs["start_time"] = start_time

        if next_run_time:
            kwargs["next_run_time"] = next_run_time
        elif start_time:
            # default it to start_time
            kwargs["next_run_time"] = start_time

        if stop_time:
            kwargs["stop_time"] = stop_time
        elif start_time:
            # default to one day after the given start_time
            if not isinstance(start_time, datetime.datetime):
                start_time = datetime.datetime.strptime(
                    start_time, "%Y-%m-%d %H:%M:%S")
            kwargs["stop_time"] = start_time + datetime.timedelta(days=1)

        if repeats is not None:
            kwargs["repeats"] = repeats

        if retry_failed is not None:
            kwargs["retry_failed"] = retry_failed

        if period:
            kwargs["period"] = period

        if timeout:
            kwargs["timeout"] = timeout

        if enabled is not None:
            # NB None => enabled
            kwargs["enabled"] = enabled

        if group_name:
            kwargs["group_name"] = group_name

        if sync_output != 0:
            kwargs["sync_output"] = sync_output

        auth = current.auth
        if auth.is_logged_in():
            # Add the current user to the vars
            vars["user_id"] = auth.user.id

        # Add to DB for pickup by Scheduler task
        # @ToDo: Switch to API: self.scheduler.queue_task()
        task_id = current.db.scheduler_task.insert(application_name = "%s/default" % \
                                                   current.request.application,
                                                   task_name = task,
                                                   function_name = function_name,
                                                   args = json.dumps(args),
                                                   vars = json.dumps(vars),
                                                   **kwargs)
        return task_id

    # -------------------------------------------------------------------------
    @staticmethod
    def _duplicate_task_exists(task, args, vars):
        """
            Check whether a task with the same function name, args and vars
            is already queued, allocated or running

            @param task: name of the task function
            @param args: the job position arguments (list)
            @param vars: the job named arguments (dict)
        """

        db = current.db
        ttable = db.scheduler_task

        args_json = json.dumps(args)

        query = ((ttable.function_name == task) & \
                 (ttable.args == args_json) & \
                 (ttable.status.belongs(["RUNNING", "QUEUED", "ALLOCATED"])))
        jobs = db(query).select(ttable.vars)
        for job in jobs:
            job_vars = json.loads(job.vars)
            if job_vars == vars:
                return True
        return False

    # -------------------------------------------------------------------------
    @staticmethod
    def _is_alive():
        """
            Returns True if there is at least 1 active worker to run scheduled tasks
            - run from the main request

            NB Can't run this once per request at the beginning, since the
               tables only get defined in zz_last
        """

        #if self.scheduler:
        #    return self.scheduler.is_alive()
        #else:
        #    return False

        db = current.db
        table = db.scheduler_worker

        now = datetime.datetime.now()
        offset = datetime.timedelta(minutes=1)

        query = (table.last_heartbeat > (now - offset))
        cache = current.response.s3.cache
        worker_alive = db(query).select(
            table.id,
            limitby=(0, 1),
            cache=cache,
        ).first()

        return True if worker_alive else False

    # -------------------------------------------------------------------------
    @staticmethod
    def reset(task_id):
        """
            Reset the status of a task to QUEUED after FAILED

            @param task_id: the task record ID
        """

        db = current.db
        ttable = db.scheduler_task

        query = (ttable.id == task_id) & (ttable.status == "FAILED")
        task = db(query).select(ttable.id, limitby=(0, 1)).first()
        if task:
            task.update_record(status="QUEUED")

    # =========================================================================
    # Functions run within the Task itself
    # =========================================================================
    @staticmethod
    def authenticate(user_id):
        """
            Activate the authentication passed from the caller to this new request
            - run from within the task

            NB This is so simple that we don't normally run via this API
               - this is just kept as an example of what needs to happen within the task
        """

        current.auth.s3_impersonate(user_id)
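
# A hedged usage sketch for S3Task.run_async, based on its documented return
# values (the task name and vars are illustrative, not from this codebase):
#
#   s3task = S3Task()
#   result = s3task.run_async("my_task", args=[], vars={"batch": 50})
#   if result is False:
#       pass  # task not registered in current.response.s3.tasks
#   elif result is None:
#       pass  # no live worker, so it was run synchronously instead
#   else:
#       pass  # queued: result is the task id, poll scheduler_task for status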
Example #49
0
File: db.py Project: jcoetzer/bars
response.meta.author = configuration.get('app.author')
response.meta.description = configuration.get('app.description')
response.meta.keywords = configuration.get('app.keywords')
response.meta.generator = configuration.get('app.generator')

# -------------------------------------------------------------------------
# your http://google.com/analytics id
# -------------------------------------------------------------------------
response.google_analytics_id = configuration.get('google.analytics_id')

# -------------------------------------------------------------------------
# maybe use the scheduler
# -------------------------------------------------------------------------
if configuration.get('scheduler.enabled'):
    from gluon.scheduler import Scheduler
    scheduler = Scheduler(db,
                          heartbeat=configuration.get('scheduler.heartbeat'))

# -------------------------------------------------------------------------
# Define your tables below (or better in another model file) for example
#
# >>> db.define_table('mytable', Field('myfield', 'string'))
#
# Fields can be 'string','text','password','integer','double','boolean'
#       'date','time','datetime','blob','upload', 'reference TABLENAME'
# There is an implicit 'id integer autoincrement' field
# Consult manual for more options, validators, etc.
#
# More API examples for controllers:
#
# >>> db.mytable.insert(myfield='value')
# >>> rows = db(db.mytable.myfield == 'value').select(db.mytable.ALL)
    logger.debug(host_name_list)
    collect_data_from_host(host_ip_list, host_name_list)
    logger.debug("collected host networking data")

# Defining scheduler tasks
from gluon.scheduler import Scheduler
vm_scheduler = Scheduler(db, tasks=dict(vm_task=process_task_queue,
                                        clone_task=process_clone_task,
                                        snapshot_vm=process_snapshot_vm,
                                        vm_sanity=vm_sanity_check,
                                        vnc_access=check_vnc_access,
                                        host_sanity=host_sanity_check,
                                        vm_util_rrd=vm_utilization_rrd,
                                        vm_daily_checks=process_vmdaily_checks,
                                        vm_garbage_collector=process_unusedvm,
                                        memory_overload=overload_memory,
                                        networking_host=host_networking,
                                        rrd_task=task_rrd,
                                        vm_loadbalance=process_loadbalancer),
                         group_names=['vm_task', 'vm_sanity', 'host_task', 'vm_rrd', 'snapshot_task', 'host_network'])


midnight_time = request.now.replace(hour=23, minute=59, second=59)  # one second before midnight

vm_scheduler.queue_task(TASK_SNAPSHOT, 
                    pvars = dict(snapshot_type = SNAPSHOT_DAILY),
                    repeats = 0, # run indefinitely
                    start_time = midnight_time, 
                    period = 24 * HOURS, # every 24h