Пример #1
0
    def testRegressions(self):
        """Regression checks in one scheduler run.

        Covers two historical bugs: a ~100KB task result must survive the
        round-trip through the db (demo10), and a task must be able to render
        a view via response.render (issue #1485).
        """
        s = Scheduler(self.db)
        # retry_failed/period values are incidental here; demo10 never fails
        huge_result = s.queue_task('demo10', retry_failed=1, period=1)
        issue_1485 = s.queue_task('issue_1485')
        # 'termination' makes the worker stop once the queue is drained
        termination = s.queue_task('termination')
        self.db.commit()
        self.writefunction(r"""
def demo10():
    res = 'a' * 99999
    return dict(res=res)

def issue_1485():
    return response.render('issue_1485.html', dict(variable='abc'))
""")
        self.writeview(r"""<span>{{=variable}}</span>""", 'issue_1485.html')
        ret = self.exec_sched()
        # process finished just fine
        self.assertEqual(ret, 0)
        # huge_result - checks
        task_huge = s.task_status(huge_result.id, output=True)
        res = [
            ("task status completed", task_huge.scheduler_task.status == 'COMPLETED'),
            ("task times_run is 1", task_huge.scheduler_task.times_run == 1),
            ("result is the correct one", task_huge.result == dict(res='a' * 99999))
        ]
        self.exec_asserts(res, 'HUGE_RESULT')

        # issue_1485: result must be the rendered view, not a template string
        task_issue_1485 = s.task_status(issue_1485.id, output=True)
        res = [
            ("task status completed", task_issue_1485.scheduler_task.status == 'COMPLETED'),
            ("task times_run is 1", task_issue_1485.scheduler_task.times_run == 1),
            ("result is the correct one", task_issue_1485.result == '<span>abc</span>')
        ]
        self.exec_asserts(res, 'issue_1485')
Пример #2
0
    def testQueue_Task(self):
        """Validate the queue_task() return payload and argument checking."""

        def assert_rejected(outcome):
            # A rejected task carries no id/uuid, only validation errors.
            self.assertEqual(outcome.id, None)
            self.assertEqual(outcome.uuid, None)
            self.assertEqual(len(outcome.errors.keys()) > 0, True)

        def assert_accepted(outcome):
            # An accepted task gets an id and a uuid, and no errors.
            self.assertNotEqual(outcome.id, None)
            self.assertNotEqual(outcome.uuid, None)
            self.assertEqual(len(outcome.errors.keys()), 0)

        scheduler = Scheduler(self.db)
        fname = 'foo'
        queued = scheduler.queue_task(fname, task_name='watch')
        # queuing a task returns exactly id, errors, uuid
        self.assertEqual(set(queued.keys()), set(['id', 'uuid', 'errors']))
        # queueing nothing isn't allowed
        self.assertRaises(TypeError, scheduler.queue_task, *[])
        # pargs must be a list and pvars a dict; wrong shapes are rejected
        assert_rejected(scheduler.queue_task(fname, dict(a=1), dict(b=1)))
        assert_rejected(scheduler.queue_task(fname, ['foo', 'bar'], ['foo', 'bar']))
        # a uuid can only be used once
        assert_accepted(scheduler.queue_task(fname, uuid='a'))
        assert_rejected(scheduler.queue_task(fname, uuid='a'))
Пример #3
0
    def testRetryFailed(self):
        """retry_failed semantics.

        demo2 always fails: with retry_failed=1 it must run twice (original
        attempt + one retry) and end FAILED. demo8 fails its first two
        attempts (tracked via a placeholder file) and then succeeds: with
        retry_failed=2 and repeats=2 it must end COMPLETED after 6 runs
        (2 failures + 1 success, twice).
        """
        s = Scheduler(self.db)
        failed = s.queue_task('demo2', retry_failed=1, period=1)
        failed_consecutive = s.queue_task('demo8',
                                          retry_failed=2,
                                          repeats=2,
                                          period=1)
        self.db.commit()
        self.writefunction(r"""
def demo2():
    1/0

def demo8():
    placeholder = os.path.join(request.folder, 'private', 'demo8.pholder')
    with open(placeholder, 'a') as g:
        g.write('\nplaceholder for demo8 created')
    num_of_lines = 0
    with open(placeholder) as f:
        num_of_lines = len([a for a in f.read().split('\n') if a])
    print('number of lines', num_of_lines)
    if num_of_lines <= 2:
       1/0
    else:
        os.unlink(placeholder)
    return 1
""")
        ret = self.exec_sched()
        # process finished just fine
        self.assertEqual(ret, 0)
        # failed - checks
        task, task_run = self.fetch_results(s, failed)
        res = [("task status failed", task.status == 'FAILED'),
               ("task times_run is 0", task.times_run == 0),
               ("task times_failed is 2", task.times_failed == 2),
               ("task ran 2 times only", len(task_run) == 2),
               ("scheduler_run records are FAILED",
                (task_run[0].status == task_run[1].status == 'FAILED')),
               ("period is respected",
                (task_run[1].start_time > task_run[0].start_time +
                 datetime.timedelta(seconds=task.period)))]
        self.exec_asserts(res, 'FAILED')

        # failed consecutive - checks
        task, task_run = self.fetch_results(s, failed_consecutive)
        res = [
            ("task status completed", task.status == 'COMPLETED'),
            ("task times_run is 2", task.times_run == 2),
            ("task times_failed is 0", task.times_failed == 0),
            ("task ran 6 times", len(task_run) == 6),
            ("scheduler_run records for COMPLETED is 2",
             len([run.status for run in task_run
                  if run.status == 'COMPLETED']) == 2),
            ("scheduler_run records for FAILED is 4",
             len([run.status for run in task_run
                  if run.status == 'FAILED']) == 4),
        ]
        self.exec_asserts(res, 'FAILED_CONSECUTIVE')
Пример #4
0
    def testRetryFailed(self):
        s = Scheduler(self.db)
        failed = s.queue_task('demo2', retry_failed=1, period=5)
        failed_consecutive = s.queue_task('demo8', retry_failed=2, repeats=2, period=5)
        self.db.commit()
        self.writefunction(r"""

def demo2():
    1/0

def demo8():
    placeholder = os.path.join(request.folder, 'private', 'demo8.pholder')
    with open(placeholder, 'a') as g:
        g.write('\nplaceholder for demo8 created')
    num_of_lines = 0
    with open(placeholder) as f:
        num_of_lines = len([a for a in f.read().split('\n') if a])
    print 'number of lines', num_of_lines
    if num_of_lines <= 2:
       1/0
    else:
        os.unlink(placeholder)
    return 1
""")
        ret = self.exec_sched()
        # process finished just fine
        self.assertEqual(ret, 0)
        # failed - checks
        info = s.task_status(failed.id)
        task_runs = self.db(self.db.scheduler_run.task_id == info.id).select()
        res = [
            ("task status failed", info.status == 'FAILED'),
            ("task times_run is 0", info.times_run == 0),
            ("task times_failed is 2", info.times_failed == 2),
            ("task ran 2 times only", len(task_runs) == 2),
            ("scheduler_run records are FAILED", (task_runs[0].status == task_runs[1].status == 'FAILED')),
            ("period is respected", (task_runs[1].start_time > task_runs[0].start_time + datetime.timedelta(seconds=info.period)))
        ]
        for a in res:
            self.assertEqual(a[1], True, msg=a[0])

        # failed consecutive - checks
        info = s.task_status(failed_consecutive.id)
        task_runs = self.db(self.db.scheduler_run.task_id == info.id).select()
        res = [
            ("task status completed", info.status == 'COMPLETED'),
            ("task times_run is 2", info.times_run == 2),
            ("task times_failed is 0", info.times_failed == 0),
            ("task ran 6 times", len(task_runs) == 6),
            ("scheduler_run records for COMPLETED is 2", len([run.status for run in task_runs if run.status == 'COMPLETED']) == 2),
            ("scheduler_run records for FAILED is 4", len([run.status for run in task_runs if run.status == 'FAILED']) == 4),
        ]
        for a in res:
            self.assertEqual(a[1], True, msg=a[0])
Пример #5
0
    def testJobGraph(self):
        """JobGraph dependency validation on the classic 'getting dressed'
        DAG: validate() must return the topological sort in layers, and
        return None once a cycle is introduced."""
        s = Scheduler(self.db)
        myjob = JobGraph(self.db, 'job_1')
        fname = 'foo'
        # We have a few items to wear, and there's an "order" to respect...
        # Items are: watch, jacket, shirt, tie, pants, undershorts, belt, shoes, socks
        # Now, we can't put on the tie without wearing the shirt first, etc...
        watch = s.queue_task(fname, task_name='watch')
        jacket = s.queue_task(fname, task_name='jacket')
        shirt = s.queue_task(fname, task_name='shirt')
        tie = s.queue_task(fname, task_name='tie')
        pants = s.queue_task(fname, task_name='pants')
        undershorts = s.queue_task(fname, task_name='undershorts')
        belt = s.queue_task(fname, task_name='belt')
        shoes = s.queue_task(fname, task_name='shoes')
        socks = s.queue_task(fname, task_name='socks')
        # before the tie, comes the shirt
        myjob.add_deps(tie.id, shirt.id)
        # before the belt too comes the shirt
        myjob.add_deps(belt.id, shirt.id)
        # before the jacket, comes the tie
        myjob.add_deps(jacket.id, tie.id)
        # before the belt, come the pants
        myjob.add_deps(belt.id, pants.id)
        # before the shoes, comes the pants
        myjob.add_deps(shoes.id, pants.id)
        # before the pants, comes the undershorts
        myjob.add_deps(pants.id, undershorts.id)
        # before the shoes, comes the undershorts
        myjob.add_deps(shoes.id, undershorts.id)
        # before the jacket, comes the belt
        myjob.add_deps(jacket.id, belt.id)
        # before the shoes, comes the socks
        myjob.add_deps(shoes.id, socks.id)

        ## results in the following topological sort
        # 9,3,6 --> 4,5 --> 8,7 --> 2
        # socks, shirt, undershorts
        # tie, pants
        # shoes, belt
        # jacket
        known_toposort = [
            set([socks.id, shirt.id, undershorts.id]),
            set([tie.id, pants.id]),
            set([shoes.id, belt.id]),
            set([jacket.id])
        ]
        toposort = myjob.validate('job_1')
        self.assertEqual(toposort, known_toposort)
        # add a cyclic dependency, jacket to undershorts
        myjob.add_deps(undershorts.id, jacket.id)
        # no exceptions raised, but result None
        self.assertEqual(myjob.validate('job_1'), None)
Пример #6
0
    def testBasic(self):
        s = Scheduler(self.db)
        foo = s.queue_task('foo')
        self.db.commit()
        self.writefunction(r"""
def foo():
    return 'a'
""")
        ret = self.exec_sched()
        # process finished just fine
        self.assertEqual(ret, 0)
        info = s.task_status(foo.id, output=True)
        self.assertEqual(info.result, 'a')
Пример #7
0
    def __init__(self):
        """Instantiate the web2py Scheduler for this app, if available.

        Sets self.scheduler to a Scheduler bound to current.db and the
        registered tasks, or to None when gluon.scheduler can't be imported.
        """
        migrate = current.deployment_settings.get_base_migrate()
        tasks = current.response.s3.tasks

        # Instantiate Scheduler
        try:
            from gluon.scheduler import Scheduler
        # BUG FIX: was a bare `except:`, which also swallowed unrelated
        # errors (KeyboardInterrupt, SystemExit, typos inside gluon).
        # Only a missing module should be tolerated here.
        except ImportError:
            # Warning should already have been given by eden_update_check.py
            self.scheduler = None
        else:
            self.scheduler = Scheduler(current.db, tasks, migrate=migrate)
Пример #8
0
    def testRepeats_and_Expired_and_Prio(self):
        """Three behaviours in one worker run: repeats=2 runs a task exactly
        twice honouring the period, a task whose stop_time is in the past
        expires without running, and an earlier next_run_time wins priority."""
        s = Scheduler(self.db)
        repeats = s.queue_task('demo1', ['a', 'b'],
                               dict(c=1, d=2),
                               repeats=2,
                               period=5)
        a_while_ago = datetime.datetime.now() - datetime.timedelta(seconds=60)
        # stop_time already passed -> must end up EXPIRED, never executed
        expired = s.queue_task('demo4', stop_time=a_while_ago)
        prio1 = s.queue_task('demo1', ['scheduled_first'])
        # queued later but with an earlier next_run_time -> runs first
        prio2 = s.queue_task('demo1', ['scheduled_second'],
                             next_run_time=a_while_ago)
        self.db.commit()
        self.writefunction(r"""
def demo1(*args,**vars):
    print('you passed args=%s and vars=%s' % (args, vars))
    return args[0]

def demo4():
    time.sleep(15)
    print("I'm printing something")
    return dict(a=1, b=2)
""")
        ret = self.exec_sched()
        self.assertEqual(ret, 0)
        # repeats check
        task, task_run = self.fetch_results(s, repeats)
        res = [("task status completed", task.status == 'COMPLETED'),
               ("task times_run is 2", task.times_run == 2),
               ("task ran 2 times only", len(task_run) == 2),
               ("scheduler_run records are COMPLETED ",
                (task_run[0].status == task_run[1].status == 'COMPLETED')),
               ("period is respected",
                (task_run[1].start_time > task_run[0].start_time +
                 datetime.timedelta(seconds=task.period)))]
        self.exec_asserts(res, 'REPEATS')

        # expired check
        task, task_run = self.fetch_results(s, expired)
        res = [("task status expired", task.status == 'EXPIRED'),
               ("task times_run is 0", task.times_run == 0),
               ("task didn't run at all", len(task_run) == 0)]
        self.exec_asserts(res, 'EXPIRATION')

        # prio check: lower scheduler_run id == executed earlier
        task1 = s.task_status(prio1.id, output=True)
        task2 = s.task_status(prio2.id, output=True)
        res = [("tasks status completed", task1.scheduler_task.status ==
                task2.scheduler_task.status == 'COMPLETED'),
               ("priority2 was executed before priority1",
                task1.scheduler_run.id > task2.scheduler_run.id)]
        self.exec_asserts(res, 'PRIORITY')
Пример #9
0
    def testDrift_and_env_and_immediate(self):
        """Covers immediate=True scheduling, prevent_drift period handling,
        and the W2P_TASK environment injected into running tasks."""
        s = Scheduler(self.db)
        immediate = s.queue_task('demo1', ['a', 'b'],
                                 dict(c=1, d=2),
                                 immediate=True)
        env = s.queue_task('demo7')
        # period=93 is deliberately long so drift would be observable
        drift = s.queue_task('demo1', ['a', 'b'],
                             dict(c=1, d=2),
                             period=93,
                             prevent_drift=True)
        termination = s.queue_task('termination')
        self.db.commit()
        self.writefunction(r"""
def demo1(*args,**vars):
    print('you passed args=%s and vars=%s' % (args, vars))
    return args[0]
import random
def demo7():
    time.sleep(random.randint(1,5))
    print(W2P_TASK, request.now)
    return W2P_TASK.id, W2P_TASK.uuid, W2P_TASK.run_id
""")
        ret = self.exec_sched()
        self.assertEqual(ret, 0)
        # immediate check, can only check that nothing breaks
        task1 = s.task_status(immediate.id)
        res = [
            ("tasks status completed", task1.status == 'COMPLETED'),
        ]
        self.exec_asserts(res, 'IMMEDIATE')

        # drift check: prevent_drift anchors next_run_time to start_time
        task, task_run = self.fetch_results(s, drift)
        res = [("task status completed", task.status == 'COMPLETED'),
               ("next_run_time is exactly start_time + period",
                (task.next_run_time == task.start_time +
                 datetime.timedelta(seconds=task.period)))]
        self.exec_asserts(res, 'DRIFT')

        # env check: W2P_TASK must expose the task's id, uuid and run id
        task1 = s.task_status(env.id, output=True)
        res = [
            ("task %s returned W2P_TASK correctly" % (task1.scheduler_task.id),
             task1.result == [
                 task1.scheduler_task.id, task1.scheduler_task.uuid,
                 task1.scheduler_run.id
             ]),
        ]
        self.exec_asserts(res, 'ENV')
Пример #10
0
 def testJobGraphDifferentJobs(self):
     """Dependencies split across two JobGraph names: each named job must
     validate on its own, and validate() with no name must check the union
     of all queued dependencies (where a cross-job cycle is detectable)."""
     s = Scheduler(self.db)
     myjob1 = JobGraph(self.db, 'job_1')
     myjob2 = JobGraph(self.db, 'job_2')
     fname = 'foo'
     # We have a few items to wear, and there's an "order" to respect...
     # Items are: watch, jacket, shirt, tie, pants, undershorts, belt, shoes, socks
     # Now, we can't put on the tie without wearing the shirt first, etc...
     watch = s.queue_task(fname, task_name='watch')
     jacket = s.queue_task(fname, task_name='jacket')
     shirt = s.queue_task(fname, task_name='shirt')
     tie = s.queue_task(fname, task_name='tie')
     pants = s.queue_task(fname, task_name='pants')
     undershorts = s.queue_task(fname, task_name='undershorts')
     belt = s.queue_task(fname, task_name='belt')
     shoes = s.queue_task(fname, task_name='shoes')
     socks = s.queue_task(fname, task_name='socks')
     # before the tie, comes the shirt
     myjob1.add_deps(tie.id, shirt.id)
     # before the belt too comes the shirt
     myjob1.add_deps(belt.id, shirt.id)
     # before the jacket, comes the tie
     myjob1.add_deps(jacket.id, tie.id)
     # before the belt, come the pants
     myjob1.add_deps(belt.id, pants.id)
     # before the shoes, comes the pants
     myjob2.add_deps(shoes.id, pants.id)
     # before the pants, comes the undershorts
     myjob2.add_deps(pants.id, undershorts.id)
     # before the shoes, comes the undershorts
     myjob2.add_deps(shoes.id, undershorts.id)
     # before the jacket, comes the belt
     myjob2.add_deps(jacket.id, belt.id)
     # before the shoes, comes the socks
     myjob2.add_deps(shoes.id, socks.id)
     # every job by itself can be completed
     # NOTE(review): 'job_2' is validated through the myjob1 instance —
     # validate() appears to key off the passed job name, not the instance's
     # own name; confirm against JobGraph's implementation.
     self.assertNotEqual(myjob1.validate('job_1'), None)
     self.assertNotEqual(myjob1.validate('job_2'), None)
     # and, implicitly, every queued task can be too
     self.assertNotEqual(myjob1.validate(), None)
     # add a cyclic dependency, jacket to undershorts
     myjob2.add_deps(undershorts.id, jacket.id)
     # every job can still be completed by itself
     self.assertNotEqual(myjob1.validate('job_1'), None)
     self.assertNotEqual(myjob1.validate('job_2'), None)
     # but trying to see if every task will ever be completed fails
     self.assertEqual(myjob2.validate(), None)
Пример #11
0
    def __init__(self):
        """Set up the web2py Scheduler for this application.

        self.scheduler is None when gluon.scheduler is not importable
        (the update-check script is expected to have warned already).
        """
        db_migrate = current.deployment_settings.get_base_migrate()
        task_map = current.response.s3.tasks

        try:
            from gluon.scheduler import Scheduler
        except ImportError:
            # Warning should already have been given by eden_update_check.py
            self.scheduler = None
        else:
            self.scheduler = Scheduler(current.db,
                                       task_map,
                                       migrate = db_migrate,
                                       #use_spawn = True # Possible subprocess method with Py3
                                       )
Пример #12
0
 def testTask_Status(self):
     """task_status() accepts an id, a uuid or a DAL Query — all three must
     return the same record — and rejects anything else."""
     scheduler = Scheduler(self.db)
     fname = 'foo'
     queued = scheduler.queue_task(fname, task_name='watch')
     # the same record must come back however it is looked up
     status_by_id = scheduler.task_status(queued.id)
     status_by_uuid = scheduler.task_status(queued.uuid)
     query = self.db.scheduler_task.function_name == 'foo'
     status_by_query = scheduler.task_status(query)
     self.assertEqual(status_by_id, status_by_uuid)
     self.assertEqual(status_by_id, status_by_query)
     # any other lookup type raises SyntaxError
     self.assertRaises(SyntaxError, scheduler.task_status, *[[1, 2]])
     # output=True returns the task/run join plus the decoded "result"
     detailed = scheduler.task_status(queued.id, output=True)
     self.assertEqual(set(detailed.keys()), set(['scheduler_run', 'scheduler_task', 'result']))
Пример #13
0
    def testHugeResult(self):
        s = Scheduler(self.db)
        huge_result = s.queue_task('demo10', retry_failed=1, period=1)
        self.db.commit()
        self.writefunction(r"""
def demo10():
    res = 'a' * 99999
    return dict(res=res)
""")
        ret = self.exec_sched()
        # process finished just fine
        self.assertEqual(ret, 0)
        # huge_result - checks
        task = s.task_status(huge_result.id, output=True)
        res = [("task status completed",
                task.scheduler_task.status == 'COMPLETED'),
               ("task times_run is 1", task.scheduler_task.times_run == 1),
               ("result is the correct one",
                task.result == dict(res='a' * 99999))]
        self.exec_asserts(res, 'HUGE_RESULT')
Пример #14
0
 def testJobGraphFailing(self):
     """A cyclic dependency makes validate() return None (no exception) and
     must leave no rows behind in scheduler_task_deps."""
     s = Scheduler(self.db)
     myjob = JobGraph(self.db, 'job_1')
     fname = 'foo'
     # We have a few items to wear, and there's an "order" to respect...
     # Items are: watch, jacket, shirt, tie, pants, undershorts, belt, shoes, socks
     # Now, we can't put on the tie without wearing the shirt first, etc...
     watch = s.queue_task(fname, task_name='watch')
     jacket = s.queue_task(fname, task_name='jacket')
     shirt = s.queue_task(fname, task_name='shirt')
     tie = s.queue_task(fname, task_name='tie')
     pants = s.queue_task(fname, task_name='pants')
     undershorts = s.queue_task(fname, task_name='undershorts')
     belt = s.queue_task(fname, task_name='belt')
     shoes = s.queue_task(fname, task_name='shoes')
     socks = s.queue_task(fname, task_name='socks')
     # before the tie, comes the shirt
     myjob.add_deps(tie.id, shirt.id)
     # before the belt too comes the shirt
     myjob.add_deps(belt.id, shirt.id)
     # before the jacket, comes the tie
     myjob.add_deps(jacket.id, tie.id)
     # before the belt, come the pants
     myjob.add_deps(belt.id, pants.id)
     # before the shoes, comes the pants
     myjob.add_deps(shoes.id, pants.id)
     # before the pants, comes the undershorts
     myjob.add_deps(pants.id, undershorts.id)
     # before the shoes, comes the undershorts
     myjob.add_deps(shoes.id, undershorts.id)
     # before the jacket, comes the belt
     myjob.add_deps(jacket.id, belt.id)
     # before the shoes, comes the socks
     myjob.add_deps(shoes.id, socks.id)
     # add a cyclic dependency, jacket to undershorts
     myjob.add_deps(undershorts.id, jacket.id)
     # no exceptions raised, but result None
     self.assertEqual(myjob.validate('job_1'), None)
     # and no deps added
     deps_inserted = self.db(self.db.scheduler_task_deps.id > 0).count()
     self.assertEqual(deps_inserted, 0)
Пример #15
0
# App metadata exposed to the layout/templates, read from app config.
response.meta.author = configuration.get('app.author')
response.meta.description = configuration.get('app.description')
response.meta.keywords = configuration.get('app.keywords')
response.meta.generator = configuration.get('app.generator')

# -------------------------------------------------------------------------
# your http://google.com/analytics id
# -------------------------------------------------------------------------
response.google_analytics_id = configuration.get('google.analytics_id')

# -------------------------------------------------------------------------
# maybe use the scheduler
# -------------------------------------------------------------------------
if configuration.get('scheduler.enabled'):
    from gluon.scheduler import Scheduler
    # BUG FIX: was `configure.get('heartbeat')` — `configure` is undefined
    # (NameError at model load) and the scaffold config key for the worker
    # heartbeat is 'scheduler.heartbeat'.
    scheduler = Scheduler(db,
                          heartbeat=configuration.get('scheduler.heartbeat'))

# -------------------------------------------------------------------------
# Define your tables below (or better in another model file) for example
#
# >>> db.define_table('mytable', Field('myfield', 'string'))
#
# Fields can be 'string','text','password','integer','double','boolean'
#       'date','time','datetime','blob','upload', 'reference TABLENAME'
# There is an implicit 'id integer autoincrement' field
# Consult manual for more options, validators, etc.
#
# More API examples for controllers:
#
# >>> db.mytable.insert(myfield='value')
# >>> rows = db(db.mytable.myfield == 'value').select(db.mytable.ALL)
Пример #16
0
            ) + " change from when you last checked. \n Here are the closing prices of the stocks you are currently following: \n"
            for x in follow_list:
                follow_string += "Ticker: " + x[0] + " Closing Price: " + str(
                    x[1]) + "\n"
            mail = auth.settings.mailer
            mail.settings.server = 'smtp.gmail.com:587'
            mail.settings.sender = '*****@*****.**'
            mail.settings.login = '******'
            mail.send(
                to=user.email,
                subject='Your Daily Stock Information Courtesy of SlugStock',
                message=(follow_string))


def nextday():
    """Advance the global day counter by one and prune old 'recent' rows."""
    # `!= None` is a DAL query expression (IS NOT NULL), not a Python
    # comparison — this fetches the single day-counter row.
    day = db(db.day.day != None).select().first()
    day.update_record(day=(day.day + 1))
    # drop 'recent' rows that are 5+ days behind the new current day
    db(db.recent.day <= (day.day - 5)).delete()
    db.commit()


# Register the web2py scheduler with every background task this app can run;
# the dict maps the queueable task name to its handler function.
from gluon.scheduler import Scheduler
scheduler = Scheduler(db,
                      tasks=dict(email=email_daily,
                                 updatePrices=updateYahooPrices,
                                 emergency_email=emergency_email,
                                 clear=remove_completed_tasks,
                                 nextday=nextday,
                                 csv_read=csv_read,
                                 csv_daily=csv_daily))
Пример #17
0
# App metadata exposed to the layout/templates, read from app config.
response.meta.description = configuration.get('app.description')
response.meta.keywords = configuration.get('app.keywords')
response.meta.generator = configuration.get('app.generator')
response.show_toolbar = configuration.get('app.toolbar')

# -------------------------------------------------------------------------
# your http://google.com/analytics id
# -------------------------------------------------------------------------
response.google_analytics_id = configuration.get('google.analytics_id')

# -------------------------------------------------------------------------
# maybe use the scheduler
# -------------------------------------------------------------------------
if configuration.get('scheduler.enabled'):
    from gluon.scheduler import Scheduler
    # heartbeat: seconds between worker wake-ups (from app config)
    scheduler = Scheduler(db,
                          heartbeat=configuration.get('scheduler.heartbeat'))

# -------------------------------------------------------------------------
# Define your tables below (or better in another model file) for example
#
# >>> db.define_table('mytable', Field('myfield', 'string'))
#
# Fields can be 'string','text','password','integer','double','boolean'
#       'date','time','datetime','blob','upload', 'reference TABLENAME'
# There is an implicit 'id integer autoincrement' field
# Consult manual for more options, validators, etc.
#
# More API examples for controllers:
#
# >>> db.mytable.insert(myfield='value')
# >>> rows = db(db.mytable.myfield == 'value').select(db.mytable.ALL)
Пример #18
0
# App metadata exposed to the layout/templates, read from app config.
response.meta.author = configuration.get('app.author')
response.meta.description = configuration.get('app.description')
response.meta.keywords = configuration.get('app.keywords')
response.meta.generator = configuration.get('app.generator')

# -------------------------------------------------------------------------
# your http://google.com/analytics id
# -------------------------------------------------------------------------
response.google_analytics_id = configuration.get('google.analytics_id')

# -------------------------------------------------------------------------
# maybe use the scheduler
# -------------------------------------------------------------------------

# Scheduler is always enabled here (no config gate), with default settings.
from gluon.scheduler import Scheduler
scheduler = Scheduler(db)

# -------------------------------------------------------------------------
# Define your tables below (or better in another model file) for example
#
# >>> db.define_table('mytable', Field('myfield', 'string'))
#
# Fields can be 'string','text','password','integer','double','boolean'
#       'date','time','datetime','blob','upload', 'reference TABLENAME'
# There is an implicit 'id integer autoincrement' field
# Consult manual for more options, validators, etc.
#
# More API examples for controllers:
#
# >>> db.mytable.insert(myfield='value')
# >>> rows = db(db.mytable.myfield == 'value').select(db.mytable.ALL)
Пример #19
0
    ## if NOT running on Google App Engine use SQLite or other DB
    db = DAL('sqlite://storage.sqlite',
             pool_size=1,
             lazy_tables=True,
             migrate=True)
    db_sched = DAL('sqlite://storage_scheduler.sqlite')
else:
    ## connect to Google BigTable (optional 'google:datastore://namespace')
    db = DAL('google:datastore')
    ## store sessions and tickets there
    ## or store session in Memcache, Redis, etc.
    ## from gluon.contrib.memdb import MEMDB
    ## from google.appengine.api.memcache import Client
    ## session.connect(request, response, db = MEMDB(Client()))

# Scheduler uses its own database so worker traffic doesn't contend with
# the app db (db_sched is defined above).
scheduler = Scheduler(db_sched)

# Store sessions in the application database.
session.connect(request, response, db=db)

## by default give a view/generic.extension to all actions from localhost
## none otherwise. a pattern can be 'controller/function.extension'
# response.generic_patterns = ['*'] if request.is_local else ['*.rss']
response.generic_patterns = ['*.rss']
## (optional) optimize handling of static files
#response.optimize_css = 'concat,minify,inline'
#response.optimize_js = 'concat,minify,inline'

from gluon.tools import Auth, Crud, Service, PluginManager, prettydate

# Standard web2py helpers: authentication, CRUD, RPC services, plugins.
auth = Auth(db)
crud, service, plugins = Crud(db), Service(), PluginManager()
Пример #20
0
import os, os.path
import sys
import re
from paver.easy import sh
import logging
from pkg_resources import resource_string, resource_filename

import caliper
import requests, json, sys

from datetime import datetime

# Module-level logger for scheduler tasks; name and level come from settings.
rslogger = logging.getLogger(settings.sched_logger)
rslogger.setLevel(settings.log_level)

# 'runestone_' prefixes the scheduler's table migration files to avoid
# clashing with the app's own migrations.
scheduler = Scheduler(db, migrate='runestone_')

################
## This task will run as a scheduled task using the web2py scheduler.
## It's dispached from build() and build_custom() in controllers/designer.py
################
def run_sphinx(rvars=None, folder=None, application=None, http_host=None, base_course=None):
    # workingdir is the application folder
    workingdir = folder
    # sourcedir holds the all sources temporarily
    sourcedir = path.join(workingdir, 'build', rvars['projectname'])

    rslogger.debug("Starting to build {}".format(rvars['projectname']))

    # create the custom_courses dir if it doesn't already exist
    if not os.path.exists(path.join(workingdir, 'custom_courses')):
Пример #21
0
# Enable the scheduler
from gluon.scheduler import Scheduler

# max_empty_runs=0: workers keep polling indefinitely even when the queue is
# empty; heartbeat=3: seconds between polls. group_names restricts which task
# groups these workers will pick up. `tasks` maps queueable names to handlers.
scheduler = Scheduler(
    db_scheduler,
    max_empty_runs=0,
    heartbeat=3,
    group_names=[
        'process_videos', 'create_home_directory', 'wamap_delete',
        'wamap_videos', 'misc', "download_videos"
    ],
    tasks=dict(
        process_media_file=process_media_file,
        process_wamap_video_links=process_wamap_video_links,
        create_home_directory=create_home_directory,
        remove_old_wamap_video_files=remove_old_wamap_video_files,
        download_wamap_qimages=download_wamap_qimages,
        refresh_all_ad_logins=refresh_all_ad_logins,
        update_media_database_from_json_files=
        update_media_database_from_json_files,
        pull_youtube_video=pull_youtube_video,
        update_document_database_from_json_files=
        update_document_database_from_json_files,
        flush_redis_keys=flush_redis_keys,
        pull_youtube_caption=pull_youtube_caption,
    ))
# expose the scheduler to the rest of the request via thread-local `current`
current.scheduler = scheduler

# Make sure to run the ad login refresh every hour or so
refresh_ad_login = current.cache.ram('refresh_ad_login',
Пример #22
0
from gluon.scheduler import Scheduler
# migrate=False: the scheduler's tables are assumed to exist already, so no
# migration files are created or applied for them.
scheduler = Scheduler(db, migrate=False)

from applications.vtraffic.modules.tools import EPOCH_M
from datetime import timedelta


def test_write():
	"""Smoke-test database writes.

	Defines a throwaway table (migrate=True creates it on first call),
	inserts one row, commits, and returns (new row id, total row count).
	"""
	db.define_table('test_write',
		Field('value', 'integer' ),
		migrate=True
	)
	insert_id = db.test_write.insert(value=123)
	n_insert  = db(db.test_write).count()
	db.commit()
	return (insert_id, n_insert)

## For each possible origin/destination couple finds the matches
def run_all():
	print 'start'
	stations = db(db.station.id).select(db.station.id, orderby=db.station.id)
	total = 0
	for o in stations:
		for d in stations:
			if o.id != d.id:
				matches = find_matches(o.id, d.id)
				__save_match(matches)
				total   += len(matches)
				query = (db.match.station_id_orig == o.id) & (db.match.station_id_dest == d.id)
				#__get_blocks_scheduler(query, 900, reset_cache=True)
Пример #23
0
    for i in xrange(0,active_host_no):	
        host_ip_list.append(active_host_list[i].private_ip)
        host_name_list.append(active_host_name[i].host_name)
    logger.debug( host_ip_list)
    logger.debug( host_name_list)
    collect_data_from_host(host_ip_list,host_name_list)
    logger.debug("collected host networking data")    
     
# Defining scheduler tasks
from gluon.scheduler import Scheduler

# Map every queueable task name to its handler function; workers are limited
# to the listed group names.
_vm_tasks = dict(
    vm_task=process_task_queue,
    clone_task=process_clone_task,
    snapshot_vm=process_snapshot_vm,
    vm_sanity=vm_sanity_check,
    vnc_access=check_vnc_access,
    host_sanity=host_sanity_check,
    vm_util_rrd=vm_utilization_rrd,
    vm_daily_checks=process_vmdaily_checks,
    vm_purge_unused=process_unusedvm_purge,
    memory_overload=overload_memory,
    networking_host=host_networking,
)
vm_scheduler = Scheduler(db,
                         tasks=_vm_tasks,
                         group_names=['vm_task', 'vm_sanity', 'host_task',
                                      'vm_rrd', 'snapshot_task'])


midnight_time = request.now.replace(hour=23, minute=59, second=59)

vm_scheduler.queue_task(TASK_SNAPSHOT, 
                    pvars = dict(snapshot_type = SNAPSHOT_DAILY),
                    repeats = 0, # run indefinitely
                    start_time = midnight_time, 
                    period = 24 * HOURS, # every 24h
                    timeout = 5 * MINUTES,
Пример #24
0
    def testNoReturn_and_Timeout_and_Progress(self):
        """Exercise three scheduler behaviors in one worker pass:

        - demo5/demo3: tasks with no return value (demo5 completes,
          demo3 raises ZeroDivisionError and must FAIL);
        - demo4: the same slow task queued with and without a timeout;
        - demo6: printed progress, where the ``!clear!`` marker must wipe
          previous output so only the final ``100%`` remains.
        """
        s = Scheduler(self.db)
        # queue the workload plus the sentinel 'termination' task
        noret1 = s.queue_task('demo5')
        noret2 = s.queue_task('demo3')
        timeout1 = s.queue_task('demo4', timeout=5)
        timeout2 = s.queue_task('demo4')
        progress = s.queue_task('demo6', sync_output=2)
        termination = s.queue_task('termination')
        self.db.commit()
        self.writefunction(r"""
def demo3():
    time.sleep(3)
    print(1/0)
    return None

def demo4():
    time.sleep(15)
    print("I'm printing something")
    return dict(a=1, b=2)

def demo5():
    time.sleep(3)
    print("I'm printing something")
    rtn = dict(a=1, b=2)

def demo6():
    time.sleep(5)
    print('50%')
    time.sleep(5)
    print('!clear!100%')
    return 1
""")
        ret = self.exec_sched()
        # worker process must exit cleanly
        self.assertEqual(ret, 0)
        # noreturn check
        task1, task_run1 = self.fetch_results(s, noret1)
        task2, task_run2 = self.fetch_results(s, noret2)
        res = [
            ("tasks no_returns1 completed", task1.status == 'COMPLETED'),
            ("tasks no_returns2 failed", task2.status == 'FAILED'),
            ("no_returns1 doesn't have a scheduler_run record",
             len(task_run1) == 0),
            ("no_returns2 has a scheduler_run record FAILED",
             (len(task_run2) == 1 and task_run2[0].status == 'FAILED')),
        ]
        self.exec_asserts(res, 'NO_RETURN')

        # timeout check
        task1 = s.task_status(timeout1.id, output=True)
        task2 = s.task_status(timeout2.id, output=True)
        res = [("tasks timeouts1 timeoutted",
                task1.scheduler_task.status == 'TIMEOUT'),
               ("tasks timeouts2 completed",
                task2.scheduler_task.status == 'COMPLETED')]
        self.exec_asserts(res, 'TIMEOUT')

        # progress check
        task1 = s.task_status(progress.id, output=True)
        res = [("tasks percentages completed",
                task1.scheduler_task.status == 'COMPLETED'),
               ("output contains only 100%",
                task1.scheduler_run.run_output.strip() == "100%")]
        self.exec_asserts(res, 'PROGRESS')
Пример #25
0
# Populate page metadata and toolbar visibility from app configuration.
response.meta.keywords = configuration.get("app.keywords")
response.meta.generator = configuration.get("app.generator")
response.show_toolbar = configuration.get("app.toolbar")

# -------------------------------------------------------------------------
# your http://google.com/analytics id
# -------------------------------------------------------------------------
response.google_analytics_id = configuration.get("google.analytics_id")

# -------------------------------------------------------------------------
# maybe use the scheduler
# -------------------------------------------------------------------------
if configuration.get("scheduler.enabled"):
    from gluon.scheduler import Scheduler

    # Heartbeat (worker poll interval, in seconds) also comes from config.
    scheduler = Scheduler(db, heartbeat=configuration.get("scheduler.heartbeat"))

# -------------------------------------------------------------------------
# Define your tables below (or better in another model file) for example
#
# >>> db.define_table('mytable', Field('myfield', 'string'))
#
# Fields can be 'string','text','password','integer','double','boolean'
#       'date','time','datetime','blob','upload', 'reference TABLENAME'
# There is an implicit 'id integer autoincrement' field
# Consult manual for more options, validators, etc.
#
# More API examples for controllers:
#
# >>> db.mytable.insert(myfield='value')
# >>> rows = db(db.mytable.myfield == 'value').select(db.mytable.ALL)
Пример #26
0
            db2(tasks_to_delete).delete()
            db(db.global_settings.kkey == 'operation_key').delete()
            db.commit()
            db2.commit()
        except:
            rtn.append('exception')
            db.rollback()
            db2.rollback()

    return rtn


# Scheduler on the secondary connection (db2): every background task
# function is registered here under the queue name workers will use.
# Table migration is controlled by the app-level MIGRATE flag.
myscheduler = Scheduler(db2,
                        dict(check_season=check_season,
                             create_path=create_path,
                             add_series=add_series,
                             update_single_series=update_single_series,
                             bit_actualizer=bit_actualizer,
                             check_subs=check_season_subs,
                             down_epbanners=down_epbanners,
                             down_sebanners=down_sebanners,
                             update=update,
                             maintenance=maintenance,
                             ep_metadata=ep_metadata,
                             queue_torrents=queue_torrents,
                             down_torrents=down_torrents,
                             scoop_season=scoop_season,
                             series_metadata=series_metadata,
                             the_boss=the_boss),
                        migrate=MIGRATE)
Пример #27
0
# License Content: Creative Commons Attribution 3.0
#
# Also visit: www.web2py.com
# or Groups: http://groups.google.com/group/web2py
# For details on the web framework used for this development
#
# With thanks to Guido, Massimo and many other that make this sort of thing
# much easier than it used to be

from ndsfunctions import score_question, resulthtml, truncquest, getrundates
import datetime
from ndspermt import get_exclude_groups

from gluon.scheduler import Scheduler

# 15-second heartbeat: workers poll for new tasks every 15 seconds.
scheduler = Scheduler(db, heartbeat=15)


def activity(id=0,
             resend=False,
             period='Week',
             format='html',
             source='default'):
    # This will be triggered from runactivity function below which figures out if
    # this needs to be run and on success rolls the run date forward for the next
    # period this just formats the message and formats for sending via email

    db = current.db

    if id > 0:
        rows = db(db.email_runs.id == id).select()
Пример #28
0

def import_all_nexpose_vulndata(overwrite=False, nexpose_server=None):
    """
    Import all vulnerability data from Nexpose.

    Args:
        overwrite: when True, existing vulnerability records are replaced.
        nexpose_server: optional dict of Nexpose connection settings; an
            empty dict is used when omitted.

    Returns:
        True on completion.
    """
    from skaldship.nexpose import import_all_vulndata

    # Fix mutable-default-argument pitfall: the original `nexpose_server={}`
    # shares one dict across all calls; build a fresh one per call instead.
    if nexpose_server is None:
        nexpose_server = {}
    import_all_vulndata(overwrite=overwrite, nexpose_server=nexpose_server)
    return True


##-------------------------------------------------------
def connect_exploits():
    """
    Load the Nexpose exploits.xml data into the database.

    Delegates to skaldship.exploits.connect_exploits and returns True.
    """
    from skaldship.exploits import connect_exploits as _do_connect

    _do_connect()
    return True


##----------------------------------------------------------------------------

# Scheduler bound to this app's database; table migration and the worker
# group name are driven by the app settings object.
scheduler = Scheduler(
    db=db,
    migrate=settings.migrate,
    group_names=[settings.scheduler_group_name],
)
from gluon.scheduler import Scheduler
from module_admin_functions import scan_box

# The scheduler is loaded and defined in a model, so that it can register the
# required tables with the database. The functions are defined in separate modules.

# Load the scheduler and set the task names. With only daily tasks, a slower
# heartbeat is fine, but with dataset checking, a snappier response is needed,
# so the default 3 second heartbeat is used. Note that individual queue tasks
# can set immediate=TRUE to get prompter running of a task, but that still might
# wait for one or two heartbeats to actually run.

# Single registered task (scan_box) using the default 3-second heartbeat.
scheduler = Scheduler(db, tasks=dict(scan_box=scan_box))

# These tasks then need to be queued using scheduler.queue_task or manually via
# the appadmin interface. Don't do it here as they'll be queued every time the
# model runs, which is basically every time a webpage is loaded! So,
# programatically, they can go in a controller which an admin can run once to
# get a defined set of queues going.
Пример #30
0
# -*- coding: utf-8 -*-
from gluon.scheduler import Scheduler

# Heartbeat (seconds between worker polls) is read from app configuration.
scheduler = Scheduler(db, heartbeat=int(myconf.get('scheduler.heartbeat')))


def __schedule_daemon_tasks():
    """Queue every task listed in DAEMON_TASKS, one at a time."""
    for task_tuple in DAEMON_TASKS:
        __schedule_daemon_task(task_tuple)


def __midnight(dt):
    """Return midnight (00:00:00.000000) of the day after *dt*."""
    from datetime import timedelta
    # stdlib timedelta(days=1) is exactly equivalent to dateutil's
    # relativedelta(days=1) for adding one day to a naive datetime,
    # so the third-party dateutil dependency is unnecessary here.
    return (dt + timedelta(days=1)).replace(hour=0,
                                            minute=0,
                                            second=0,
                                            microsecond=0)


def __schedule_daemon_task(task_tuple):
    task_name = task_tuple[0]
    task_period = task_tuple[1]
    tasks = db(db.scheduler_task.function_name == task_name).count()
    if not tasks:
        now = datetime.datetime.now()
        ttime = __midnight(now) if (task_name in MIDNIGHT_TASKS) else now
        t = scheduler.queue_task(
            task_name,
            start_time=ttime,
            next_run_time=ttime,
            pvars={},