Example #1
File: views.py Project: vegten/OIPA
def get_scheduled_tasks(request):
    from rq import use_connection
    from rq_scheduler import Scheduler
    import json

    use_connection() # Use RQ's default Redis connection
    scheduler = Scheduler() # Get a scheduler for the "default" queue
    list_of_job_instances = scheduler.get_jobs()

    jobdata = list()
    for job in list_of_job_instances:
        if "interval" in job.meta:
            interval = job.meta["interval"]
        else:
            interval = 0
        job_dict = { 'job_id': job._id, 'task': job.description, 'period': interval, 'args': job.args, 'queue': "default" }
        jobdata.append(job_dict)

    # scheduler = Scheduler('parser') # Get a scheduler for the "parser" queue
    # list_of_job_instances = scheduler.get_jobs()
    #
    # for job in list_of_job_instances:
    #     if "interval" in job.meta:
    #         interval = job.meta["interval"]
    #     else:
    #         interval = 0
    #     job_dict = { 'job_id': job._id, 'task': job.description, 'period': interval, 'queue': "parser" }
    #     jobdata.append(job_dict)

    data = json.dumps(jobdata)
    return HttpResponse(data, content_type='application/json')
Example #2
File: views.py Project: vegten/OIPA
def cancel_scheduled_task(request):
    job_id = request.GET.get('job_id')
    from rq_scheduler import Scheduler

    scheduler = Scheduler('parser')
    scheduler.cancel(job_id)
    return HttpResponse('Success')
Example #3
def update_scheduled_connection(connection):
    ''' schedule a new scrape of the connection source; called when the
    interval was changed or when the last job has finished and the next
    needs to be scheduled
    '''
    repeating_task = connection.schedule
    # check to see if schedule is available -- abort if not
    # note that ready_to_connect does not verify this
    if not repeating_task.interval:
        return False
    
    # connect to the rq scheduler
    redis_config = app.config['REDIS_CONFIG']
    use_connection(Redis(redis_config['host'], redis_config['port']
            , password=redis_config['password']))
    scheduler = Scheduler()
    
    # see if this schedule had a job that was already enqueued
    if repeating_task.next_task_id:
        # instantiate the job
        job = Job(id=repeating_task.next_task_id)
        # cancel the old job
        scheduler.cancel(job)

    # determine how many seconds to wait
    delay = _calculate_schedule_delay(repeating_task.interval)
    
    # start a new job
    job = scheduler.enqueue_in(datetime.timedelta(seconds=int(delay))
        , connect_to_source, connection.id)
    
    # save this id and when it runs next
    repeating_task.update(set__next_task_id = job.id)
    repeating_task.update(set__next_run_time = (datetime.datetime.utcnow() 
        + datetime.timedelta(seconds=delay)))
Example #4
File: views.py Project: vegten/OIPA
def add_scheduled_task(request):

    task = request.GET.get('task')
    period = request.GET.get('period')
    queue = request.GET.get('queue')
    parameters = request.GET.get('parameters')


    from rq import use_connection
    from rq_scheduler import Scheduler
    from datetime import datetime

    use_connection() # Use RQ's default Redis connection
    scheduler = Scheduler(queue) # Get a scheduler for the requested queue

    if parameters:
        scheduler.schedule(
            scheduled_time=datetime.now(),   # Time for first execution
            func=getattr(tasks, task),       # Function to be queued
            args=[int(parameters)],
            interval=int(period),            # Time before the function is called again, in seconds
            repeat=None                      # Repeat this number of times (None means repeat forever)
        )
    else:
        scheduler.schedule(
            scheduled_time=datetime.now(),   # Time for first execution
            func=getattr(tasks, task),       # Function to be queued
            interval=int(period),            # Time before the function is called again, in seconds
            repeat=None                      # Repeat this number of times (None means repeat forever)
        )
    return HttpResponse('Success')
Example #5
def replay_request(request_id, destination_id, retries=None):
    '''Replays request from source to destination, retrying when appropriate'''
    app = setup_app()
    if retries is None:
        retries = app.config.get('API_REPLAY_RETRIES')
    request = Request.get(request_id)
    destination = Destination.get(destination_id)
    headers = {
        'X-Original-{}'.format(k): v for k, v in request.headers.items()
    }
    headers['Content-Type'] = request.headers.get('Content-Type', '')
    # TODO: add exception handling here
    response = requests.request(
        method=request.method,
        url=destination.url,
        data=request.body,
        headers=headers,
    )
    if response.status_code < 200 or response.status_code >= 300:
        if retries:
            with app.app_context():
                scheduler = Scheduler(connection=redis_store.connection)
                delay = app.config.get('API_REPLAY_BASE') ** (
                    app.config.get('API_REPLAY_RETRIES') - retries
                )
                scheduler.enqueue_at(
                    datetime.utcnow() + timedelta(seconds=delay),
                    replay_request,
                    request.id,
                    destination.id,
                    retries=retries - 1,
                )
        else:
            raise BadResponseException(response.status_code, response.text)
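The retry delay above grows exponentially with each attempt. As a worked illustration of the formula (assuming hypothetical config values API_REPLAY_BASE = 2 and API_REPLAY_RETRIES = 5):

# Hypothetical config values, for illustration only.
API_REPLAY_BASE = 2
API_REPLAY_RETRIES = 5

# retries counts down from API_REPLAY_RETRIES, so the exponent grows
# with each failed attempt: delays of 1, 2, 4, 8 and 16 seconds.
for retries in range(API_REPLAY_RETRIES, 0, -1):
    delay = API_REPLAY_BASE ** (API_REPLAY_RETRIES - retries)
    print(retries, delay)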
Example #6
    def handle(self, *args, **options):

        with Connection(redis.Redis(**settings.RQ_DATABASE)):
            scheduler = Scheduler('rss_collector')

            jobs = scheduler.get_jobs()
            for job in jobs:
                if job.func_name != 'collector.rss.fetch_rss':
                    continue

                if options.get('replace'):
                    job.cancel()
                    break
                else:
                    raise CommandError('RSS collector task already scheduled')

            try:
                scheduler.schedule(
                    datetime.datetime.now(),
                    fetch_rss,
                    interval=1200,
                    repeat=20000,
                )
            except redis.exceptions.ConnectionError:
                raise CommandError('Redis did not respond')
Example #7
def cancel_all(queue_name):
    scheduler = Scheduler()
    for job in scheduler.get_jobs():
        if job.origin == queue_name:
            scheduler.cancel(job)

    return dict(status='OK')
Example #8
def renderings(org_label, project_label, rendering_id):
    ''' deleting or downloading renderings
    /organizations/aquaya/projects/water-quality/renderings/4cmb1?delete=true
        : remove a rendering from the system and s3
    /organizations/aquaya/projects/water-quality/renderings/4cmb1?download=true
        : download a rendering from s3
    '''
    user = User.objects(email=session['email'])[0]
    
    orgs = Organization.objects(label=org_label)
    if not orgs:
        abort(404)
    org = orgs[0]

    # permission-check
    if org not in user.organizations and not user.admin_rights:
        app.logger.error('%s tried to view a project but was denied '
                         'for want of admin rights' % session['email'])
        abort(404)
    
    # find the project
    projects = Project.objects(label=project_label, organization=org) 
    if not projects:
        abort(404)
    
    # find the specified rendering
    renderings = Rendering.objects(id=rendering_id)
    if not renderings:
        abort(404)
    rendering = renderings[0]
    
    # save the report for later redirect
    report = rendering.report

    if request.args.get('delete', '') == 'true':
        # remove the rendering
        utilities.delete_rendering(rendering, user.email)

        flash('Rendering successfully deleted.', 'success')
        return redirect(url_for('reports'
            , org_label=report.project.organization.label
            , project_label=report.project.label, report_label=report.label))

    if request.args.get('download', '') == 'true':
        absolute_filename = utilities.download_rendering_from_s3(rendering)
        
        # delay the deletion so we have time to serve the file
        redis_config = app.config['REDIS_CONFIG']
        use_connection(Redis(redis_config['host'], redis_config['port']
                , password=redis_config['password']))
        scheduler = Scheduler()
        scheduler.enqueue_in(datetime.timedelta(seconds=60)
            , delete_local_file, absolute_filename)

        return send_file(absolute_filename, as_attachment=True)

    else:
        abort(404)
Example #9
 def test_create_job(self):
     """
     Ensure that jobs are created properly.
     """
     scheduler = Scheduler(connection=self.testconn)
     job = scheduler._create_job(say_hello)
     job_from_queue = Job.fetch(job.id, connection=self.testconn)
     self.assertEqual(job, job_from_queue)
     self.assertEqual(job_from_queue.func, say_hello)
Example #10
 def test_birth_and_death_registration(self):
     key = Scheduler.scheduler_key
     self.assertNotIn(key, self.testconn.keys('*'))
     scheduler = Scheduler(connection=self.testconn)
     scheduler.register_birth()
     self.assertIn(key, self.testconn.keys('*'))
     self.assertFalse(self.testconn.hexists(key, 'death'))
     self.assertRaises(ValueError, scheduler.register_birth)
     scheduler.register_death()
     self.assertTrue(self.testconn.hexists(key, 'death'))
Example #11
def list_todo():
    scheduler = Scheduler(connection=Redis()) # Get a scheduler for the "default" queue

    response.content_type = 'text/plain; charset=utf-8'

    list_of_job_instances = scheduler.get_jobs()
    msg = "\n".join([ str(job) for job in list_of_job_instances])
    logging.info(msg)

    return msg
Example #12
File: job.py Project: pathcl/qpanel
def job_reset_stats_queue(queuename, when, hour):
    scheduler = Scheduler(connection=Redis())
    remove_jobs_not_config()
    if not exists_job_onqueue(queuename, when, hour):
        scheduler.schedule(
            scheduled_time=datetime_from_config(when, hour),
            func=reset_stats_queue,
            args=[queuename, when, hour],
            interval=seconds_from_config_interval(when)
        )
Example #13
 def test_create_scheduled_job(self):
     """
     Ensure that scheduled jobs are put in the scheduler queue with the right score
     """
     scheduled_time = datetime.now()
     scheduler = Scheduler(connection=self.testconn)
     job = scheduler.enqueue_at(scheduled_time, say_hello)
     self.assertEqual(job, Job.fetch(job.id, connection=self.testconn))
     self.assertIn(job.id, self.testconn.zrange(scheduler.scheduled_jobs_key, 0, 1))
     self.assertEqual(self.testconn.zscore(scheduler.scheduled_jobs_key, job.id),
                      int(scheduled_time.strftime('%s')))
Example #14
class TestSetupScheduledJobs(object):
    """Tests for setup function '_schedule_job'"""

    def setUp(self):
        self.connection = StrictRedis()
        self.connection.flushall()
        self.scheduler = Scheduler('test_queue', connection=self.connection)


    def test_adds_scheduled_job_with_interval(self):
        a_job['interval'] = 7
        _schedule_job(a_job, self.scheduler)
        sched_jobs = self.scheduler.get_jobs()

        assert len(sched_jobs) == 1, sched_jobs
        assert sched_jobs[0].meta['interval'] == 7, sched_jobs[0].meta
        a_job['interval'] = 1


    def test_adds_several_jobs_(self):
        _schedule_job(a_job, self.scheduler)
        _schedule_job(another_job, self.scheduler)
        sched_jobs = self.scheduler.get_jobs()
        job_func_names = [job.func_name for job in sched_jobs]
        module_name = 'test_jobs.test_schedule_jobs'

        assert len(sched_jobs) == 2, sched_jobs
        assert module_name + '.a_function' in job_func_names, job_func_names
        assert module_name + '.another_function' in job_func_names, job_func_names


    def test_does_not_add_job_if_already_added(self):
        _schedule_job(a_job, self.scheduler)
        _schedule_job(a_job, self.scheduler)
        sched_jobs = self.scheduler.get_jobs()

        assert len(sched_jobs) == 1, sched_jobs


    def test_returns_log_messages(self):
        success_message = _schedule_job(a_job, self.scheduler)
        failure_message = _schedule_job(a_job, self.scheduler)

        assert success_message == 'Scheduled a_function([], {}) to run every 1 seconds'
        assert failure_message == 'WARNING: Job a_function([], {}) is already scheduled'


    def test_failed_attempt_to_schedule_does_not_polute_redis(self):
        _schedule_job(a_job, self.scheduler)
        _schedule_job(a_job, self.scheduler)
        stored_values = self.connection.keys('rq:job*')

        assert len(stored_values) == 1, len(stored_values)
Example #15
File: job.py Project: pathcl/qpanel
def exists_job_onqueue(queuename, when, hour):
    """
        Check if a job is present on queue
    """
    scheduler = Scheduler(connection=Redis())
    jobs = scheduler.get_jobs()
    for job in jobs:
        if 'reset_stats_queue' in job.func_name:
            args = job.args
            if queuename == args[0] and when == args[1] and hour == args[2]:
                return True
    return False
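A hypothetical invocation of the two qpanel helpers above; the queue name and the 'when'/'hour' values are made up, and in the real project they come from qpanel's config:

# Schedule a daily stats reset for the 'support' queue at midnight,
# unless an identical job is already on the scheduler.
job_reset_stats_queue('support', 'daily', '00:00')
print(exists_job_onqueue('support', 'daily', '00:00'))  # True once scheduled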
Example #16
def scheduled_jobs():
    from rq_scheduler import Scheduler
    import redis
    conn = redis.from_url(app.config['REDIS_URL'])
    scheduler = Scheduler(connection=conn)
    scheduled_jobs = scheduler.get_jobs(with_times=True)
    response = json.dumps({
        "status": "ok",
        "count": len(scheduled_jobs),
        "jobs": [dict(job.__dict__, next_work=next_work)
                 for job, next_work in scheduled_jobs],
    }, default=dthandler)
    return Response(response, mimetype='application/json')
Example #17
 def test_enqueue_job_with_queue(self):
     """
     Ensure that job is enqueued correctly when the scheduler is bound
     to a queue object.
     """
     queue = Queue('foo', connection=self.testconn)
     scheduler = Scheduler(connection=self.testconn, queue=queue)
     job = scheduler._create_job(say_hello)
     scheduler_queue = scheduler.get_queue_for_job(job)
     self.assertEqual(queue, scheduler_queue)
     scheduler.enqueue_job(job)
     self.assertTrue(job.enqueued_at is not None)
     self.assertIn(job, queue.jobs)
     self.assertIn(queue, Queue.all())
Example #18
 def test_enqueue_in(self):
     """
     Ensure that jobs have the right scheduled time.
     """
     right_now = datetime.now()
     time_delta = timedelta(minutes=1)
     scheduler = Scheduler(connection=self.testconn)
     job = scheduler.enqueue_in(time_delta, say_hello)
     self.assertIn(job.id, self.testconn.zrange(scheduler.scheduled_jobs_key, 0, 1))
     self.assertEqual(self.testconn.zscore(scheduler.scheduled_jobs_key, job.id),
                      int((right_now + time_delta).strftime('%s')))
     time_delta = timedelta(hours=1)
     job = scheduler.enqueue_in(time_delta, say_hello)
     self.assertEqual(self.testconn.zscore(scheduler.scheduled_jobs_key, job.id),
                      int((right_now + time_delta).strftime('%s')))
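Note that strftime('%s') relies on a platform-specific libc extension. A portable way to compute the same expected score for the naive local-time datetimes used in these tests (an equivalent sketch, not part of the original test suite):

import time

def expected_score(dt):
    # Seconds since the epoch for a naive, local-time datetime;
    # equivalent to int(dt.strftime('%s')) on glibc systems.
    return int(time.mktime(dt.timetuple()))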
Example #19
 def test_acquire_lock(self):
     """
     When scheduler acquires a lock, besides creating a key, it should
     also set an expiry that's a few seconds longer than it's polling
     interval so it automatically expires if scheduler is unexpectedly
     terminated.
     """
     key = '%s_lock' % Scheduler.scheduler_key
     self.assertNotIn(key, tl(self.testconn.keys('*')))
     scheduler = Scheduler(connection=self.testconn, interval=20)
     self.assertTrue(scheduler.acquire_lock())
     self.assertIn(key, tl(self.testconn.keys('*')))
     self.assertEqual(self.testconn.ttl(key), 30)
     scheduler.remove_lock()
     self.assertNotIn(key, tl(self.testconn.keys('*')))
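The asserted values here (and in the birth-registration test below) imply that the key's TTL is the polling interval plus a ten-second grace period. A minimal sketch of that relationship, inferred only from what the tests assert:

def expected_lock_ttl(polling_interval):
    # interval=20 yields a TTL of 30 in the test above, i.e. a
    # 10-second grace period on top of the polling interval.
    return polling_interval + 10

assert expected_lock_ttl(20) == 30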
Example #20
 def get_context_data(self, **kwargs):
     ctx = super(Stats, self).get_context_data(**kwargs)
     ctx.update({
         'queues': Queue.all(connection=self.connection),
         'workers': Worker.all(connection=self.connection),
         'title': 'RQ Status',
     })
     if Scheduler:
         scheduler = Scheduler(self.connection)
         get_queue = lambda job: job.origin
         all_jobs = sorted(scheduler.get_jobs(), key=get_queue)
         ctx['scheduler'] = scheduler
         ctx['scheduled_queues'] = [
             {'name': queue, 'job_count': len(list(jobs))}
             for queue, jobs in groupby(all_jobs, get_queue)]
     return ctx
Example #21
    def get_context_data(self, **kwargs):
        ctx = super(SchedulerDetails, self).get_context_data(**kwargs)
        if Scheduler is None:
            # rq_scheduler is not installed
            raise Http404
        scheduler = Scheduler(self.connection)
        queue = Queue(self.kwargs['queue'], connection=self.connection)
        jobs = [(job, next_run)
                for job, next_run in scheduler.get_jobs(with_times=True)
                if job.origin == queue.name]

        ctx.update({
            'queue': queue,
            'jobs': [serialize_scheduled_job(job, next_run)
                     for job, next_run in jobs],
            'title': "Jobs scheduled on '%s' queue" % queue.name,
        })
        return ctx
Example #22
 def test_birth_and_death_registration(self):
     """
     When scheduler registers it's birth, besides creating a key, it should
     also set an expiry that's a few seconds longer than it's polling
     interval so it automatically expires if scheduler is unexpectedly 
     terminated.
     """
     key = Scheduler.scheduler_key
     self.assertNotIn(key, tl(self.testconn.keys('*')))
     scheduler = Scheduler(connection=self.testconn, interval=20)
     scheduler.register_birth()
     self.assertIn(key, tl(self.testconn.keys('*')))
     self.assertEqual(self.testconn.ttl(key), 30)
     self.assertFalse(self.testconn.hexists(key, 'death'))
     self.assertRaises(ValueError, scheduler.register_birth)
     scheduler.register_death()
     self.assertTrue(self.testconn.hexists(key, 'death'))
Example #23
    def __init__(self):
        import rq
        import redis
        from rq_scheduler import Scheduler

        self.conn = redis.from_url(settings.REDIS_URL)
        self.queue = rq.Queue("default", connection=self.conn, default_timeout=RedisWorker.TIMEOUT)
        self.scheduler = Scheduler("high", connection=self.conn)
        rq.use_connection(self.conn)
Example #24
File: job.py Project: zerxis/qpanel
def remove_jobs_not_config():
    """
        Remove jobs on queue but not present on config.
        Prevent when in job for reset a queue stats is scheduled but
        after your config is modified or deleted
    """
    scheduler = Scheduler(connection=Redis())
    queue_for_reset = config.QPanelConfig().queues_for_reset_stats()
    jobs = scheduler.get_jobs()
    for job in jobs:
        if 'reset_stats_queue' in job.func_name:
            delete = True
            for qr in queue_for_reset:
                if qr in queue_for_reset:
                    if (queue_for_reset[qr]['when'] == job.args[1]
                            and queue_for_reset[qr]['hour'] == job.args[2]):
                        delete = False
                if delete:
                    job.delete()
Example #25
def remove_jobs_not_config():
    """
        Remove jobs on queue but not present on config.
        Prevent when in job for reset a queue stats is scheduled but
        after your config is modified or deleted

        TODO: Maybe this could reload by notified in config.ini change
    """
    scheduler = Scheduler(connection=Redis())
    jobs = scheduler.get_jobs()
    for job in jobs:
        if 'reset_stats_queue' in job.func_name:
            # The args for the job of reset_stats_queue are:
            queuename = job.args[0]
            when = job.args[1]
            hour = job.args[2]

            if not exists_job_onconfig(queuename, when, hour):
                job.delete()
Example #26
File: job.py Project: pathcl/qpanel
def start_jobs():
    """
        Check if processs enqueue_reset_stats is working on queue if not
        enqueue function
    """
    start_enqueue_reset_stats = False
    scheduler = Scheduler(connection=Redis())
    jobs = scheduler.get_jobs()
    for job in jobs:
        if 'enqueue_reset_stats' in job.func_name:
            start_enqueue_reset_stats = True
            break

    if start_enqueue_reset_stats is False:
        scheduler.schedule(
            scheduled_time=datetime.datetime.utcnow(),
            func=enqueue_reset_stats,
            interval=60
        )
Example #27
 def test_enqueue_job(self):
     """
     When scheduled job is enqueued, make sure:
     - Job is removed from the sorted set of scheduled jobs
     - "enqueued_at" attribute is properly set
     - Job appears in the right queue
     """
     now = datetime.now()
     queue_name = 'foo'
     scheduler = Scheduler(connection=self.testconn, queue_name=queue_name)
     job = scheduler.enqueue_at(now, say_hello)
     scheduler.enqueue_job(job)
     self.assertNotIn(job, self.testconn.zrange(scheduler.scheduled_jobs_key, 0, 10))
     job = Job.fetch(job.id, connection=self.testconn)
     self.assertTrue(job.enqueued_at is not None)
     queue = scheduler.get_queue_for_job(job)
     self.assertIn(job, queue.jobs)
     queue = Queue.from_queue_key('rq:queue:{0}'.format(queue_name))
     self.assertIn(job, queue.jobs)
Example #28
def create_schedules():
    from towerdashboard.jobs import refresh_github_branches
    scheduler = Scheduler(connection=Redis('redis'))
    for j in scheduler.get_jobs():
        scheduler.cancel(j)

    scheduler.schedule(scheduled_time=datetime.utcnow(),
                       func=refresh_github_branches,
                       interval=120, repeat=None, result_ttl=120)
Example #29
def start_data_manager():
    print("Starting BG Workers")
    print("Intitializing Redis Queue")
    if os.getenv('REDIS_URL'):
        redis_conn = Redis.from_url(os.getenv('REDIS_URL'), decode_responses=True)
    else:
        redis_conn = Redis()
    print("Connection Made")

    try:
        scheduler = Scheduler(connection=redis_conn)
    except Exception:
        print("No Redis connection possible, exiting...")
        sys.exit(1)

    scheduler.schedule(
        scheduled_time=datetime.utcnow(),      # Time for first execution, in UTC timezone
        func=refresh_data,                     # Function to be queued
        interval=30,                            # Time before the function is called again, in seconds
    )
Example #30
    def test_enqueue_job(self):
        """
        When scheduled job is enqueued, make sure:
        - Job is removed from the sorted set of scheduled jobs
        - "enqueued_at" attribute is properly set
        - Job appears in the right queue
        """
        now = datetime.utcnow()
        queue_name = 'foo'
        scheduler = Scheduler(connection=self.testconn, queue_name=queue_name)

        job = scheduler.enqueue_at(now, say_hello)
        scheduler.enqueue_job(job)
        self.assertNotIn(job, tl(self.testconn.zrange(scheduler.scheduled_jobs_key, 0, 10)))
        job = Job.fetch(job.id, connection=self.testconn)
        self.assertTrue(job.enqueued_at is not None)
        queue = scheduler.get_queue_for_job(job)
        self.assertIn(job, queue.jobs)
        queue = Queue.from_queue_key('rq:queue:{0}'.format(queue_name))
        self.assertIn(job, queue.jobs)
Example #31
def add():
    #print str(request.query)

    url = request.query['url']
    minutes_delta = int(request.query['minutes_delta'])


    scheduler = Scheduler(connection=Redis()) # Get a scheduler for the "default" queue

    # Schedule the job to run minutes_delta minutes from now
    job = scheduler.enqueue_in(timedelta(minutes=minutes_delta), request_url, **{"url":url})

    msg = u'[{}][scheduler/add] {} scheduled after {} minutes. job id {}\n'.format(datetime.now().isoformat()[:19], url, minutes_delta, job.id)
    logging.info(msg)

    os.chmod(filename_log, 0o777)
    with codecs.open(filename_log, "a", encoding="utf-8") as f:
        f.write(msg)

    return msg
Example #32
File: job.py Project: pathcl/qpanel
def remove_jobs_not_config():
    """
        Remove jobs on queue but not present on config.
        Prevent when in job for reset a queue stats is scheduled but
        after your config is modified or deleted
    """
    scheduler = Scheduler(connection=Redis())
    queue_for_reset = config.QPanelConfig().queues_for_reset_stats()
    jobs = scheduler.get_jobs()
    for job in jobs:
        if 'reset_stats_queue' in job.func_name:
            q = job.args[0]
            delete = True
            for qr in queue_for_reset:
                if qr in queue_for_reset:
                    if (queue_for_reset[qr]['when'] == job.args[1] and
                        queue_for_reset[qr]['hour'] == job.args[2]):
                        delete = False
                if delete:
                    job.delete()
Example #33
def setup_redis(queue_name='default', host_name='127.0.0.1', port_no=6379):
    redis_scheduler = None
    try:
        log.info("Setting up Redis connection")
        redis_con = StrictRedis(host=host_name, port=port_no)
        redis_scheduler = Scheduler(queue_name, connection=redis_con)
        log.info("Redis connection is now setup successfully.")
    except Exception as exc:
        log.error("Error while setting up Redis connection to %s:%s\n %s" %
                  (host_name, port_no, exc),
                  exc_info=True)
    return redis_scheduler
Example #34
def perform_stop_job_execution(job, execution, logger, stop_schedule=True):
    if execution is None:
        if not job.executions:
            msg = "No executions found in job."

            return (
                False,
                return_error(msg,
                             "stop_job_execution",
                             status=400,
                             logger=logger),
            )

        execution = job.get_last_execution()

    if execution is not None and execution.status == JobExecution.Status.running:
        logger.debug("Stopping current execution...")
        executor = current_app.executor
        executor.stop_job(job.task, job, execution)
        logger.debug("Current execution stopped.")

    if "retries" in job.metadata:
        job.metadata["retry_count"] = job.metadata["retries"] + 1
        job.save()

    scheduler = Scheduler("jobs", connection=current_app.redis)

    if (stop_schedule and "enqueued_id" in job.metadata
            and job.metadata["enqueued_id"] in scheduler):
        scheduler.cancel(job.metadata["enqueued_id"])
        job.scheduled = False

    if execution.error is None:
        execution.error = ""
    execution.error += "\nUser stopped job execution manually."
    execution.status = JobExecution.Status.failed
    job.save()

    logger.debug("Job stopped.")

    return True, None
Example #35
def list_jobs(queue_name, page):
    current_page = int(page)

    scheduler = Scheduler(queue_name)
    jobs = scheduler.get_jobs(with_times=True)
    if queue_name:
        jobs = list(filter(lambda job_: job_[0].origin == queue_name, jobs))

    per_page = 5
    total_items = len(jobs)
    pages_numbers_in_window = pagination_window(
        total_items, current_page, per_page)
    pages_in_window = [
        dict(number=p, url=url_for('.overview', queue_name=queue_name, page=p))
        for p in pages_numbers_in_window
    ]
    last_page = int(ceil(total_items / float(per_page)))

    prev_page = None
    if current_page > 1:
        prev_page = dict(url=url_for(
            '.overview', queue_name=queue_name, page=(current_page - 1)))

    next_page = None
    if current_page < last_page:
        next_page = dict(url=url_for(
            '.overview', queue_name=queue_name, page=(current_page + 1)))

    pagination = remove_none_values(
        dict(
            pages_in_window=pages_in_window,
            next_page=next_page,
            prev_page=prev_page
        )
    )

    offset = (current_page - 1) * per_page
    job_page = jobs[offset:offset+per_page]
    job_page = [serialize_job(job, at) for (job, at) in job_page]
    return dict(name=queue_name, jobs=job_page, pagination=pagination)
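A quick worked example of the pagination arithmetic above, with hypothetical numbers: 12 scheduled jobs at 5 per page give 3 pages, and page 2 serves jobs 5 through 9:

from math import ceil

per_page = 5
total_items = 12          # hypothetical job count
current_page = 2

last_page = int(ceil(total_items / float(per_page)))  # 3
offset = (current_page - 1) * per_page                # 5
print(last_page, offset)  # jobs[5:10] are served on page 2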
Example #36
 def __init__(self):
     # Set the global timeout (e.g. the connection timeout)
     self.timeout = 2
     # Create a standalone Redis connection
     if REDIS:
         self.redis = from_url(REDIS)
     else:
         raise RedisURLError("The value of REDIS in the config.py file is not valid.")
     # Create the MySQL connection
     self.mysql = create_mysql_engine()
     self.asyncQueueHigh = Queue(name='high', connection=self.redis)
     self.asyncScheduler = Scheduler(queue=self.asyncQueueHigh, connection=self.redis, interval=1)
Example #37
def create_app(config_name):
    """
    This is the method that initializes modules used in the app
    :param config_name: The key for the configuration to use
    :return: Flask app
    """
    global app
    if config_name not in app_config.keys():
        config_name = 'development'
    app.config.from_object(".".join(["config", app_config[config_name]]))

    db.init_app(app)
    cors.init_app(app)
    mail.init_app(app)
    bcrypt.init_app(app)
    login_manager.init_app(app)
    migrate.init_app(app, db)
    bootstrap.init_app(app)

    login_manager.login_message = "You must be logged in to access this page."

    from app import models, errors, views

    security.init_app(app, datastore=models.user_datastore)

    # enable logging
    errors.system_logging(
        'Monitoring - A web based home monitoring system for assisted living')

    # These are blueprints; they organize the project into smaller logical components (e.g. by user role)
    from .home import home as home_blueprint
    from .admin import admin as admin_blueprint
    from .patient import patient as patient_blueprint
    from .relative import relative as relative_blueprint
    from .caregiver import caregiver as medical_blueprint

    app.register_blueprint(home_blueprint, url_prefix='/home')
    app.register_blueprint(medical_blueprint, url_prefix='/caregiver')
    app.register_blueprint(admin_blueprint, url_prefix='/admin')
    app.register_blueprint(patient_blueprint, url_prefix='/patient')
    app.register_blueprint(relative_blueprint, url_prefix='/relative')

    # Initialize Redis and RQ
    app.redis = Redis.from_url(app.config['REDIS_URL'])
    # The queue where tasks are submitted
    queue_name = 'monitoring_tasks'
    app.task_queue = rq.Queue(queue_name, connection=app.redis)

    # Instantiate Scheduler for schedule queue
    schedule_name = "monitoring_scheduler"
    app.scheduler = Scheduler(queue_name=schedule_name, connection=app.redis)

    return app
Example #38
def setup_scheduler(func, repeat_every=60):
    r = get_connection()
    scheduled_job_id = r.get(KEY)

    scheduler = Scheduler(connection=r)
    if scheduled_job_id:
        logger.info(f'Canceling old job {scheduled_job_id}')
        scheduler.cancel(scheduled_job_id)  # cancel the old job before scheduling a new one

    job = scheduler.schedule(
        scheduled_time=datetime.utcnow(),  # Time for first execution, in UTC timezone
        func=func,                         # Function to be queued
        interval=repeat_every,             # Time before the function is called again, in seconds
        repeat=None                        # Repeat this number of times (None means repeat forever)
    )
    logger.info("Scheduled function %s to be executed every %s seconds" %
                (func.__name__, repeat_every))
    r.set(KEY, job.id)
Example #39
def setup_scheduled_jobs(app):  # pragma: no cover
    """Setup scheduled jobs."""
    from datetime import datetime
    from pybossa.jobs import enqueue_periodic_jobs, schedule_job, \
        get_quarterly_date
    from rq_scheduler import Scheduler
    redis_conn = sentinel.master
    scheduler = Scheduler(queue_name='scheduled_jobs', connection=redis_conn)
    MINUTE = 60
    HOUR = 60 * 60
    MONTH = 30 * (24 * HOUR)
    first_quaterly_execution = get_quarterly_date(datetime.utcnow())
    JOBS = [
        dict(name=enqueue_periodic_jobs,
             args=['super'],
             kwargs={},
             interval=(10 * MINUTE),
             timeout=(10 * MINUTE)),
        dict(name=enqueue_periodic_jobs,
             args=['high'],
             kwargs={},
             interval=(1 * HOUR),
             timeout=(10 * MINUTE)),
        dict(name=enqueue_periodic_jobs,
             args=['medium'],
             kwargs={},
             interval=(12 * HOUR),
             timeout=(10 * MINUTE)),
        dict(name=enqueue_periodic_jobs,
             args=['low'],
             kwargs={},
             interval=(24 * HOUR),
             timeout=(10 * MINUTE)),
        dict(name=enqueue_periodic_jobs,
             args=['weekly'],
             kwargs={},
             interval=(7 * 24 * HOUR),
             timeout=(10 * MINUTE)),
        dict(name=enqueue_periodic_jobs,
             args=['monthly'],
             kwargs={},
             interval=(1 * MONTH),
             timeout=(30 * MINUTE)),
        dict(name=enqueue_periodic_jobs,
             args=['quaterly'],
             kwargs={},
             interval=(3 * MONTH),
             timeout=(30 * MINUTE),
             scheduled_time=first_quaterly_execution)
    ]

    for job in JOBS:
        schedule_job(job, scheduler)
Example #40
def add_scheduled_task(request):
    from rq_scheduler import Scheduler
    from datetime import datetime

    task = request.GET.get('task')
    period = request.GET.get('period')
    queue = request.GET.get('queue')
    parameters = request.GET.get('parameters')
    scheduler = Scheduler(queue_name=queue, connection=tasks.redis_conn)

    if parameters:
        scheduler.schedule(
            scheduled_time=datetime.utcnow(),   # Time for first execution
            func=getattr(tasks, task),       # Function to be queued
            args=(parameters,),
            # Time before the function is called again, in seconds
            interval=int(period),
            # Repeat this number of times (None means repeat forever)
            repeat=None
        )
    else:
        scheduler.schedule(
            scheduled_time=datetime.utcnow(),   # Time for first execution
            func=getattr(tasks, task),       # Function to be queued
            # Time before the function is called again, in seconds
            interval=int(period),
            # Repeat this number of times (None means repeat forever)
            repeat=None
        )
    return HttpResponse('Success')
Example #41
def schedule():
    """Creates scheduler object."""
    build_scheduler = Scheduler(connection=WorkerQueues.connection)
    log.info('scheduler created')

    cleanup_interval = int(os.getenv('RENKU_SVC_CLEANUP_INTERVAL', 60))
    log.info('cleanup interval set to {}'.format(cleanup_interval))

    build_scheduler.schedule(
        scheduled_time=datetime.utcnow(),
        queue_name=CLEANUP_QUEUE_FILES,
        func=cache_files_cleanup,
        interval=cleanup_interval,
        result_ttl=cleanup_interval + 1,
    )

    build_scheduler.schedule(
        scheduled_time=datetime.utcnow(),
        queue_name=CLEANUP_QUEUE_PROJECTS,
        func=cache_project_cleanup,
        interval=cleanup_interval,
        result_ttl=cleanup_interval + 1,
    )

    log_level = os.getenv('RQ_WORKER_LOG_LEVEL', 'INFO')
    setup_loghandlers(log_level)
    log.info('log level set to {}'.format(log_level))

    yield build_scheduler
Example #42
def add_scheduled_task(request):

    task = request.GET.get('task')
    period = request.GET.get('period')
    queue = request.GET.get('queue')
    parameters = request.GET.get('parameters')

    from rq import use_connection
    from rq_scheduler import Scheduler
    from datetime import datetime

    use_connection()  # Use RQ's default Redis connection
    scheduler = Scheduler(queue)  # Get a scheduler for the requested queue

    if parameters:
        scheduler.schedule(
            scheduled_time=datetime.now(),  # Time for first execution
            func=getattr(tasks, task),      # Function to be queued
            args=[int(parameters)],
            interval=int(period),           # Time before the function is called again, in seconds
            repeat=None                     # Repeat this number of times (None means repeat forever)
        )
    else:
        scheduler.schedule(
            scheduled_time=datetime.now(),  # Time for first execution
            func=getattr(tasks, task),      # Function to be queued
            interval=int(period),           # Time before the function is called again, in seconds
            repeat=None                     # Repeat this number of times (None means repeat forever)
        )
    return HttpResponse('Success')
Example #43
    def create(cls, reason, at_time, number=None):
        """reason must me an object of some model which implements ActionMixin"""
        content_type = ContentType.objects.get_for_model(reason.__class__)
        if number is None:
            last_not = cls.objects.filter(content_type=content_type,
                                          object_id=reason.id).last()
            number = last_not.number + 1 if last_not is not None else 0
        else:
            if cls.objects.filter(content_type=content_type,
                                  object_id=reason.id,
                                  number__gte=number).exists():
                return None  # exception?
        obj = cls.objects.create(time=at_time, caused_by=reason, number=number)
        scheduler = Scheduler(connection=Redis())
        job = scheduler.enqueue_at(at_time, notification_job, obj.pk)
        logger.info('Notification pk {}: enqueue job to scheduler'.format(
            obj.pk))
        obj.job_id = job.id
        obj.save()

        return obj
Example #44
def update_scheduled_send(schedule_id):
    ''' schedule a new sending; called when the interval was changed
    or when the last job has finished and the next needs to be scheduled
    '''
    schedules = Schedule.objects(id=schedule_id)
    if not schedules:
        return False
    schedule = schedules[0]

    # confirm that schedule is valid
    if not schedule.interval:
        return False
    
    # connect to the rq scheduler
    redis_config = app.config['REDIS_CONFIG']
    use_connection(Redis(redis_config['host'], redis_config['port']
            , password=redis_config['password']))
    scheduler = Scheduler()

    # see if this schedule had a job that was already enqueued
    if schedule.next_task_id:
        # instantiate the job
        job = Job(id=schedule.next_task_id)
        # cancel the old job
        # tried rescheduling but that was not working
        scheduler.cancel(job)
    
    # determine how many seconds to wait
    delay = _calculate_schedule_delay(schedule.interval)

    # start a new job
    job = scheduler.enqueue_in(datetime.timedelta(seconds=int(delay))
        , send_scheduled_report, schedule.id)

    # save the id of this job and when it next runs
    schedule.update(set__next_task_id = job.id)
    schedule.update(set__next_run_time = (datetime.datetime.utcnow() 
        + datetime.timedelta(seconds=delay)))
Example #45
 def get_context_data(self, **kwargs):
     ctx = super(Stats, self).get_context_data(**kwargs)
     ctx.update({
         'queues': sorted(Queue.all(connection=self.connection), key=by_name),
         'workers': sorted(Worker.all(connection=self.connection), key=by_name),
         'has_permission': True,
         'title': 'RQ Status',
     })
     if Scheduler:
         scheduler = Scheduler(self.connection)
         get_queue = lambda job: job.origin
         all_jobs = sorted(scheduler.get_jobs(), key=get_queue)
         ctx['scheduler'] = scheduler
         ctx['scheduled_queues'] = [{
             'name': queue,
             'job_count': len(list(jobs))
         } for queue, jobs in groupby(all_jobs, get_queue)]
     return ctx
Example #46
class RqClient:

    def __init__(self, conf: RqConfig, prefix: str = ''):
        self.redis_conn = Redis(host=conf.HOST, port=conf.PORT, db=conf.DB)
        self.queue = Queue(connection=self.redis_conn)
        self.prefix = prefix
        self.scheduler = Scheduler(connection=self.redis_conn, queue=self.queue)
        self.scheduler_conf_path = conf.SCHEDULER_CONF_PATH
        self.control = Control(self.redis_conn)

    def init_scheduler(self):
        # remove old scheduled tasks
        for job in self.scheduler.get_jobs():
            self.scheduler.cancel(job)

        # create new tasks from config file
        if self.scheduler_conf_path:
            with open(self.scheduler_conf_path) as f:
                for entry in json.load(f):
                    self.scheduler.cron(
                        entry['schedule'],
                        f'{self.prefix}.{entry["task"]}',
                        kwargs=entry['kwargs'] if 'kwargs' in entry else None
                    )

    def send_task(self, name, args=None, time_limit=None, soft_time_limit=None):
        try:
            job = self.queue.enqueue(f'{self.prefix}.{name}', ttl=time_limit, args=args)
            return ResultWrapper(job)
        except Exception as ex:
            logging.getLogger(__name__).error(ex)

    def AsyncResult(self, ident):
        try:
            return ResultWrapper(Job.fetch(ident, connection=self.redis_conn))
        except NoSuchJobError:
            logging.getLogger(__name__).warning(f'Job {ident} not found')
            return None
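A hypothetical usage sketch for the client above; DemoConfig and the task name are made-up stand-ins for the real RqConfig and task module:

class DemoConfig:
    # Stand-in for RqConfig; attribute names match what __init__ reads.
    HOST = 'localhost'
    PORT = 6379
    DB = 0
    SCHEDULER_CONF_PATH = None  # no cron entries in this sketch

client = RqClient(DemoConfig(), prefix='myapp.tasks')
client.init_scheduler()  # clears any previously scheduled jobs
result = client.send_task('send_email', args=['user@example.com'])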
Example #47
def list_jobs(queue_name, page):
    current_page = int(page)

    scheduler = Scheduler(queue_name)
    jobs = scheduler.get_jobs(with_times=True)
    if queue_name:
        jobs = list(filter(lambda job_: job_[0].origin == queue_name, jobs))

    per_page = 5
    total_items = len(jobs)
    pages_numbers_in_window = pagination_window(total_items, current_page,
                                                per_page)
    pages_in_window = [
        dict(number=p, url=url_for('.overview', queue_name=queue_name, page=p))
        for p in pages_numbers_in_window
    ]
    last_page = int(ceil(total_items / float(per_page)))

    prev_page = None
    if current_page > 1:
        prev_page = dict(url=url_for(
            '.overview', queue_name=queue_name, page=(current_page - 1)))

    next_page = None
    if current_page < last_page:
        next_page = dict(url=url_for(
            '.overview', queue_name=queue_name, page=(current_page + 1)))

    pagination = remove_none_values(
        dict(pages_in_window=pages_in_window,
             next_page=next_page,
             prev_page=prev_page))

    offset = (current_page - 1) * per_page
    job_page = jobs[offset:offset + per_page]
    job_page = [serialize_job(job, at) for (job, at) in job_page]
    return dict(name=queue_name, jobs=job_page, pagination=pagination)
Example #48
def register_scheduler():
    scheduler = Scheduler('lidarts-bulk', connection=Redis())
    list_of_job_instances = scheduler.get_jobs()
    for job in list_of_job_instances:
        scheduler.cancel(job)
    scheduler.schedule(
        scheduled_time=datetime.utcnow(),
        func='lidarts.tasks.bulk_update_last_seen',
        interval=5,
        repeat=None,
        ttl=10,
    )
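Jobs registered this way are only moved onto their queue while a scheduler process is polling Redis. A minimal sketch of running that process for the same queue (normally started separately, shown here only to make the dependency explicit):

from redis import Redis
from rq_scheduler import Scheduler

# Poll Redis every 5 seconds and enqueue any scheduled jobs that are due.
scheduler = Scheduler('lidarts-bulk', connection=Redis(), interval=5)
scheduler.run()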
Example #49
def test_move_scheduled_jobs(client):
    '''Test moving jobs to proper queue'''
    with client.application.app_context():
        app = client.application
        app.redis.flushall()

        task_id = str(uuid4())
        job_id = str(uuid4())

        past = datetime.now() - timedelta(days=10)
        future = datetime.now() + timedelta(days=10)

        scheduler = Scheduler(queue_name='jobs', connection=app.redis)

        scheduler.enqueue_at(past,
                             job_mod.run_job,
                             task_id,
                             job_id,
                             timeout=-1)

        scheduler.enqueue_at(future,
                             job_mod.run_job,
                             task_id,
                             job_id,
                             timeout=-1)

        q = 'rq:queue:jobs'
        res = app.redis.llen(q)
        expect(res).to_equal(0)

        qs = QueueScheduler('jobs', app)
        qs.move_jobs()

        q = 'rq:queue:jobs'
        res = app.redis.llen(q)
        expect(res).to_equal(1)

        scheduler.enqueue_at(past,
                             job_mod.run_job,
                             task_id,
                             job_id,
                             timeout=-1)

        qs = QueueScheduler('jobs', app)
        qs.move_jobs()

        res = app.redis.llen(q)
        expect(res).to_equal(2)
Example #50
def _get_scheduler_sts(redis_conn):
    queue_list = _get_queue_list()
    scheduler_list = []

    for q in queue_list:
        scheduler = Scheduler(queue_name=q, connection=redis_conn)

        if (scheduler.connection.exists(scheduler.scheduler_key) and
            not scheduler.connection.hexists(scheduler.scheduler_key, 'death')):
            scheduler_list.append({'id': f'scheduler on {q}', 'status': 'running'})
        else:
            scheduler_list.append({'id': f'No running scheduler on {q}', 'status': 'N/A'})

    return scheduler_list
Example #51
    def start(self):

        self.pf.load_printers()
        #self.load_printers()
        
        self.queue = Queue(connection=Redis())
        self.scheduler = Scheduler(connection=Redis())

        self.app.add_url_rule('/', 'index', self.index)
        self.app.add_url_rule('/printers', 'printers', self.get_printers)
        self.app.add_url_rule('/printer/<id>/', 'printer', self.printer)

        self.api.add_rules(self.app)
        self.pf.add_rules(self.app)
Example #52
def main():
    parser = argparse.ArgumentParser()

    parser.add_argument("experiment", help="The experiment")

    parser.add_argument("job",
                        choices=["intervene", "tidy"],
                        help="The job associated with the experiment")

    parser.add_argument(
        "interval",
        default=120,  # default 2 minutes
        help="Interval between tasks in seconds (default 2 minutes)")
    parser.add_argument(
        "-e",
        '--env',
        choices=['development', 'test', 'production'],
        required=False,
        help="Run within a specific environment. Otherwise run under the "
             "environment defined in the environment variable CS_ENV"
    )

    args = parser.parse_args()

    # if the user specified the environment, set it here
    if args.env is not None:
        os.environ['CS_ENV'] = args.env

    queue_name = os.environ['CS_ENV']
    scheduler = Scheduler(queue_name=queue_name, connection=Redis())

    ttl = 172800  ## two days in seconds
    if (ttl <= int(args.interval) + 3600):
        ttl = int(args.interval) + 3600

    experiment_file = os.path.join(
        BASE_DIR, "config", "experiments") + "/" + args.experiment + ".yml"
    if (os.path.isfile(experiment_file) == False):
        print("File {0} not found. Ignoring schedule command.".format(
            experiment_file))
        sys.exit(1)

    if (args.job == "intervene"):
        scheduler.schedule(
            scheduled_time=datetime.utcnow(),
            func=app.controller.conduct_sticky_comment_experiment,
            args=[args.experiment],
            interval=int(args.interval),
            repeat=None,
            result_ttl=ttl)
    elif (args.job == "tidy"):
        scheduler.schedule(scheduled_time=datetime.utcnow(),
                           func=app.controller.remove_experiment_replies,
                           args=[args.experiment],
                           interval=int(args.interval),
                           repeat=None,
                           result_ttl=ttl)
Example #53
def get_scheduled_tasks(request):
    from rq import use_connection
    from rq_scheduler import Scheduler
    import json

    use_connection()  # Use RQ's default Redis connection
    scheduler = Scheduler()  # Get a scheduler for the "default" queue
    list_of_job_instances = scheduler.get_jobs()

    jobdata = list()
    for job in list_of_job_instances:
        if "interval" in job.meta:
            interval = job.meta["interval"]
        else:
            interval = 0
        job_dict = {
            'job_id': job._id,
            'task': job.description,
            'period': interval,
            'args': job.args,
            'queue': "default"
        }
        jobdata.append(job_dict)

    # scheduler = Scheduler('parser') # Get a scheduler for the "parser" queue
    # list_of_job_instances = scheduler.get_jobs()
    #
    # for job in list_of_job_instances:
    #     if "interval" in job.meta:
    #         interval = job.meta["interval"]
    #     else:
    #         interval = 0
    #     job_dict = { 'job_id': job._id, 'task': job.description, 'period': interval, 'queue': "parser" }
    #     jobdata.append(job_dict)

    data = json.dumps(jobdata)
    return HttpResponse(data, content_type='application/json')
Example #54
    def __add_to_scheduler(self, crons: List[str]):
        rq_job_ids = []
        if crons:
            queue = Queue(SchedulerDB.submit_queue_name,
                          connection=self.con,
                          default_timeout=self.timeout)
            scheduler = Scheduler(queue=queue, connection=self.con)
            for cron in crons:
                rq_job = scheduler.cron(
                    cron,
                    func=self.submit,
                    # args=self.args,
                    # kwargs=self.kwargs,
                    repeat=None,
                    queue_name=queue.name,
                    timeout=self.timeout,
                )
                self.logger.debug(
                    f"{self} with cron '{cron}' has been added to rq scheduler with id {rq_job.id}"
                )
                rq_job_ids.append(rq_job.id)
            self.logger.debug(f"{self} has been added to the rq scheduler")

        return rq_job_ids
Example #55
def test_enqueue_missing1(client):
    """Test self-healing enqueueing missing monitor jobs"""
    with client.application.app_context():
        app = client.application
        app.redis.flushall()

        for status in [
                JobExecution.Status.enqueued,
                JobExecution.Status.pulling,
                JobExecution.Status.running,
                JobExecution.Status.done,
                JobExecution.Status.failed,
        ]:
            _, job, execution = JobExecutionFixture.new_defaults()
            execution.status = status
            job.save()

            if status == JobExecution.Status.pulling:
                scheduler = Scheduler("monitor", connection=app.redis)
                interval = timedelta(seconds=1)
                scheduler.enqueue_in(
                    interval,
                    job_mod.monitor_job,
                    job.task.task_id,
                    job.job_id,
                    execution.execution_id,
                )

        job_mod.enqueue_missing_monitor_jobs(app)

        hash_key = "rq:scheduler:scheduled_jobs"
        res = app.redis.exists(hash_key)
        expect(res).to_be_true()

        res = app.redis.zrange(hash_key, 0, -1)
        expect(res).to_length(2)
Example #56
    def handle(self, *args, **options):
        queue = rq.Queue('rq_log', connection=Redis())
        queue.enqueue(rq_task)

        scheduler = Scheduler(queue=queue, connection=Redis())
        scheduler.schedule(scheduled_time=datetime.utcnow(),
                           func=rq_task,
                           interval=5,
                           repeat=1)
        scheduler.run()
Example #58
def retry_job(task_id, job_id):
    logger = g.logger.bind(operation="retry", task_id=task_id, job_id=job_id)

    logger.debug("Getting job...")
    job = Job.get_by_id(task_id=task_id, job_id=job_id)

    if job is None:
        logger.error("Job not found in task.")
        abort(404)

        return

    execution = job.get_last_execution()

    if execution is None:
        logger.error("No execution yet to retry.")
        abort(Response(response="No execution yet to retry.", status=400))

        return

    scheduler = Scheduler("jobs", connection=current_app.redis)

    if "enqueued_id" in job.metadata and job.metadata[
            "enqueued_id"] in scheduler:
        msg = "Can't retry a scheduled job."
        logger.error(msg)
        abort(Response(response=msg, status=400))

        return

    if execution.status == JobExecution.Status.running:
        logger.debug("Stopping current execution...")
        executor = current_app.load_executor()
        executor.stop_job(job.task, job, execution)
        logger.debug("Current execution stopped.")

    execution.status = JobExecution.Status.failed
    job.save()

    logger.debug("Enqueuing job execution...")
    args = [task_id, job_id, execution.image, execution.command]
    result = current_app.job_queue.enqueue(run_job, *args, timeout=-1)
    job.metadata["enqueued_id"] = result.id
    job.save()
    logger.info("Job execution enqueued successfully.")

    return get_job_summary(task_id, job_id)
Example #59
 def test_get_jobs_to_queue(self):
     """
     Ensure that jobs scheduled in the future are not queued.
     """
     now = datetime.now()
     scheduler = Scheduler(connection=self.testconn)
     job = scheduler.enqueue_at(now, say_hello)
     self.assertIn(job, scheduler.get_jobs_to_queue())
     future_time = now + timedelta(hours=1)
     job = scheduler.enqueue_at(future_time, say_hello)
     self.assertNotIn(job, scheduler.get_jobs_to_queue())