Example #1
    def test_worker_sets_job_status(self):
        """Ensure that worker correctly sets job status."""
        q = Queue()
        w = Worker([q])

        job = q.enqueue(say_hello)
        self.assertEqual(job.get_status(), Status.QUEUED)
        self.assertEqual(job.is_queued, True)
        self.assertEqual(job.is_finished, False)
        self.assertEqual(job.is_failed, False)

        w.work(burst=True)
        job = Job.fetch(job.id)
        self.assertEqual(job.get_status(), Status.FINISHED)
        self.assertEqual(job.is_queued, False)
        self.assertEqual(job.is_finished, True)
        self.assertEqual(job.is_failed, False)

        # Failed jobs should set status to "failed"
        job = q.enqueue(div_by_zero, args=(1,))
        w.work(burst=True)
        job = Job.fetch(job.id)
        self.assertEqual(job.get_status(), Status.FAILED)
        self.assertEqual(job.is_queued, False)
        self.assertEqual(job.is_finished, False)
        self.assertEqual(job.is_failed, True)
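(A minimal standalone sketch of the enqueue/work/fetch round trip the test above relies on; it assumes a local Redis and a say_hello task living in an importable tasks.py module.)

from redis import Redis
from rq import Queue, Worker
from rq.job import Job

connection = Redis()
q = Queue(connection=connection)
job = q.enqueue('tasks.say_hello')  # enqueue by dotted path (assumed module)
Worker([q], connection=connection).work(burst=True)
job = Job.fetch(job.id, connection=connection)
print(job.get_status())  # "finished" on success, "failed" on error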
Example #2
def actions(request):
    scheduler = get_scheduler()

    if request.method == 'POST' and request.POST.get('action', False):
        # Confirmation page for selected Action
        if request.POST.get('_selected_action', False):
            context_data = {
                'action': request.POST['action'],
                'job_ids': request.POST.getlist('_selected_action'),
            }
            return render(request, 'rq_scheduler/templates/confirm_action.html', context_data)

        # Performing the actual action
        elif request.POST.get('job_ids', False):
            job_ids = request.POST.getlist('job_ids')

            if request.POST['action'] == 'delete':
                for job_id in job_ids:
                    job = Job.fetch(job_id, connection=scheduler.connection)
                    job.cancel()
                messages.info(request, 'You have successfully deleted %s jobs!' % len(job_ids))
            elif request.POST['action'] == 'enqueue':
                for job_id in job_ids:
                    job = Job.fetch(job_id, connection=scheduler.connection)
                    scheduler.enqueue_job(job)
                messages.info(request, 'You have successfully enqueued %d jobs!' % len(job_ids))

    return redirect('rq_scheduler:jobs')
Example #3
    def test_result_ttl_is_persisted(self):
        """Ensure that job's result_ttl is set properly"""
        job = Job.create(func=fixtures.say_hello, args=('Lionel',), result_ttl=10)
        job.save()
        job = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(job.result_ttl, 10)

        job = Job.create(func=fixtures.say_hello, args=('Lionel',))
        job.save()
        job = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(job.result_ttl, None)
Example #4
File: test_job.py Project: nvie/rq
    def test_failure_ttl_is_persisted(self):
        """Ensure job.failure_ttl is set and restored properly"""
        job = Job.create(func=fixtures.say_hello, args=('Lionel',), failure_ttl=15)
        job.save()
        job = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(job.failure_ttl, 15)

        job = Job.create(func=fixtures.say_hello, args=('Lionel',))
        job.save()
        job = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(job.failure_ttl, None)
Example #5
    def test_description_is_persisted(self):
        """Ensure that job's custom description is set properly"""
        job = Job.create(func=say_hello, args=('Lionel',), description=u'Say hello!')
        job.save()
        job = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(job.description, u'Say hello!')

        # Ensure job description is constructed from function call string
        job = Job.create(func=say_hello, args=('Lionel',))
        job.save()
        job = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(job.description, "tests.fixtures.say_hello('Lionel')")
Example #6
 def cancel(self, job):
     """
     Pulls a job from the scheduler queue. This function accepts either a
     job_id or a job instance.
     """
     if isinstance(job, Job):
         self.connection.zrem(self.scheduled_jobs_key, job.id)
         job.delete()
     else:
         self.connection.zrem(self.scheduled_jobs_key, job)
         try:
             Job.fetch(job, connection=self.connection).delete()
         except NoSuchJobError:
             pass
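(Usage sketch for the cancel() helper above: it accepts either a Job instance or a bare id, and the id form tolerates the job hash having already expired. `scheduler` stands for an instance of the class defining cancel().)

scheduler.cancel(job)     # Job instance: removed from the sorted set, then deleted
scheduler.cancel(job.id)  # plain id: NoSuchJobError from the fetch is swallowed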
Example #7
    def test_job_dependency(self):
        """Enqueue waitlisted jobs only if their parents don't fail"""
        q = Queue()
        w = Worker([q])
        parent_job = q.enqueue(say_hello)
        job = q.enqueue_call(say_hello, after=parent_job)
        w.work(burst=True)
        job = Job.fetch(job.id)
        self.assertEqual(job.status, 'finished')

        parent_job = q.enqueue(div_by_zero)
        job = q.enqueue_call(say_hello, after=parent_job)
        w.work(burst=True)
        job = Job.fetch(job.id)
        self.assertNotEqual(job.status, 'finished')
Example #8
    def test_persistence_of_parent_job(self):
        """Storing jobs with parent job, either instance or key."""
        parent_job = Job.create(func=fixtures.some_calculation)
        parent_job.save()
        job = Job.create(func=fixtures.some_calculation, depends_on=parent_job)
        job.save()
        stored_job = Job.fetch(job.id)
        self.assertEqual(stored_job._dependency_id, parent_job.id)
        self.assertEqual(stored_job.dependency, parent_job)

        job = Job.create(func=fixtures.some_calculation, depends_on=parent_job.id)
        job.save()
        stored_job = Job.fetch(job.id)
        self.assertEqual(stored_job._dependency_id, parent_job.id)
        self.assertEqual(stored_job.dependency, parent_job)
Example #9
    def test_job_dependency(self):
        """Enqueue dependent jobs only if their parents don't fail"""
        q = Queue()
        w = Worker([q])
        parent_job = q.enqueue(say_hello)
        job = q.enqueue_call(say_hello, depends_on=parent_job)
        w.work(burst=True)
        job = Job.fetch(job.id)
        self.assertEqual(job.get_status(), Status.FINISHED)

        parent_job = q.enqueue(div_by_zero)
        job = q.enqueue_call(say_hello, depends_on=parent_job)
        w.work(burst=True)
        job = Job.fetch(job.id)
        self.assertNotEqual(job.get_status(), Status.FINISHED)
Example #10
    def test_work_fails(self):
        """Failing jobs are put on the failed queue."""
        q = Queue()
        failed_q = get_failed_queue()

        # Preconditions
        self.assertEquals(failed_q.count, 0)
        self.assertEquals(q.count, 0)

        # Action
        job = q.enqueue(div_by_zero)
        self.assertEquals(q.count, 1)

        # keep for later
        enqueued_at_date = strip_microseconds(job.enqueued_at)

        w = Worker([q])
        w.work(burst=True)  # should silently pass

        # Postconditions
        self.assertEquals(q.count, 0)
        self.assertEquals(failed_q.count, 1)

        # Check the job
        job = Job.fetch(job.id)
        self.assertEquals(job.origin, q.name)

        # Should be the original enqueued_at date, not the date of enqueueing
        # to the failed queue
        self.assertEquals(job.enqueued_at, enqueued_at_date)
        self.assertIsNotNone(job.exc_info)  # should contain exc_info
Example #11
    def dequeue_any(cls, queues, blocking, connection=None):
        """Class method returning the Job instance at the front of the given
        set of Queues, where the order of the queues is important.

        When all of the Queues are empty, depending on the `blocking` argument,
        either blocks execution of this function until new messages arrive on
        any of the queues, or returns None.
        """
        queue_keys = [q.key for q in queues]

        for queue_key, job_ids in cls.lpop(queue_keys, blocking, connection=connection):

            for job_id in job_ids:
                queue = cls.from_queue_key(queue_key, connection=connection)

                try:
                    job = Job.fetch(job_id, connection=connection)
                except NoSuchJobError:
                    pass
                except UnpickleError as e:
                    # Attach queue information on the exception for improved error
                    # reporting
                    e.job_id = job_id
                    e.queue = queue
                    raise e
                else:
                    yield job, queue
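(A consumption sketch for the generator variant of dequeue_any defined above; note it matches the signature shown here, not necessarily the current rq API, and `connection` is an assumed Redis client.)

high, low = Queue('high'), Queue('low')
for job, queue in Queue.dequeue_any([high, low], blocking=False,
                                    connection=connection):
    # Queues are polled in priority order; expired job ids are skipped.
    print('dequeued %s from %s' % (job.id, queue.name))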
Example #12
 def test_periodic_job_sets_description(self):
     """
     Ensure that description is passed to RQ by schedule.
     """
     job = self.scheduler.schedule(datetime.utcnow(), say_hello, interval=5, description='description')
     job_from_queue = Job.fetch(job.id, connection=self.testconn)
     self.assertEqual('description', job_from_queue.description)
Example #13
def getresults(jobkey):
    job = Job.fetch(jobkey, connection=conn)

    if job.is_finished:
        return jsonify(job.result)
    else:
        return jsonify({"error": "error"}), 202
Example #14
 def status(self):
     if Job.exists(self.task_id, connection=redis_connection):
         job = Job.fetch(self.task_id, connection=redis_connection)
         job.refresh()
         return job.status
     else:
         return "unknown"
Example #15
    def test_job_dependency_race_condition(self):
        """Dependencies added while the job gets finished shouldn't get lost."""

        # This patches the enqueue_dependents to enqueue a new dependency AFTER
        # the original code was executed.
        orig_enqueue_dependents = Queue.enqueue_dependents

        def new_enqueue_dependents(self, job, *args, **kwargs):
            orig_enqueue_dependents(self, job, *args, **kwargs)
            if hasattr(Queue, '_add_enqueue') and Queue._add_enqueue is not None and Queue._add_enqueue.id == job.id:
                Queue._add_enqueue = None
                Queue().enqueue_call(say_hello, depends_on=job)

        Queue.enqueue_dependents = new_enqueue_dependents

        q = Queue()
        w = Worker([q])
        with mock.patch.object(Worker, 'execute_job', wraps=w.execute_job) as mocked:
            parent_job = q.enqueue(say_hello, result_ttl=0)
            Queue._add_enqueue = parent_job
            job = q.enqueue_call(say_hello, depends_on=parent_job)
            w.work(burst=True)
            job = Job.fetch(job.id)
            self.assertEqual(job.get_status(), JobStatus.FINISHED)

            # The created spy checks two issues:
            # * before the fix of #739, 2 of the 3 jobs were executed due
            #   to the race condition
            # * during the development another issue was fixed:
            #   due to a missing pipeline usage in Queue.enqueue_job, the job
            #   which was enqueued before the "rollback" was executed twice.
            #   So before that fix the call count was 4 instead of 3
            self.assertEqual(mocked.call_count, 3)
Example #16
def get_results(job_key):
    job = Job.fetch(job_key, connection=conn)

    if job.is_finished:
        return str(job.result), 200
    else:
        return "Nay!", 202
Example #17
    def test_self_modification_persistence_with_error(self):
        """Make sure that any meta modification done by
        the job itself persists completely through the
        queue/worker/job stack -- even if the job errored"""
        q = Queue()
        # Also make sure that previously existing metadata
        # persists properly
        job = q.enqueue(modify_self_and_error, meta={'foo': 'bar', 'baz': 42},
                        args=[{'baz': 10, 'newinfo': 'waka'}])

        w = Worker([q])
        w.work(burst=True)

        # Postconditions
        self.assertEqual(q.count, 0)
        failed_job_registry = FailedJobRegistry(queue=q)
        self.assertTrue(job in failed_job_registry)
        self.assertEqual(w.get_current_job_id(), None)

        job_check = Job.fetch(job.id)
        self.assertEqual(set(job_check.meta.keys()),
                         set(['foo', 'baz', 'newinfo']))
        self.assertEqual(job_check.meta['foo'], 'bar')
        self.assertEqual(job_check.meta['baz'], 10)
        self.assertEqual(job_check.meta['newinfo'], 'waka')
Example #18
def actions(request, queue_index):
    queue_index = int(queue_index)
    queue = get_queue_by_index(queue_index)

    if request.method == "POST" and request.POST.get("action", False):
        # Confirmation page for selected Action
        if request.POST.get("_selected_action", False):
            context_data = {
                "title": "Are you sure?",
                "queue_index": queue_index,
                "action": request.POST["action"],
                "job_ids": request.POST.getlist("_selected_action"),
                "queue": queue,
            }
            return render(request, "django_rq/confirm_action.html", context_data)

        # Performing the actual action
        elif request.POST.get("job_ids", False):
            job_ids = request.POST.getlist("job_ids")

            if request.POST["action"] == "delete":
                for job_id in job_ids:
                    job = Job.fetch(job_id, connection=queue.connection)
                    # Remove job id from queue and delete the actual job
                    queue.connection._lrem(queue.key, 0, job.id)
                    job.delete()
                messages.info(request, "You have successfully deleted %s jobs!" % len(job_ids))
            elif request.POST["action"] == "requeue":  # pragma: no cover
                for job_id in job_ids:
                    requeue_job(job_id, connection=queue.connection)
                messages.info(request, "You have successfully requeued %d  jobs!" % len(job_ids))

    return redirect("rq_jobs", queue_index)
Example #19
    def run(self):
        redis_conn = Redis()
        # Poll for trials that have started but not yet finished.
        while True:
            incomplete = self.session.query(Trial).filter(Trial.end_date == None).filter(Trial.start_date!=None).all()
            for t in incomplete:
                try:
                    job = Job.fetch(t.job, connection=redis_conn)
                except Exception:
                    print("Exception occurred. Moving on.")
                    sleep(1)
                    continue

                if job.result is not None:
                    print("Result for " + t.name + " found.")
                    t.result = job.result
                    t.end_date = datetime.now()

                    self.session.add(t)
                    self.session.commit()
                    self.session.expire(t)

            if self.stopped:
                self.session.close()
                return

            sleep(1)
Example #20
 def test_create_job_with_id(self):
     """
     Ensure that ID is passed to RQ.
     """
     job = self.scheduler._create_job(say_hello, id='id test', args=(), kwargs={})
     job_from_queue = Job.fetch(job.id, connection=self.testconn)
     self.assertEqual('id test', job_from_queue.id)
Example #21
 def obj_get(self, bundle, **kwargs):
     """
     Returns redis document from provided id.
     """
     queue = django_rq.get_queue('default')
     job = Job.fetch(kwargs['pk'], connection=queue.connection)
     return Document(**job.__dict__)
Example #22
    def get_jobs(self, until=None, with_times=False):
        """
        Returns a list of job instances that will be queued until the given time.
        If no 'until' argument is given all jobs are returned. This function
        accepts datetime and timedelta instances as well as integers representing
        epoch values.
        If with_times is True a list of tuples consisting of the job instance and
        its scheduled execution time is returned.
        """
        def epoch_to_datetime(epoch):
            return datetime.fromtimestamp(float(epoch))

        if until is None:
            until = "+inf"
        elif isinstance(until, datetime):
            until = until.strftime('%s')
        elif isinstance(until, timedelta):
            until = (datetime.now() + until).strftime('%s')
        job_ids = self.connection.zrangebyscore(self.scheduled_jobs_key, 0,
                                                until, withscores=with_times,
                                                score_cast_func=epoch_to_datetime)
        if not with_times:
            job_ids = zip(job_ids, repeat(None))
        jobs = []
        for job_id, sched_time in job_ids:
            try:
                job = Job.fetch(job_id, connection=self.connection)
                if with_times:
                    jobs.append((job, sched_time))
                else:
                    jobs.append(job)
            except NoSuchJobError:
                # Delete jobs that aren't there from scheduler
                self.cancel(job_id)
        return jobs
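(Usage sketch for get_jobs() above, assuming `scheduler` is an instance of the defining class: list everything due within the next hour together with its scheduled time.)

from datetime import timedelta

for job, scheduled_at in scheduler.get_jobs(until=timedelta(hours=1),
                                            with_times=True):
    print(job.id, scheduled_at)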
Example #23
 def test_periodic_job_sets_id(self):
     """
     Ensure that ID is passed to RQ by schedule.
     """
     job = self.scheduler.schedule(datetime.utcnow(), say_hello, interval=5, id='id test')
     job_from_queue = Job.fetch(job.id, connection=self.testconn)
     self.assertEqual('id test', job_from_queue.id)
Example #24
 def test_enqueue_preserves_result_ttl(self):
     """Enqueueing persists result_ttl."""
     q = Queue()
     job = q.enqueue(div_by_zero, args=(1, 2, 3), result_ttl=10)
     self.assertEqual(job.result_ttl, 10)
     job_from_queue = Job.fetch(job.id, connection=self.testconn)
     self.assertEqual(int(job_from_queue.result_ttl), 10)
Example #25
 def test_periodic_jobs_sets_ttl(self):
     """
     Ensure periodic jobs set result_ttl to infinite.
     """
     job = self.scheduler.schedule(datetime.utcnow(), say_hello, interval=5)
     job_from_queue = Job.fetch(job.id, connection=self.testconn)
     self.assertEqual(job_from_queue.result_ttl, -1)
Example #26
    def test_work_fails(self):
        """Failing jobs are put on the failed queue."""
        q = Queue()
        self.assertEqual(q.count, 0)

        # Action
        job = q.enqueue(div_by_zero)
        self.assertEqual(q.count, 1)

        # keep for later
        enqueued_at_date = str(job.enqueued_at)

        w = Worker([q])
        w.work(burst=True)  # should silently pass

        # Postconditions
        self.assertEqual(q.count, 0)
        failed_job_registry = FailedJobRegistry(queue=q)
        self.assertTrue(job in failed_job_registry)
        self.assertEqual(w.get_current_job_id(), None)

        # Check the job
        job = Job.fetch(job.id)
        self.assertEqual(job.origin, q.name)

        # Should be the original enqueued_at date, not the date of enqueueing
        # to the failed queue
        self.assertEqual(str(job.enqueued_at), enqueued_at_date)
        self.assertTrue(job.exc_info)  # should contain exc_info
Example #27
 def test_create_job_with_description(self):
     """
     Ensure that description is passed to RQ.
     """
     job = self.scheduler._create_job(say_hello, description='description', args=(), kwargs={})
     job_from_queue = Job.fetch(job.id, connection=self.testconn)
     self.assertEqual('description', job_from_queue.description)
Example #28
    def get_job(job_id):
        """ Get preview job details
        """
        try:
            job = Job.fetch(job_id, connection=Redis())

            if job:
                result = {
                    'id': job.id,
                    'status': job._status,
                    'meta': job.meta
                }

                # remove local path reference
                if 'path' in result['meta']:
                    del result['meta']['path']

                return jsonify(result=result)

            raise Exception('unable to find job')

        except Exception as ex:
            return jsonify(error=str(ex)), 404
Example #29
 def test_create_job_with_ttl(self):
     """
     Ensure that TTL is passed to RQ.
     """
     job = self.scheduler._create_job(say_hello, ttl=2, args=(), kwargs={})
     job_from_queue = Job.fetch(job.id, connection=self.testconn)
     self.assertEqual(2, job_from_queue.ttl)
Example #30
    def test_custom_exc_handling(self):
        """Custom exception handling."""
        def black_hole(job, *exc_info):
            # Don't fall through to default behaviour (moving to failed queue)
            return False

        q = Queue()
        failed_q = get_failed_queue()

        # Preconditions
        self.assertEquals(failed_q.count, 0)
        self.assertEquals(q.count, 0)

        # Action
        job = q.enqueue(div_by_zero)
        self.assertEquals(q.count, 1)

        w = Worker([q], exc_handler=black_hole)
        w.work(burst=True)  # should silently pass

        # Postconditions
        self.assertEquals(q.count, 0)
        self.assertEquals(failed_q.count, 0)

        # Check the job
        job = Job.fetch(job.id)
        self.assertEquals(job.is_failed, True)
Example #31
    def _reschedule(self):
        """Listen for completed jobs and reschedule successful ones"""

        pubsub = self.conn.pubsub()
        pubsub.subscribe(CH_PUBSUB)

        for msg in pubsub.listen():
            logger.debug("New message received of type %s", str(msg['type']))

            if msg['type'] != 'message':
                continue

            data = pickle.loads(msg['data'])

            if data['status'] == 'finished':
                job = Job.fetch(data['job_id'], connection=self.conn)
                self._reschedule_job(job)
            elif data['status'] == 'failed':
                logger.debug("Job #%s failed", data['job_id'])
Example #32
def mets():
    REG = registry.FinishedJobRegistry('default', connection=CON)
    JOBS = REG.get_job_ids()

    for job_num in JOBS:

        job = Job.fetch(job_num, connection=CON)
        start = job.started_at
        finish = job.ended_at
        duration = finish - start
        #print "job number: ", job_num
        #print "job function name: ", job.func_name
        #print "job duration: ", duration.seconds
        #print "job status: ", job.status
        #print "job result: ", job.result
        REQUEST_LATENCY.labels('web_api',
                               job.func_name).observe(duration.seconds)
        print REQUEST_LATENCY
    return REQUEST_LATENCY
Example #33
 def run_on_worker(self, **kwargs):
     cache = get_cache(self.cache_name)
     if isinstance(cache, DummyCache):
         # No caching or queues with dummy cache.
         data = self._worker_func(**kwargs)
         return (100, data, {}) if self._return_job_meta else (100, data)
     key = _get_cache_key(self.cache_section, **kwargs)
     cached = cache.get(key)
     if cached is not None:
         progress, job_id, data = cached
         connection = django_rq.get_connection(self.queue_name)
         job = Job.fetch(job_id, connection)
         if progress < 100 and job_id is not None:
             if job.is_finished:
                 data = job.result
                 progress = 100
                 cache.set(
                     key,
                     (progress, job_id, data),
                     timeout=self.cache_final_result_timeout,
                 )
             elif job.is_failed:
                 data = None
                 progress = 100
                 cache.delete(key)
     else:
         queue = django_rq.get_queue(self.queue_name)
         job = queue.enqueue_call(
             func=self._worker_func,
             kwargs=kwargs,
             timeout=self.work_timeout,
             result_ttl=self.cache_final_result_timeout,
         )
         progress = 0
         data = None
         cache.set(
             key,
             (progress, job.id, data),
             timeout=self.cache_timeout,
         )
     if self._return_job_meta:
         return progress, data, job.meta
     return progress, data
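(A polling sketch for run_on_worker() above, with self._return_job_meta left False; `report` is an assumed instance of the defining class and the kwargs are placeholders.)

import time

progress, data = report.run_on_worker(year=2020)
while progress < 100:
    time.sleep(1)  # cheap poll; each call re-reads the cache entry
    progress, data = report.run_on_worker(year=2020)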
Example #34
def get_result():
    job_id = request.args["job_id"]

    try:
        job = Job.fetch(job_id, connection=redis_conn)
    except Exception as exception:
        abort(404, description=exception)

    if not job.result:
        abort(
            404,
            description=f"No result found for job_id {job.id}. "
                        "Try checking the job's status.",
        )
    return jsonify(job.result)
Example #35
 def test_enqueue_job(self):
     """
     When scheduled job is enqueued, make sure:
     - Job is removed from the sorted set of scheduled jobs
     - "enqueued_at" attribute is properly set
     - Job appears in the right queue
     """
     now = datetime.now()
     queue_name = 'foo'
     scheduler = Scheduler(connection=self.testconn, queue_name=queue_name)
     job = scheduler.enqueue_at(now, say_hello)
     scheduler.enqueue_job(job)
     self.assertNotIn(job, self.testconn.zrange(scheduler.scheduled_jobs_key, 0, 10))
     job = Job.fetch(job.id, connection=self.testconn)
     self.assertTrue(job.enqueued_at is not None)
     queue = scheduler.get_queue_for_job(job)
     self.assertIn(job, queue.jobs)
     queue = Queue.from_queue_key('rq:queue:{0}'.format(queue_name))
     self.assertIn(job, queue.jobs)
Example #36
def get_results(job_key):

    job = Job.fetch(job_key, connection=conn)

    if job.is_finished:
        result = Result.query.filter_by(id=job.result).first()
        #        results = sorted(
        #            result.result_no_stop_words.items(),
        #            key=operator.itemgetter(1),
        #            reverse=True
        #        )[:50]
        results_sorted = sorted(result.result_no_stop_words.items(),
                                key=lambda t: t[1],
                                reverse=True)[:30]
        results = OrderedDict(results_sorted)
        #        results = result.result_no_stop_words.most_common(40)
        return jsonify(results)
    else:
        return "Computing...", 202
Example #37
 def get(self, job_id):
     try:
         job = Job.fetch(job_id, connection=r_connection)
     except Exception as inst:
         log.error("No job with id {}".format(job_id))
         response = jsonify({'error': 'no such job'})
         response.status_code = 404
         return response
     job.refresh()
     status = job.get_status()
     finished = job.is_finished
     meta = job.meta
     response = jsonify({
         'status': status,
         'finished': finished,
         'meta': meta
     })
     response.status_code = 200
     return response
Example #38
def job(id):
    try:
        job = Job.fetch(id, connection=redis)
    except NoSuchJobError:
        return json.dumps({
            'success': False,
            'job': None,
            'message': "Job not found",
        })

    return json.dumps({
        'task': job.func_name,
        'args': job.args,
        'status': job.get_status(),
        'result': job.result,
        'enqueued_at': str(job.enqueued_at),
        'started_at': str(job.started_at),
        'ended_at': str(job.ended_at),
    })
Example #39
    def test_fetch(self):
        """Fetching jobs."""
        # Prepare test
        self.testconn.hset(
            'rq:job:some_id', 'data',
            "(S'tests.fixtures.some_calculation'\nN(I3\nI4\nt(dp1\nS'z'\nI2\nstp2\n."
        )
        self.testconn.hset('rq:job:some_id', 'created_at',
                           '2012-02-07T22:13:24.123456Z')

        # Fetch returns a job
        job = Job.fetch('some_id')
        self.assertEqual(job.id, 'some_id')
        self.assertEqual(job.func_name, 'tests.fixtures.some_calculation')
        self.assertIsNone(job.instance)
        self.assertEqual(job.args, (3, 4))
        self.assertEqual(job.kwargs, dict(z=2))
        self.assertEqual(job.created_at,
                         datetime(2012, 2, 7, 22, 13, 24, 123456))
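(For contrast with the hand-seeded hash above: fetching an id with no backing Redis hash raises NoSuchJobError, which is why so many of these examples wrap Job.fetch in try/except. A sketch, with `connection` an assumed Redis client.)

from rq.exceptions import NoSuchJobError

try:
    Job.fetch('no-such-id', connection=connection)
except NoSuchJobError:
    print('job expired or never existed')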
Example #40
def get_running_task_dicts(tasks):
    """Get dictionary representations of the unfinished tasks from the specified list.

    Parameters
    ----------
    tasks : list
        The tasks that should be converted to dictionaries.

    Returns
    -------
    running_task_dicts : list
        The unfinished tasks in the form of dictionaries.

    """
    running_task_dicts = []
    with database.engine.begin() as connection:
        for task in tasks:
            print(json.loads(task.meta))
            job = Job.fetch(task.id, connection=redis_conn)
            project = connection.execute(
                select([
                    sqlalchemy.text('*')
                ]).select_from(models.projects).where(
                    models.projects.c.project_id == task.project_id)).first()
            task_dict = dict(id=task.id,
                             name=task.name,
                             description=task.description,
                             complete=task.complete,
                             result=task.result,
                             progress=task.get_progress(),
                             project_id=task.project_id)
            task_dict['meta'] = json.loads(
                task.meta) if task.meta is not None else {}

            if job:
                task_dict['status'] = job.get_status()
                # task_dict['started_at'] = datetime.datetime.fromtimestamp(
                #     task_dict['meta']['scheduled_at'])
                # print('scheduled_at: {}'.format(task_dict['started_at']))
            if project:
                task_dict['project_name'] = project['name']
            running_task_dicts.append(task_dict)
    return running_task_dicts
Example #41
    def test_custom_meta_is_rewriten_by_save_meta(self):
        """New meta data can be stored by save_meta."""
        job = Job.create(func=fixtures.say_hello, args=('Lionel', ))
        job.save()
        serialized = job.to_dict()

        job.meta['foo'] = 'bar'
        job.save_meta()

        raw_meta = self.testconn.hget(job.key, 'meta')
        self.assertEqual(loads(raw_meta)['foo'], 'bar')

        job2 = Job.fetch(job.id)
        self.assertEqual(job2.meta['foo'], 'bar')

        # nothing else was changed
        serialized2 = job2.to_dict()
        serialized2.pop('meta')
        self.assertDictEqual(serialized, serialized2)
Example #42
def job_detail(request, queue_index, job_id):
    queue_index = int(queue_index)
    queue = get_queue_by_index(queue_index)
    try:
        job = Job.fetch(job_id, connection=queue.connection)
    except NoSuchJobError:
        raise Http404("Couldn't find job with this ID: %s" % job_id)

    ###
    # Custom logic here
    use_actual_name(job)
    ##

    context_data = {
        'queue_index': queue_index,
        'job': job,
        'queue': queue,
    }
    return render(request, 'django_rq/job_detail.html', context_data)
Example #43
def delete_job(request, queue_index, job_id):
    queue_index = int(queue_index)
    queue = get_queue_by_index(queue_index)
    job = Job.fetch(job_id, connection=queue.connection)

    if request.method == 'POST':
        # Remove job id from queue and delete the actual job
        queue.connection.lrem(queue.key, 0, job.id)
        job.delete()
        messages.info(request, 'You have successfully deleted %s' % job.id)
        return redirect('rq_jobs', queue_index)

    context_data = {
        **admin.site.each_context(request),
        'queue_index': queue_index,
        'job': job,
        'queue': queue,
    }
    return render(request, 'django_rq/delete_job.html', context_data)
Example #44
    def test_self_modification_persistence(self):
        """Make sure that any meta modification done by
        the job itself persists completely through the
        queue/worker/job stack."""
        q = Queue()
        # Also make sure that previously existing metadata
        # persists properly
        job = q.enqueue(modify_self, meta={'foo': 'bar', 'baz': 42},
                        args=[{'baz': 10, 'newinfo': 'waka'}])

        w = Worker([q])
        w.work(burst=True)

        job_check = Job.fetch(job.id)
        self.assertEqual(set(job_check.meta.keys()),
                         set(['foo', 'baz', 'newinfo']))
        self.assertEqual(job_check.meta['foo'], 'bar')
        self.assertEqual(job_check.meta['baz'], 10)
        self.assertEqual(job_check.meta['newinfo'], 'waka')
Example #45
    def on_get(self, req, resp):
        """Get job IDs for in-progress indexing tasks."""
        indexing_job_ids = redis_client.smembers(
            self.keys.startup_indexing_job_ids())
        jobs = []

        if indexing_job_ids:
            for job_id in indexing_job_ids:
                try:
                    status = Job.fetch(job_id,
                                       connection=redis_client).get_status()
                except NoSuchJobError:
                    if job_id in registry.get_job_ids():
                        status = JOB_STARTED
                    else:
                        status = JOB_NOT_QUEUED
                jobs.append({"job_id": job_id, "status": status})

        resp.body = json.dumps({"jobs": jobs})
Example #46
def get_job_results(job_key):
    job = Job.fetch(job_key, connection=get_redis_client())
    if not job:
        return {
            "status": "Job not found or expired",
            "result": None,
            "started_at": None,
            "ended_at": None,
            "func_name": None
        }
    return {
        "status": job.get_status(),
        # avoid showing a previous job's result if the same id is reused
        "result": job.result if job.get_status() == "finished" else None,
        "started_at": job.started_at,
        "ended_at": job.ended_at,
        "func_name": job.func_name,
        "check_timestamp": datetime.datetime.now().timestamp(),
    }
Example #47
def get_average():

    # TODO: Code for case where there are no registries yet
    avg_time = {}
    redis = Redis()
    qs = ['high', 'default', 'low']
    for q in qs:
        registry = FinishedJobRegistry(name=q, connection=redis)
        averages = []
        for job_id in registry.get_job_ids():
            job = Job.fetch(job_id, connection=redis)
            averages.append((job.ended_at - job.enqueued_at).total_seconds())
        ave = sum(averages) / len(averages)
        avg_time[q] = time.strftime('%H:%M:%S', time.gmtime(ave))

    response = api.response_class(response=json.dumps(avg_time),
                                  status=200,
                                  mimetype='application/json')
    return response
Example #48
    def getjobliststatus(self, q):
        """provides a breakdown of all jobs in the queue"""
        log.info(f"getting jobs and status: {q}")
        try:
            if q:
                self.getqueue(q)
                task = self.local_queuedb[q]["queue"].get_job_ids()
                response_object = {
                    "status": "success",
                    "data": {
                        "task_id": []
                    }
                }
                # get startedjobs
                startedjobs = self.getstartedjobs(
                    self.local_queuedb[q]["queue"])
                for job in startedjobs:
                    task.append(job)

                # get finishedjobs
                finishedjobs = self.getfinishedjobs(
                    self.local_queuedb[q]["queue"])
                for job in finishedjobs:
                    task.append(job)

                # get failedjobs
                failedjobs = self.getfailedjobs(self.local_queuedb[q]["queue"])
                for job in failedjobs:
                    task.append(job)

                if task:
                    for job in task:
                        try:
                            jobstatus = Job.fetch(
                                job, connection=self.base_connection)
                            jobdata = self.render_task_response(jobstatus)
                            response_object["data"]["task_id"].append(jobdata)
                        except Exception as e:
                            return e
                return response_object
        except Exception as e:
            return e
Example #49
 def cancel_job(self, name):
     print("Cancelling job %s ..." % name)
     jobs = self.get_jobs()
     name_list = [x['name'] for x in jobs]
     try:
         status = jobs[name_list.index(str(name))]['status']
     except ValueError:
         print("There is no job named %s" % name)
         return False
     job = Job.fetch(id=name, connection=self.conn)
     status = job.get_status()
     if status == "finished" or status == "failed":
         print("Cannot cancel a finished/failde job")
         return False
     else:
         cancel_job(name, connection=self.conn)
         self.change_status(name, 'canceled')
         print("Job %s canceled..." % name)
         return True
Example #50
def deferred_jobs(request, queue_index):
    queue_index = int(queue_index)
    queue = get_queue_by_index(queue_index)

    registry = DeferredJobRegistry(queue.name, queue.connection)

    items_per_page = 100
    num_jobs = len(registry)
    page = int(request.GET.get('page', 1))
    jobs = []

    if num_jobs > 0:
        last_page = int(ceil(num_jobs / items_per_page))
        page_range = range(1, last_page + 1)
        offset = items_per_page * (page - 1)
        job_ids = registry.get_job_ids(offset, items_per_page)

        for job_id in job_ids:
            try:
                jobs.append(Job.fetch(job_id, connection=queue.connection))
            except NoSuchJobError:
                pass

    else:
        page_range = []

    ###
    # Custom logic here
    for job in jobs:
        use_actual_name(job)
    ##

    context_data = {
        'queue': queue,
        'queue_index': queue_index,
        'jobs': jobs,
        'num_jobs': num_jobs,
        'page': page,
        'page_range': page_range,
        'job_status': 'Deferred',
    }
    return render(request, 'django_rq/jobs.html', context_data)
Example #51
def is_running(model_name, bug_id):
    # Check if there is a job
    mapping_key = get_mapping_key(model_name, bug_id)

    job_id = redis_conn.get(mapping_key)

    if not job_id:
        return False

    try:
        job = Job.fetch(job_id.decode("utf-8"), connection=redis_conn)
    except NoSuchJobError:
        # The job might have expired from redis
        return False

    job_status = job.get_status()
    if job_status in ("running", "started", "queued"):
        return True

    return False
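(Usage sketch for is_running() above; schedule_classification() is a hypothetical enqueue helper used only to show the guard.)

if not is_running('defectenhancement', 12345):
    schedule_classification('defectenhancement', 12345)  # hypothetical helper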
Example #52
def home():
    result = {}
    error = ''
    if request.method == 'POST':
        url = request.form.get("url")
        if url and validate_url(url):
            res = Result.query.filter_by(url=url).first()
            if res is None:
                job = queue.enqueue_call(count_words, args=(url,))
                job = Job.fetch(job.get_id(), connection=conn)
                return redirect(url_for('get_results', job_key=job.get_id()))
            else:
                result = res.__dict__
                return render_template("home.html", result=result)
        else:
            return render_template(
                "home.html", result={},
                error="Invalid URL"), status.HTTP_400_BAD_REQUEST
    return render_template("home.html", result=result)
Example #53
def get_results(job_key):

    job = Job.fetch(job_key, connection=conn)

    if job.is_finished:
        today = job.result[0]
        tomorrow = job.result[1]
        difference = job.result[2]
        ticker = job.result[3]
        return jsonify({
            'ticker': str(ticker.upper()),
            'today': float(today),
            'tomorrow': float(tomorrow),
            'percentage_difference': float(difference),
        })
    else:
        return jsonify({
            'Message':
            "The job is still running - try again in a few seconds",
        })
Example #54
def cancel(job_id):
    job = Job.fetch(job_id, connection=redis)
    temp_blob = bucket.blob(os.path.join(TEMP, job_id))
    output_blob = bucket.blob(os.path.join(OUTPUT, job_id))
    if temp_blob.exists() : temp_blob.delete()
    if output_blob.exists() : output_blob.delete()
    try:
        if job.get_status() == "started":
            send_stop_job_command(redis, job_id)
            print ("- Stopped executing job", job_id)
            return True
        elif job.get_status() == "queued":
            queue.remove(job_id)
            print ("- Removed job from queue", job_id)
            return True
        else:
            print ("- No job found for", job_id)
    except Exception:
        print ("- No job found for", job_id)
    return False
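(send_stop_job_command used above ships with recent rq releases; the import, for reference, takes the connection first. A sketch against an assumed redis_conn.)

from rq.command import send_stop_job_command

send_stop_job_command(redis_conn, job_id)  # signals the job's work horse to stop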
Example #55
    def test_crontab_persisted_correctly_with_local_timezone(self):
        """
        Ensure that crontab attribute gets correctly saved in Redis when using local TZ.
        """
        # create a job that runs at 15:00 every day
        job = self.scheduler.cron("0 15 * * *",
                                  say_hello,
                                  use_local_timezone=True)
        job_from_queue = Job.fetch(job.id, connection=self.testconn)
        self.assertEqual(job_from_queue.meta['cron_string'], "0 15 * * *")

        # get the scheduled_time and convert it to a datetime object
        unix_time = self.testconn.zscore(self.scheduler.scheduled_jobs_key,
                                         job.id)
        datetime_time = from_unix(unix_time)

        expected_datetime_in_local_tz = datetime.now(tzlocal()).replace(
            hour=15, minute=0, second=0, microsecond=0)
        assert datetime_time.time(
        ) == expected_datetime_in_local_tz.astimezone(UTC).time()
Example #56
 def handle(self, *args, **options):
     with Connection(REDIS_CLIENT):
         workers = Worker.all(REDIS_CLIENT)
         for worker in workers:
             send_kill_horse_command(REDIS_CLIENT, worker.name)
             send_shutdown_command(REDIS_CLIENT, worker.name)
             worker.register_death()
         job_ids = AsyncCronMail.objects.values_list('job_id').filter(started_at__isnull=False,status=False).first()
         if AsyncCronMail.objects.filter(started_at__isnull=False,status=False).count() > 0:
             try:
                 job = Job.fetch(job_ids[0], connection=REDIS_CLIENT)
                 DEFAULT_QUEUE.empty()
                 DEFAULT_QUEUE.enqueue_job(job)
              except Exception:
                 print('Job does not exist')
         topper_registry = FailedJobRegistry(queue=TOPPER_QUEUE)
         for job_id in topper_registry.get_job_ids():
             topper_registry.remove(job_id, delete_job=True)
         w = Worker([DEFAULT_QUEUE,TOPPER_QUEUE], connection=REDIS_CLIENT, name='default_worker')
         w.work()
Example #57
def scanBufferGet(id):

    job2 = Job.fetch(id, connection=redis_conn)
    response = {}

    if job2.get_status() == 'started':
        response['status'] = 'pending'

    elif job2.get_status() == 'finished':
        response['status'] = 'ready'

        # for file in job2.result:
        #   base_Coll.insert_one({'md5': file['md5'], 'status': file['status']})

        response['result'] = job2.result

    elif job2.get_status() == 'failed':
        response['status'] = 'failed'

    return jsonify(response)
Example #58
def restart_batch_job(batch_job_id):
    batch_job = job_repository.find_one(batch_job_id)
    redis_conn = server.get_redis().get_redis_conn()
    if batch_job is None:
        return {
            "success": False,
            "statusCode": 400,
            "error": f"Batch job not found with id {batch_job_id}",
        }
    all_jobs = batch_job["jobs"]
    for job_meta in all_jobs:
        if job_meta["status"] != "finished":
            log.info(f"For batch job {batch_job_id}, requeue job {job_meta['_id']}")
            job = Job.fetch(job_meta["_id"], redis_conn)
            job.requeue()
    job_repository.update_one(batch_job_id, {"finished": False})
    server.get_job_queue().enqueue_job(
        poll_batch_job, priority="low", args=(batch_job_id,)
    )
    return {"success": True, "batch_job_id": batch_job_id}
Example #59
def get_results(job_key: str):
    """
    Renders the progress report for a particular job or displays an error message if job is not found.
    @param job_key: the job key
    @return: renders the appropriate page.
    """
    uri_prefix = 'spotify:playlist:'
    try:
        job = Job.fetch(job_key, connection=current_app.redis)
    except NoSuchJobError:
        return render_template('results.html', job_error="Job not found")
    if job.is_finished:
        if job_is_successful(job, uri_prefix):
            uri = job.result[len(uri_prefix):]
            return render_template('results.html', result=uri)
        else:
            return render_template('results.html', job_error=str(job.result))
    else:
        return render_template('results.html',
                               job_error="Job is not finished. Try refreshing the page soon to see results.")
Example #60
 def handle(self, *args, **options):
     import django_rq
     from rq.registry import FinishedJobRegistry
     from rq.exceptions import NoSuchJobError
     from rq.job import Job
     from datetime import datetime, timedelta
     day_before_yesterday = datetime.now() - timedelta(days=2)
     for index, config in enumerate(django_rq.settings.QUEUES_LIST):
         queue = django_rq.queues.get_queue_by_index(index)
         registry = FinishedJobRegistry(queue.name, queue.connection)
         for job_id in registry.get_job_ids():
             try:
                 job = Job.fetch(job_id, connection=queue.connection)
                  # delete jobs older than 2 days
                 if job.ended_at > day_before_yesterday:
                     continue
             except NoSuchJobError:
                  # the job itself was already deleted, but its registry key still exists
                 pass
             registry.connection.zrem(registry.key, job_id)