Example #1
0
    def test_all_queues(self):
        """Queue.all() lists each queue once and keeps emptied queues.

        A queue appears in Queue.all() only after its first job is
        enqueued; draining it afterwards does not remove it.
        """
        q1 = Queue('first-queue')
        q2 = Queue('second-queue')
        q3 = Queue('third-queue')

        # Ensure a queue is added only once a job is enqueued.
        # assertEquals is a deprecated alias; use assertEqual.
        self.assertEqual(len(Queue.all()), 0)
        q1.enqueue(say_hello)
        self.assertEqual(len(Queue.all()), 1)

        # Ensure this holds true for multiple queues
        q2.enqueue(say_hello)
        q3.enqueue(say_hello)
        names = [q.name for q in Queue.all()]
        self.assertEqual(len(Queue.all()), 3)

        # Verify names (assertIn gives a clearer failure message)
        self.assertIn('first-queue', names)
        self.assertIn('second-queue', names)
        self.assertIn('third-queue', names)

        # Now empty two queues
        w = Worker([q2, q3])
        w.work(burst=True)

        # Queue.all() should still report the empty queues
        self.assertEqual(len(Queue.all()), 3)
Example #2
0
    def test_all_queues(self):
        """Queue.all() lists queues lazily and drops unregistered keys.

        A queue appears only after a job is enqueued; removing its key
        from the queue registry hides it again.
        """
        q = Queue('first-queue')
        r = Queue('second-queue')
        s = Queue('third-queue')

        # Ensure a queue is added only once a job is enqueued.
        # assertEquals is a deprecated alias; use assertEqual.
        self.assertEqual(len(Queue.all()), 0)
        q.enqueue(say_hello)
        self.assertEqual(len(Queue.all()), 1)

        # Ensure this holds true for multiple queues
        r.enqueue(say_hello)
        s.enqueue(say_hello)
        names = [q.name for q in Queue.all()]
        self.assertEqual(len(Queue.all()), 3)

        # Verify names (assertIn gives a clearer failure message)
        self.assertIn('first-queue', names)
        self.assertIn('second-queue', names)
        self.assertIn('third-queue', names)

        # Ensure we no longer return queues whose keys do not exist
        self.testconn.srem(Queue.redis_queues_keys, s.key)
        self.assertEqual(len(Queue.all()), 2)
Example #3
0
    def test_all_queues(self):
        """All queues"""
        first = Queue('first-queue')
        second = Queue('second-queue')
        third = Queue('third-queue')

        # A queue is only registered once something is enqueued on it.
        self.assertEqual(len(Queue.all()), 0)
        first.enqueue(say_hello)
        self.assertEqual(len(Queue.all()), 1)

        # The same registration rule applies to additional queues.
        second.enqueue(say_hello)
        third.enqueue(say_hello)
        names = [queue.name for queue in Queue.all()]
        self.assertEqual(len(Queue.all()), 3)

        # All three names must be reported.
        for expected in ('first-queue', 'second-queue', 'third-queue'):
            self.assertTrue(expected in names)

        # Drain two of the queues with a burst worker.
        worker = Worker([second, third])
        worker.work(burst=True)

        # Emptied queues must still show up in Queue.all().
        self.assertEqual(len(Queue.all()), 3)
Example #4
0
    def test_all_queues(self):
        """Queue.all() registers queues on first enqueue and honours
        removal of queue keys from the registry set.
        """
        q = Queue('first-queue')
        r = Queue('second-queue')
        s = Queue('third-queue')

        # Ensure a queue is added only once a job is enqueued.
        # assertEquals is a deprecated alias; use assertEqual.
        self.assertEqual(len(Queue.all()), 0)
        q.enqueue(say_hello)
        self.assertEqual(len(Queue.all()), 1)

        # Ensure this holds true for multiple queues
        r.enqueue(say_hello)
        s.enqueue(say_hello)
        names = [q.name for q in Queue.all()]
        self.assertEqual(len(Queue.all()), 3)

        # Verify names (assertIn gives a clearer failure message)
        self.assertIn('first-queue', names)
        self.assertIn('second-queue', names)
        self.assertIn('third-queue', names)

        # Ensure we no longer return queues whose keys do not exist
        self.testconn.srem(Queue.redis_queues_keys, s.key)
        self.assertEqual(len(Queue.all()), 2)
Example #5
0
def get_all_queues():
    """Return list of all queues.

    Redefined in compat module to also return magic failed queue.
    """
    queues = Queue.all()
    return queues
Example #6
0
def get_queues_status():
    """Map each queue name to a dict with its current size."""
    status = {}
    for queue in Queue.all(connection=rq_redis_connection):
        status[queue.name] = {"size": len(queue)}
    return status
Example #7
0
def show_queues(queues, raw, by_queue):
    """Print a scaled bar chart of job counts per queue.

    Args:
        queues: iterable of queue names to show; falsy means all queues.
        raw: when true, emit plain machine-readable lines and no summary.
        by_queue: unused here; kept for a consistent callback signature.
    """
    # click.get_terminal_size() was removed in Click 8.1; the stdlib
    # equivalent returns a compatible (columns, lines) named tuple.
    import shutil

    if queues:
        qs = list(map(Queue, queues))
    else:
        qs = Queue.all()

    num_jobs = 0
    termwidth, _ = shutil.get_terminal_size()
    chartwidth = min(20, termwidth - 20)

    # First pass: gather counts so the chart can be scaled to the max.
    max_count = 0
    counts = dict()
    for q in qs:
        count = q.count
        counts[q] = count
        max_count = max(max_count, count)
    scale = get_scale(max_count)
    ratio = chartwidth * 1.0 / scale

    for q in qs:
        count = counts[q]
        if not raw:
            chart = green('|' + '█' * int(ratio * count))
            line = '%-12s %s %d' % (q.name, chart, count)
        else:
            line = 'queue %s %d' % (q.name, count)
        click.echo(line)

        num_jobs += count

    # print summary when not in raw mode
    if not raw:
        click.echo('%d queues, %d jobs total' % (len(qs), num_jobs))
Example #8
0
def get_rqinfo(options):
    """Query RQ queue/worker information according to *options*.

    Pushes a redis connection for the duration of the call and always
    pops it again — the original only popped when no option matched,
    leaking the pushed connection on every early return.
    """
    redis_conn = Redis.from_url(options.connection)
    push_connection(redis_conn)
    try:
        if options.status:
            workers = Worker.all()
            queues = Queue.all()
            return workers, queues
        if options.queue:
            queue = Queue(options.queue)
            return queue
        if options.cancel_job:
            cancel_job(options.cancel_job)
            return 'OK'
        if options.requeue_job:
            requeue_job(options.requeue_job)
            return 'OK'
        if options.requeue_all:
            return requeue_all()
        if options.empty_queue:
            empty_queue(options.empty_queue)
            return 'OK'
        if options.compact_queue:
            compact_queue(options.compact_queue)
            return 'OK'
        if options.queues:
            return list_queues()
        if options.jobs:
            return list_jobs(options.jobs)
        if options.workers:
            return list_workers()
    finally:
        # Always release the connection pushed above.
        pop_connection()
Example #9
0
def show_queues(args):
    """Print a scaled bar chart of job counts per queue.

    Reads args.queues (names to show; empty means all) and args.raw
    (plain output without chart or summary).
    """
    if len(args.queues):
        # Materialize the map: under Python 3 a bare map() iterator
        # would be exhausted by the first pass below and the final
        # len(qs) would raise TypeError.
        qs = list(map(Queue, args.queues))
    else:
        qs = Queue.all()

    num_jobs = 0
    termwidth, _ = gettermsize()
    chartwidth = min(20, termwidth - 20)

    # First pass: gather counts so the chart can be scaled to the max.
    max_count = 0
    counts = dict()
    for q in qs:
        count = q.count
        counts[q] = count
        max_count = max(max_count, count)
    scale = get_scale(max_count)
    ratio = chartwidth * 1.0 / scale

    for q in qs:
        count = counts[q]
        if not args.raw:
            chart = green('|' + '█' * int(ratio * count))
            line = '%-12s %s %d' % (q.name, chart, count)
        else:
            line = 'queue %s %d' % (q.name, count)
        print(line)

        num_jobs += count

    # Print summary when not in raw mode
    if not args.raw:
        print('%d queues, %d jobs total' % (len(qs), num_jobs))
Example #10
0
def jobs_overview(instance_number, queue_name, registry_name, per_page, page):
    """Render the jobs overview page for a queue/registry, uncached."""
    queue = Queue() if queue_name is None else Queue(queue_name)
    html = render_template(
        "rq_dashboard/jobs.html",
        current_instance=instance_number,
        instance_list=current_app.config.get("RQ_DASHBOARD_REDIS_URL"),
        queues=Queue.all(),
        queue=queue,
        per_page=per_page,
        page=page,
        registry_name=registry_name,
        rq_url_prefix=url_for(".queues_overview"),
        rq_dashboard_version=rq_dashboard_version,
        rq_version=rq_version,
        active_tab="jobs",
        scheduler_is_here=scheduler_is_here,
        deprecation_options_usage=current_app.config.get(
            "DEPRECATED_OPTIONS", False
        ),
    )
    response = make_response(html)
    # Dashboard pages must never be cached.
    response.headers.set("Cache-Control", "no-store")
    return response
Example #11
0
def get_all_queues():
    """List every known queue.

    The compat module overrides this function so that the magic
    failed queue is included as well.
    """
    all_queues = Queue.all()
    return all_queues
Example #12
0
def rq_queues():
    """Summarize started and queued jobs for each queue by name."""
    summary = {}
    for queue in Queue.all(connection=redis_connection):
        started_ids = StartedJobRegistry(queue=queue).get_job_ids()
        summary[queue.name] = {
            'name': queue.name,
            'started': fetch_jobs(queue, started_ids),
            'queued': len(queue.job_ids)
        }
    return summary
Example #13
0
def show_queues(args):
    """Print a scaled bar chart of job counts per queue.

    Reads args.queues (names to show; empty means all) and args.raw
    (plain output without chart or summary).
    """
    if len(args.queues):
        # Materialize the map: under Python 3 a bare map() iterator
        # would be exhausted by the first counting loop and the final
        # len(qs) would raise TypeError.
        qs = list(map(Queue, args.queues))
    else:
        qs = Queue.all()

    num_jobs = 0
    termwidth, _ = gettermsize()
    chartwidth = min(20, termwidth - 20)

    # First pass: gather counts so the chart can be scaled to the max.
    max_count = 0
    counts = dict()
    for q in qs:
        count = q.count
        counts[q] = count
        max_count = max(max_count, count)
    scale = get_scale(max_count)
    ratio = chartwidth * 1.0 / scale

    for q in qs:
        count = counts[q]
        if not args.raw:
            chart = green('|' + '█' * int(ratio * count))
            line = '%-12s %s %d' % (q.name, chart, count)
        else:
            line = 'queue %s %d' % (q.name, count)
        print(line)

        num_jobs += count

    # Print summary when not in raw mode
    if not args.raw:
        print('%d queues, %d jobs total' % (len(qs), num_jobs))
Example #14
0
File: oorq.py Project: idadzie/oorq
 def read(self, cursor, uid, ids, fields=None, context=None):
     """Return one dict per enqueued job, across all queues.

     When the context names a queue only that queue is read; otherwise
     every queue except the failed queue is listed.
     """
     setup_redis_connection()
     if not context:
         context = {}
     if 'queue' in context:
         queues = [Queue(context['queue'])]
     else:
         queues = Queue.all()
         # The failed queue is hidden from the general listing.
         try:
             queues.remove(Queue('failed'))
         except ValueError:
             pass
     jobs = []
     for qi, queue in enumerate(queues):
         for ji, job in enumerate(queue.jobs):
             jobs.append(
                 dict(id=int('%s%s' % (qi + 1, ji)),
                      jid=job.id,
                      queue=queue.name,
                      created_at=serialize_date(job.created_at),
                      enqueued_at=serialize_date(job.enqueued_at),
                      ended_at=serialize_date(job.ended_at),
                      origin=job.origin or False,
                      result=job._result or False,
                      exc_info=job.exc_info or False,
                      description=job.description or False))
     return jobs
def purge_failed_jobs():
    """Delete stale failed jobs from every non-operational queue."""
    with Connection(rq_redis_connection):
        for queue in Queue.all():
            # Operational queues are never purged.
            if queue.name in default_operational_queues:
                continue
            failed_job_ids = FailedJobRegistry(queue=queue).get_job_ids()
            failed_jobs = Job.fetch_many(failed_job_ids, rq_redis_connection)
            stale_jobs = []
            for failed_job in failed_jobs:
                # the job may not actually exist anymore in Redis
                if not failed_job:
                    continue
                # A missing ended_at means the worker died before it
                # could save one; such jobs count as stale too.
                if not failed_job.ended_at:
                    stale_jobs.append(failed_job)
                else:
                    age = (datetime.utcnow() - failed_job.ended_at).total_seconds()
                    if age > settings.JOB_DEFAULT_FAILURE_TTL:
                        stale_jobs.append(failed_job)

            for stale_job in stale_jobs:
                stale_job.delete()

            if stale_jobs:
                logger.info(
                    "Purged %d old failed jobs from the %s queue.",
                    len(stale_jobs),
                    queue.name,
                )
Example #16
0
def rq_job_ids():
    """Return all started job ids followed by all queued job ids."""
    queues = Queue.all(connection=redis_connection)

    started_jobs = []
    queued_jobs = []
    for queue in queues:
        started_jobs.append(StartedJobRegistry(queue=queue).get_job_ids())
        queued_jobs.append(queue.job_ids)

    # Started ids come first, matching the concatenation order.
    return flatten(started_jobs + queued_jobs)
Example #17
0
def isalive():
    """Report package versions, per-queue job counts and background
    thread liveness as a JSON response."""
    versions = {}
    for name in ["bos-auto", "peerplays", "bookiesports", "bos-incidents", "bos-sync"]:
        try:
            versions[name] = pkg_resources.require(name)[0].version
        except Exception:
            versions[name] = "not installed"

    # Collect job counts per queue.
    queue_status = {}
    with Connection(redis):
        q = Queue(connection=redis)
        for queue in q.all():
            queue_status[queue.name] = {"count": queue.count}

    background_threads_dict = {}
    for t in background_threads:
        try:
            background_threads_dict[t.name] = {"running": t.is_alive()}
        except Exception as e:
            log.error("Error in background task: {}".format(str(e)))

    return jsonify({
        "versions": versions,
        "queue": {"status": queue_status},
        "background": background_threads_dict,
    })
Example #18
0
 async def jobs_overview(request,
                         instance_number,
                         queue_name=None,
                         registry_name="queued",
                         per_page=8,
                         page=1):
     """Render the jobs page for the given queue and registry."""
     queue = Queue() if queue_name is None else Queue(queue_name)
     html = render_template(
         "rq_dashboard/jobs.html",
         current_instance=instance_number,
         instance_list=current_app.config.get("RQ_DASHBOARD_REDIS_URL"),
         queues=Queue.all(),
         queue=queue,
         per_page=per_page,
         page=page,
         registry_name=registry_name,
         rq_dashboard_version=rq_dashboard_version,
         rq_version=rq_version,
         active_tab="jobs",
         # The page must never be cached by the browser.
         headers={"Cache-Control": "no-store"})
     return make_response(html)
Example #19
0
def print_status(status_message=""):
    """Redraw the curses screen with worker and queue status.

    Relies on module globals: stdscr (curses window), conn (redis
    connection), maxyx (last known terminal size) and ypos (row
    cursor advanced by print_autoy).
    """
    global ypos
    global maxyx

    ypos = 0
    stdscr.clear()
    # Handle a terminal resize since the previous redraw.
    if curses.is_term_resized(*maxyx):
        maxyx = stdscr.getmaxyx()
        curses.resizeterm(*maxyx)

    # Header: current timestamp, then a blank line.
    print_autoy(datetime.now().strftime("%c"))
    print_autoy("")

    # Optional free-form message, one output line per '\n' chunk.
    if status_message:
        for msg in status_message.split('\n'):
            print_autoy(msg)


    ws = Worker.all(connection=conn)
    print_autoy("WORKERS (%s): " % len(ws), yadd=1)
    if ws:
        # Workers sorted by name, each with its current job (if any).
        for w in sorted(ws, key=lambda x: x.name):
            print_autoy("worker %s: %s" % (w.name, job_string(w.get_current_job())), xadd=2)
    else:
        print_autoy("no workers", xadd=2)

    qs = Queue.all(connection=conn)
    print_autoy("QUEUES: ", yadd=1)
    for q in sorted(qs, key=lambda x: x.name):
        print_autoy("%s (%s):" % (q.name, len(q)), xadd=2)

        # Jobs listed oldest-first by enqueue time, indented further.
        for j in sorted(q.get_jobs(), key=lambda x: x.enqueued_at):
            print_autoy(job_string(j), xadd=4)

    stdscr.refresh()
Example #20
0
def show_queues(queues, raw, by_queue):
    """Print a scaled bar chart of job counts per queue.

    Args:
        queues: iterable of queue names to show; falsy means all queues.
        raw: when true, emit plain machine-readable lines and no summary.
        by_queue: unused here; kept for a consistent callback signature.
    """
    # click.get_terminal_size() was removed in Click 8.1; the stdlib
    # equivalent returns a compatible (columns, lines) named tuple.
    import shutil

    if queues:
        qs = list(map(Queue, queues))
    else:
        qs = Queue.all()

    num_jobs = 0
    termwidth, _ = shutil.get_terminal_size()
    chartwidth = min(20, termwidth - 20)

    # First pass: gather counts so the chart can be scaled to the max.
    max_count = 0
    counts = dict()
    for q in qs:
        count = q.count
        counts[q] = count
        max_count = max(max_count, count)
    scale = get_scale(max_count)
    ratio = chartwidth * 1.0 / scale

    for q in qs:
        count = counts[q]
        if not raw:
            chart = green('|' + '█' * int(ratio * count))
            line = '%-12s %s %d' % (q.name, chart, count)
        else:
            line = 'queue %s %d' % (q.name, count)
        click.echo(line)

        num_jobs += count

    # print summary when not in raw mode
    if not raw:
        click.echo('%d queues, %d jobs total' % (len(qs), num_jobs))
Example #21
0
def get_info(show_failed=False):
	"""Return job info dicts for the current site.

	Covers jobs currently running on workers and jobs waiting in
	queues; failed jobs (at most 10 per queue) are included only when
	*show_failed* is truthy.
	"""
	conn = get_redis_conn()
	queues = Queue.all(conn)
	workers = Worker.all(conn)
	jobs = []

	def add_job(j, name):
		# Only report jobs belonging to the current site.
		if j.kwargs.get('site')==frappe.local.site:
			jobs.append({
				'job_name': j.kwargs.get('kwargs', {}).get('playbook_method') \
					or str(j.kwargs.get('job_name')),
				'status': j.status, 'queue': name,
				'creation': format_datetime(convert_utc_to_user_timezone(j.created_at)),
				'color': colors[j.status]
			})
			# Attach the traceback when the job carries one.
			if j.exc_info:
				jobs[-1]['exc_info'] = j.exc_info

	# Jobs currently being executed by workers.
	for w in workers:
		j = w.get_current_job()
		if j:
			add_job(j, w.name)

	# Pending jobs on all regular (non-failed) queues.
	for q in queues:
		if q.name != 'failed':
			for j in q.get_jobs(): add_job(j, q.name)

	# At most 10 failed jobs, only when explicitly requested.
	if cint(show_failed):
		for q in queues:
			if q.name == 'failed':
				for j in q.get_jobs()[:10]: add_job(j, q.name)

	return jobs
Example #22
0
File: oorq.py Project: gisce/oorq
 def read(self, cursor, uid, ids, fields=None, context=None):
     """Return a job dict for every enqueued job, grouped per queue.

     A queue named in the context restricts the listing to that queue;
     otherwise all queues except the failed queue are read.
     """
     setup_redis_connection()
     context = context if context else {}
     if 'queue' in context:
         queues = [Queue(context['queue'])]
     else:
         queues = Queue.all()
         try:
             # The failed queue is excluded from the general listing.
             queues.remove(Queue('failed'))
         except ValueError:
             pass
     jobs = []
     for queue_index, queue in enumerate(queues):
         for job_index, job in enumerate(queue.jobs):
             jobs.append(dict(
                 id=int('%s%s' % (queue_index + 1, job_index)),
                 jid=job.id,
                 queue=queue.name,
                 created_at=serialize_date(job.created_at),
                 enqueued_at=serialize_date(job.enqueued_at),
                 ended_at=serialize_date(job.ended_at),
                 origin=job.origin or False,
                 result=job._result or False,
                 exc_info=job.exc_info or False,
                 description=job.description or False,
             ))
     return jobs
Example #23
0
def get_info(show_failed=False):
	"""Return job info dicts for the current site.

	Covers jobs currently running on workers and jobs waiting in
	queues; failed jobs (at most 10 per queue) are included only when
	*show_failed* is truthy. Unlike related variants, created_at is
	formatted without timezone conversion here.
	"""
	conn = get_redis_conn()
	queues = Queue.all(conn)
	workers = Worker.all(conn)
	jobs = []

	def add_job(j, name):
		# Only report jobs belonging to the current site.
		if j.kwargs.get('site')==frappe.local.site:
			jobs.append({
				'job_name': j.kwargs.get('kwargs', {}).get('playbook_method') \
					or str(j.kwargs.get('job_name')),
				'status': j.status, 'queue': name,
				'creation': format_datetime(j.created_at),
				'color': colors[j.status]
			})
			# Attach the traceback when the job carries one.
			if j.exc_info:
				jobs[-1]['exc_info'] = j.exc_info

	# Jobs currently being executed by workers.
	for w in workers:
		j = w.get_current_job()
		if j:
			add_job(j, w.name)

	# Pending jobs on all regular (non-failed) queues.
	for q in queues:
		if q.name != 'failed':
			for j in q.get_jobs(): add_job(j, q.name)

	# At most 10 failed jobs, only when explicitly requested.
	if cint(show_failed):
		for q in queues:
			if q.name == 'failed':
				for j in q.get_jobs()[:10]: add_job(j, q.name)

	return jobs
Example #24
0
 def get_context_data(self, **kwargs):
     """Add RQ queue/worker listings and a page title to the context."""
     context = super(Stats, self).get_context_data(**kwargs)
     context['queues'] = Queue.all(connection=self.connection)
     context['workers'] = Worker.all(connection=self.connection)
     context['title'] = 'RQ Status'
     return context
Example #25
0
def remove_failed_jobs():
    """Delete every job in each queue's failed-job registry."""
    conn = get_redis_conn()
    for queue in Queue.all(conn):
        registry = queue.failed_job_registry
        for job_id in registry.get_job_ids():
            job = queue.fetch_job(job_id)
            # Remove the registry entry and the job data itself.
            registry.remove(job, delete_job=True)
Example #26
0
 def get_context_data(self, **kwargs):
     """Extend the base context with RQ queue/worker status."""
     ctx = super(Stats, self).get_context_data(**kwargs)
     extra = {
         'queues': Queue.all(connection=self.connection),
         'workers': Worker.all(connection=self.connection),
         'title': 'RQ Status',
     }
     ctx.update(extra)
     return ctx
Example #27
0
    def test_all_custom_job(self):
        """Queue.all() instantiates queues with the requested job class."""
        class CustomJob(Job):
            pass

        queue = Queue('all-queue')
        queue.enqueue(say_hello)
        found = Queue.all(job_class=CustomJob)
        self.assertEqual(len(found), 1)
        self.assertIs(found[0].job_class, CustomJob)
Example #28
0
def rq_queues():
    """Per-queue started/queued job summary, ordered by queue name."""
    summary = {}
    for queue in sorted(Queue.all(), key=lambda queue: queue.name):
        started_ids = StartedJobRegistry(queue=queue).get_job_ids()
        summary[queue.name] = {
            "name": queue.name,
            "started": fetch_jobs(started_ids),
            "queued": len(queue.job_ids),
        }
    return summary
Example #29
0
def show_workers(queues, raw, by_queue):
    """Print workers and the queues they listen to.

    Args:
        queues: iterable of queue names to filter by; falsy means all.
        raw: when true, emit plain machine-readable lines and no summary.
        by_queue: when true, group the output by queue instead of by
            worker.
    """
    if queues:
        qs = list(map(Queue, queues))

        def any_matching_queue(worker):
            def queue_matches(q):
                return q in qs

            return any(map(queue_matches, worker.queues))

        # Filter out workers that don't match the queue filter
        ws = [w for w in Worker.all() if any_matching_queue(w)]

        # Restrict a worker's listed queue names to the filtered set.
        def filter_queues(queue_names):
            return [qname for qname in queue_names if Queue(qname) in qs]

    else:
        qs = Queue.all()
        ws = Worker.all()
        filter_queues = (lambda x: x)

    if not by_queue:
        # One line per worker: name, state symbol, listened queues.
        for w in ws:
            worker_queues = filter_queues(w.queue_names())
            if not raw:
                click.echo('%s %s: %s' % (w.name, state_symbol(
                    w.get_state()), ', '.join(worker_queues)))
            else:
                click.echo('worker %s %s %s' %
                           (w.name, w.get_state(), ','.join(worker_queues)))
    else:
        # Create reverse lookup table
        queues = dict([(q, []) for q in qs])
        for w in ws:
            for q in w.queues:
                if q not in queues:
                    continue
                queues[q].append(w)

        # Pad queue names so the worker listings line up in a column.
        max_qname = max(map(lambda q: len(q.name),
                            queues.keys())) if queues else 0
        for q in queues:
            if queues[q]:
                queues_str = ", ".join(
                    sorted(
                        map(
                            lambda w: '%s (%s)' %
                            (w.name, state_symbol(w.get_state())),
                            queues[q])))  # noqa
            else:
                queues_str = '–'
            click.echo('%s %s' %
                       (pad(q.name + ':', max_qname + 1), queues_str))

    if not raw:
        click.echo('%d workers, %d queues' % (len(ws), len(qs)))
Example #30
0
def rq_queues():
    """Summarize started and queued jobs for every queue."""
    summary = {}
    for queue in Queue.all():
        started_ids = StartedJobRegistry(queue=queue).get_job_ids()
        summary[queue.name] = {
            "name": queue.name,
            "started": fetch_jobs(queue, started_ids),
            "queued": len(queue.job_ids),
        }
    return summary
Example #31
0
def queue_counts():
    """Return queued-job counts per queue plus a 'time' timestamp.

    The original re-wrapped each Queue object as a queue name
    (Queue(queue, ...)) and called the `count` property as a method
    (a_queue.count()), which raises TypeError; it also used Queue
    objects as dict keys. Fixed to key by queue name and read the
    property directly.
    """
    #fq = rq.Queue("failed", connection=constants.redis_conn)
    all_queues = Queue.all(connection = con)
    count_dict = {}
    count_dict['time'] = time.time()
    for queue in all_queues:
        # Queue.all() already yields Queue instances; `count` is a
        # property, not a callable.
        count_dict[queue.name] = queue.count
    return count_dict
Example #32
0
def queues_overview():
    """Render the queues overview page with caching disabled."""
    html = render_template(
        'rq_dashboard/queues.html',
        queues=Queue.all(),
        rq_url_prefix=url_for('.overview'),
        rq_dashboard_version=rq_dashboard_version,
        rq_version=rq_version,
    )
    response = make_response(html)
    # The dashboard must always show fresh data.
    response.headers.set('Cache-Control', 'no-store')
    return response
Example #33
0
File: oorq.py Project: idadzie/oorq
 def read(self, cursor, uid, ids, fields=None, context=None):
     """Return one summary dict (id, name, job count, emptiness) per
     queue.

     NOTE(review): the 'is_emprty' and '__last_updadate' key names are
     misspelled in the original; they are kept because consumers may
     rely on them.
     """
     setup_redis_connection()
     queues = []
     for i, queue in enumerate(Queue.all()):
         queues.append(dict(
             id=i + 1,
             name=queue.name,
             n_jobs=queue.count,
             is_emprty=queue.is_empty,
             __last_updadate=False,
         ))
     return queues
Example #34
0
def remove_empty_failed_jobs():
    """Drop failed-registry entries whose job data no longer exists."""
    queues = Queue.all()
    for queue in queues:
        registry = queue.failed_job_registry
        for job_id in registry.get_job_ids():
            try:
                Job.fetch(id=job_id)
            except NoSuchJobError:
                # The job hash is gone; clean up the stale registry entry.
                registry.remove(job_id)

    return dict(status="OK", count=len(queues))
Example #35
0
File: oorq.py Project: gisce/oorq
 def read(self, cursor, uid, ids, fields=None, context=None):
     """Return one summary dict (id, name, job count, emptiness) per
     queue.

     NOTE(review): the 'is_emprty' and '__last_updadate' key names are
     misspelled in the original; they are kept because consumers may
     rely on them.
     """
     setup_redis_connection()
     return [
         dict(
             id=position + 1,
             name=queue.name,
             n_jobs=queue.count,
             is_emprty=queue.is_empty,
             __last_updadate=False,
         )
         for position, queue in enumerate(Queue.all())
     ]
Example #36
0
def get_info(show_failed=False) -> List[Dict]:
    """Collect job info dicts for the current site.

    Includes jobs running on workers, queued jobs, and — when
    *show_failed* is truthy — jobs from each failed-job registry.
    """
    if isinstance(show_failed, str):
        # Accept a JSON-encoded boolean, e.g. from a request parameter.
        show_failed = json.loads(show_failed)

    conn = get_redis_conn()
    queues = Queue.all(conn)
    workers = Worker.all(conn)
    jobs = []

    def add_job(job: 'Job', name: str) -> None:
        # Skip jobs that belong to other sites.
        if job.kwargs.get('site') != frappe.local.site:
            return
        inner_kwargs = job.kwargs.get('kwargs', {})
        job_info = {
            'job_name': inner_kwargs.get('playbook_method')
            or inner_kwargs.get('job_type')
            or str(job.kwargs.get('job_name')),
            'status': job.get_status(),
            'queue': name,
            'creation': format_datetime(convert_utc_to_user_timezone(job.created_at)),
            'color': JOB_COLORS[job.get_status()]
        }
        if job.exc_info:
            job_info['exc_info'] = job.exc_info
        jobs.append(job_info)

    # Jobs currently being executed by workers.
    for worker in workers:
        current = worker.get_current_job()
        if current:
            add_job(current, worker.name)

    for queue in queues:
        # Jobs still waiting on regular (non-failed) queues.
        if queue.name != 'failed':
            for queued_job in queue.jobs:
                add_job(queued_job, queue.name)

        # Failed jobs, only when explicitly requested.
        if show_failed:
            registry = queue.failed_job_registry
            for job_id in registry.get_job_ids():
                fetched = queue.fetch_job(job_id)
                if fetched:
                    add_job(fetched, queue.name)

    return jobs
Example #37
0
def get_info(show_failed=False) -> List[Dict]:
    """Gather job dictionaries (running, queued and optionally failed)
    for the current site."""
    if isinstance(show_failed, str):
        # Accept a JSON-encoded boolean, e.g. from a request parameter.
        show_failed = json.loads(show_failed)

    conn = get_redis_conn()
    queues = Queue.all(conn)
    workers = Worker.all(conn)
    jobs = []

    def add_job(job: "Job", name: str) -> None:
        # Skip jobs that belong to other sites.
        if job.kwargs.get("site") != frappe.local.site:
            return
        inner_kwargs = job.kwargs.get("kwargs", {})
        job_info = {
            "job_name": inner_kwargs.get("playbook_method")
            or inner_kwargs.get("job_type")
            or str(job.kwargs.get("job_name")),
            "status": job.get_status(),
            "queue": name,
            "creation": format_datetime(convert_utc_to_user_timezone(job.created_at)),
            "color": JOB_COLORS[job.get_status()],
        }
        if job.exc_info:
            job_info["exc_info"] = job.exc_info
        jobs.append(job_info)

    # show worker jobs
    for worker in workers:
        current = worker.get_current_job()
        if current:
            add_job(current, worker.name)

    for queue in queues:
        # show active queued jobs
        if queue.name != "failed":
            for queued_job in queue.jobs:
                add_job(queued_job, queue.name)

        # show failed jobs, if requested
        if show_failed:
            registry = queue.failed_job_registry
            for job_id in registry.get_job_ids():
                fetched = queue.fetch_job(job_id)
                if fetched:
                    add_job(fetched, queue.name)

    return jobs
Example #38
0
def get_queue_info(r, q_2_w, timestamp):
    """Build per-queue stats from the rq:wip sorted sets.

    Entries scored strictly below *timestamp* count as expired, the
    rest as work-in-progress. Queues with no jobs, workers or wip
    entries are flagged with 'skip' so callers can hide them.
    """
    infos = []
    for q in Queue.all():
        wip_key = 'rq:wip:{q.name}'.format(q=q)
        expired_count = r.zcount(wip_key, 0, timestamp-1)
        wip_count = r.zcount(wip_key, timestamp, '+inf')
        worker_count = q_2_w[q.name]
        infos.append({
            'q': q,
            'workers': worker_count,
            'wip_count': wip_count,
            'expired_count': expired_count,
            'skip': all(x == 0 for x in (q.count, worker_count, wip_count, expired_count)),
        })
    return infos
Example #39
0
def remove_unused_queues(redis):
    '''
    Remove RQ queues that are not defined in this module.

    This is useful for cleaning up queues that used to be defined but
    were later removed.
    '''
    defined_names = {q.name for q in globals().values() if type(q) is Queue}

    with Connection(redis):
        for queue in Queue.all():
            if queue.name in defined_names:
                continue
            # Drop the jobs, then unregister the queue key itself.
            queue.empty()
            redis.srem('rq:queues', 'rq:queue:{}'.format(queue.name))
Example #40
0
    async def queues_overview(request, instance_number=0):
        """Render the queues overview page with caching disabled."""
        html = render_template(
            "rq_dashboard/queues.html",
            current_instance=instance_number,
            instance_list=current_app.config.get("RQ_DASHBOARD_REDIS_URL"),
            queues=Queue.all(),
            rq_dashboard_version=rq_dashboard_version,
            rq_version=rq_version,
            active_tab="queues",
        )
        # The overview must always show fresh data.
        return make_response(html, headers={"Cache-Control": "no-store"})
Example #41
0
def get_jobs_by_queue():
    """Get the current jobs by queue.

    Returns:
        dict: Dictionary of job count by status for each queue

    Raises:
        redis.exceptions.RedisError: On Redis connection errors

    """
    result = {}
    for queue in Queue.all():
        result[queue.name] = get_queue_jobs(queue.name)
    return result
Example #42
0
    def test_requeue_job(self):
        """Requeueing existing jobs moves them from the failed queue
        back to their origin queue."""
        job = Job.create(func=div_by_zero, args=(1, 2, 3))
        job.origin = 'fake'
        job.save()
        get_failed_queue().quarantine(job, Exception('Some fake error'))  # noqa

        self.assertEqual(Queue.all(), [get_failed_queue()])  # noqa
        # assertEquals is a deprecated alias; use assertEqual.
        self.assertEqual(get_failed_queue().count, 1)

        get_failed_queue().requeue(job.id)

        self.assertEqual(get_failed_queue().count, 0)
        self.assertEqual(Queue('fake').count, 1)
Example #43
0
 def _get_queues_info(self):
     """List queues with job counts, sorted by name.

     Queues seen in a previous poll but gone now are still reported
     with a job count of zero.
     """
     queues = []
     seen_names = set()
     for queue in Queue.all(connection=self.redis_connection):
         queues.append({'name': queue.name, 'jobs_count': len(queue.jobs)})
         seen_names.add(queue.name)
         self.registered_queues_names.add(queue.name)
     # Report previously-registered queues that have disappeared.
     for queue_name in self.registered_queues_names:
         if queue_name not in seen_names:
             queues.append({'name': queue_name, 'jobs_count': 0})
     return sorted(queues, key=lambda queue: queue["name"])
Example #44
0
 def test_enqueue_job_with_queue(self):
     """
     Ensure that job is enqueued correctly when the scheduler is bound
     to a queue object.
     """
     queue = Queue('foo', connection=self.testconn)
     scheduler = Scheduler(connection=self.testconn, queue=queue)
     job = scheduler._create_job(say_hello)
     # The scheduler must route the job to its bound queue.
     self.assertEqual(queue, scheduler.get_queue_for_job(job))
     scheduler.enqueue_job(job)
     self.assertIsNotNone(job.enqueued_at)
     self.assertIn(job, queue.jobs)
     self.assertIn(queue, Queue.all())
Example #45
0
def queues_table(_class="table table-bordered"):
    """Build an HTML TABLE listing each queue and its job count."""
    T = current.T
    queues = Queue.all()
    table = TABLE(
        THEAD(TR(TH(T('Queue')), TH(T('Jobs')))),
        _class=_class)
    if not queues:
        # Single placeholder row spanning both columns.
        table.append(TD(T('No queues'), _colspan="2"))
        return table
    for queue in queues:
        link = A(queue.name, _href=URL(args=['overview', queue.name]))
        table.append(TR(TD(link), TD(queue.count)))
    return table
Example #46
0
File: cli.py Project: Chu2015/rq
def empty(url, all, queues):
    """Empty given queues.

    NOTE: the `all` parameter name mirrors the CLI flag and therefore
    shadows the builtin inside this function.
    """
    conn = connect(url)

    if all:
        targets = Queue.all(connection=conn)
    else:
        targets = [Queue(name, connection=conn) for name in queues]

    if not targets:
        click.echo("Nothing to do")

    for queue in targets:
        removed = queue.empty()
        click.echo("{0} jobs removed from {1} queue".format(removed, queue.name))
Example #47
0
def overview(queue_name):
    """Render the dashboard for the requested queue.

    Without an explicit name, prefer the failed queue when it contains
    jobs; otherwise fall back to the default queue.
    """
    if queue_name is not None:
        queue = Queue(queue_name)
    else:
        failed = Queue('failed')
        queue = failed if not failed.is_empty() else Queue()

    return render_template('rq_dashboard/dashboard.html',
            workers=Worker.all(),
            queue=queue,
            queues=Queue.all())
Example #48
0
 def get_context_data(self, **kwargs):
     """Build the template context for the RQ status page.

     Adds the known queues and workers plus, when rq-scheduler is
     available, the scheduler object and per-queue counts of its
     scheduled jobs.
     """
     ctx = super(Stats, self).get_context_data(**kwargs)
     ctx.update({
         'queues': Queue.all(connection=self.connection),
         'workers': Worker.all(connection=self.connection),
         'title': 'RQ Status',
     })
     if Scheduler:
         scheduler = Scheduler(self.connection)

         # PEP 8: use a named function rather than binding a lambda.
         def get_queue(job):
             return job.origin

         # itertools.groupby requires its input sorted by the same key.
         all_jobs = sorted(scheduler.get_jobs(), key=get_queue)
         ctx['scheduler'] = scheduler
         ctx['scheduled_queues'] = [
             {'name': queue, 'job_count': len(list(jobs))}
             for queue, jobs in groupby(all_jobs, get_queue)]
     return ctx
Example #49
0
def show_workers(args):
    if len(args.queues):
        qs = map(Queue, args.queues)

        def any_matching_queue(worker):
            def queue_matches(q):
                return q in qs

            return any(map(queue_matches, worker.queues))

        # Filter out workers that don't match the queue filter
        ws = [w for w in Worker.all() if any_matching_queue(w)]

        def filter_queues(queue_names):
            return [qname for qname in queue_names if Queue(qname) in qs]

    else:
        qs = Queue.all()
        ws = Worker.all()
        filter_queues = lambda x: x

    if not args.by_queue:
        for w in ws:
            worker_queues = filter_queues(w.queue_names())
            if not args.raw:
                print "%s %s: %s" % (w.name, state_symbol(w.state), ", ".join(worker_queues))
            else:
                print "worker %s %s %s" % (w.name, w.state, ",".join(worker_queues))
    else:
        # Create reverse lookup table
        queues = dict([(q, []) for q in qs])
        for w in ws:
            for q in w.queues:
                if not q in queues:
                    continue
                queues[q].append(w)

        max_qname = max(map(lambda q: len(q.name), queues.keys())) if queues else 0
        for q in queues:
            if queues[q]:
                queues_str = ", ".join(sorted(map(lambda w: "%s (%s)" % (w.name, state_symbol(w.state)), queues[q])))
            else:
                queues_str = "–"
            print "%s %s" % (pad(q.name + ":", max_qname + 1), queues_str)

    if not args.raw:
        print "%d workers, %d queues" % (len(ws), len(qs))
Example #50
0
def show_workers(queues, raw, by_queue):
    """Echo the current workers, optionally filtered or grouped by queue.

    *queues* -- iterable of queue names to filter on (empty means all);
    *raw* -- emit a machine-friendly format and skip the summary line;
    *by_queue* -- print one line per queue instead of one per worker.
    """
    if queues:
        qs = list(map(Queue, queues))

        def any_matching_queue(worker):
            def queue_matches(q):
                return q in qs
            return any(map(queue_matches, worker.queues))

        # Filter out workers that don't match the queue filter
        ws = [w for w in Worker.all() if any_matching_queue(w)]

        def filter_queues(queue_names):
            return [qname for qname in queue_names if Queue(qname) in qs]

    else:
        qs = Queue.all()
        ws = Worker.all()
        filter_queues = (lambda x: x)

    if not by_queue:
        for w in ws:
            worker_queues = filter_queues(w.queue_names())
            if not raw:
                click.echo('%s %s: %s' % (w.name, state_symbol(w.get_state()), ', '.join(worker_queues)))
            else:
                click.echo('worker %s %s %s' % (w.name, w.get_state(), ','.join(worker_queues)))
    else:
        # Reverse lookup table: queue -> workers serving it.  Use a fresh
        # name instead of rebinding the ``queues`` parameter (shadowing).
        workers_by_queue = {q: [] for q in qs}
        for w in ws:
            for q in w.queues:
                if q not in workers_by_queue:
                    continue
                workers_by_queue[q].append(w)

        max_qname = max(map(lambda q: len(q.name), workers_by_queue.keys())) if workers_by_queue else 0
        for q in workers_by_queue:
            if workers_by_queue[q]:
                queues_str = ", ".join(sorted(map(lambda w: '%s (%s)' % (w.name, state_symbol(w.get_state())), workers_by_queue[q])))  # noqa
            else:
                queues_str = '–'
            click.echo('%s %s' % (pad(q.name + ':', max_qname + 1), queues_str))

    if not raw:
        click.echo('%d workers, %d queues' % (len(ws), len(qs)))
Example #51
0
def overview(queue_name, page):
    """Render the RQ dashboard for one queue.

    Default view surfaces the failed queue first when it has jobs.
    """
    if queue_name is not None:
        queue = Queue(queue_name)
    else:
        failed = Queue('failed')
        queue = failed if not failed.is_empty() else Queue()

    extension = current_app.extensions['rq-dashboard']
    return render_template(
        'rq_dashboard/dashboard.html',
        workers=Worker.all(),
        queue=queue,
        page=page,
        queues=Queue.all(),
        rq_url_prefix=extension.url_prefix)
Example #52
0
def overview(queue_name, page):
    """Dashboard view: show the selected queue, or failed/default."""
    if queue_name is not None:
        queue = Queue(queue_name)
    else:
        # Prefer the failed queue when it actually contains jobs.
        failed = Queue('failed')
        queue = failed if not failed.is_empty() else Queue()

    return render_template('dashboard.html',
                           workers=Worker.all(),
                           queue=queue,
                           page=page,
                           queues=Queue.all(),
                           rq_url_prefix=url_for('.overview'),
                           sitename=current_app.config['SITENAME'])
Example #53
0
def overview(queue_name, page):
    """Render the dashboard, defaulting to the failed queue if non-empty."""
    if queue_name is not None:
        queue = Queue(queue_name)
    else:
        failed = Queue("failed")
        queue = failed if not failed.is_empty() else Queue()

    return render_template(
        "rq_dashboard/dashboard.html",
        workers=Worker.all(),
        queue=queue,
        page=page,
        queues=Queue.all(),
        rq_url_prefix=url_for(".overview"),
    )
Example #54
0
def overview(queue_name, page):
    """Render the scheduler dashboard for the chosen queue.

    Without an explicit *queue_name*, show the failed queue when it has
    jobs, otherwise the default queue.
    """
    # Fix: removed the dead ``queue = Queue()`` pre-initialization — every
    # branch below assigns ``queue``, so the initial value was never read.
    if queue_name is None:
        # Show the failed queue by default if it contains any jobs
        failed = Queue('failed')
        if not failed.is_empty():
            queue = failed
        else:
            queue = Queue()
    else:
        queue = Queue(queue_name)

    return render_template(
        'rq_scheduler_dashboard/dashboard.html',
        queue=queue,
        page=page,
        queues=Queue.all(),
        rq_url_prefix=url_for('.overview')
    )
Example #55
0
def put_data(args):
    "Get RQ data and send to CloudWatch"
    cw = cloudwatch.connect_to_region(args['--region'])
    log.info('put_data {}'.format(args))

    def put_metrics(metrics, dimensions):
        # Every datapoint is additionally tagged with the environment.
        dimensions['env'] = args['--env']
        log.info('dims: {} ==> {}'.format(dimensions, metrics))
        cw.put_metric_data('RQ',
                           list(metrics.keys()),
                           list(metrics.values()),
                           unit='Count', dimensions=dimensions)

    redis = StrictRedis.from_url(args['--url'])

    # all workers
    workers = Worker.all(connection=redis)

    # Map each known queue to the list of workers serving it.
    queues = {q: [] for q in Queue.all(connection=redis)}
    for worker in workers:
        for q in worker.queues:
            if queues.get(q) is not None:
                queues[q].append(worker)

    for q, q_workers in queues.items():
        put_metrics({'jobs': len(q), 'workers': len(q_workers)},
                    {'queue': q.name})

        # Break this queue's workers down by worker state.
        states = {}
        for worker in q_workers:
            states[worker.state] = states.get(worker.state, 0) + 1

        for state, count in states.items():
            put_metrics({'workers': count},
                        {
                            'queue': q.name,
                            'state': state.decode(),
                        })
Example #56
0
    def test_enqueue_job(self):
        """
        When scheduled job is enqueued, make sure:
        - Job is removed from the sorted set of scheduled jobs
        - "enqueued_at" attribute is properly set
        - Job appears in the right queue
        - Queue is recognized by rq's Queue.all()
        """
        now = datetime.utcnow()
        queue_name = 'foo'
        scheduler = Scheduler(connection=self.testconn, queue_name=queue_name)

        # Schedule for "now" so the job is immediately due.
        job = scheduler.enqueue_at(now, say_hello)
        # NOTE(review): this enqueues via the fixture's ``self.scheduler``
        # rather than the local ``scheduler`` bound to 'foo' — confirm the
        # mix of the two schedulers is intentional.
        self.scheduler.enqueue_job(job)
        # The job must no longer sit in the scheduled-jobs sorted set.
        self.assertNotIn(job, tl(self.testconn.zrange(scheduler.scheduled_jobs_key, 0, 10)))
        # Re-fetch to see the state persisted in Redis.
        job = Job.fetch(job.id, connection=self.testconn)
        self.assertTrue(job.enqueued_at is not None)
        queue = scheduler.get_queue_for_job(job)
        self.assertIn(job, queue.jobs)
        # Cross-check by loading the queue straight from its Redis key.
        queue = Queue.from_queue_key('rq:queue:{0}'.format(queue_name))
        self.assertIn(job, queue.jobs)
        self.assertIn(queue, Queue.all())
Example #57
0
def overview(queue_name, page):
    """Render the dashboard for *queue_name* (failed-queue fallback)."""
    if queue_name is not None:
        queue = Queue(queue_name)
    else:
        # Surface the failed queue by default when it contains jobs.
        failed = Queue('failed')
        queue = failed if not failed.is_empty() else Queue()

    return render_template(
        'rq_dashboard/dashboard.html',
        workers=Worker.all(),
        queue=queue,
        page=page,
        queues=Queue.all(),
        static_endpoint=current_app.config.get(
            'RQ_DASHBOARD_STATIC_ENDPOINT', 'rq_dashboard.static'),
        asset_prefix=current_app.config.get('RQ_DASHBOARD_ASSET_PREFIX', ''),
        rq_url_prefix=url_for('.overview')
    )
Example #58
0
def retry_handler(job, exc_type, exc_value, traceback):
    # Returning True moves the job to the failed queue (or continue to
    # the next handler)

    # NOTE(review): setdefault(..., 1) makes the counter read 2 after the
    # first failure, so the ``> 3`` check trips after only two retries —
    # confirm this off-by-one is intentional (0 would give three retries).
    job.meta.setdefault('failures', 1)
    job.meta['failures'] += 1
    # if job.meta['failures'] > 3 or isinstance(exc_type, (LookupError, CorruptImageError)):
    if job.meta['failures'] > 3:
        logger.info("job failed > 3 times, so don't retry")
        job.save()
        return True

    logger.info("job failed, now retry it")
    job.status = JobStatus.QUEUED
    # Re-enqueue the job on its origin queue, if that queue still exists.
    for queue_ in Queue.all():
        if queue_.name == job.origin:
            queue_.enqueue_job(job)
            break
    else:
        return True  # Queue has disappeared, fail job

    return False  # Job is handled. Stop the handler chain.
 def get(self, queue_name=None, page=None):
     """Tornado handler: render the dashboard for *queue_name*/*page*."""
     queue_name = queue_name or None
     page = int(page) if page else 1
     if queue_name is not None:
         queue = Queue(queue_name)
     else:
         # Default: show the failed queue first when it has jobs.
         failed = Queue('failed')
         queue = failed if not failed.is_empty() else Queue()
     self.render('rq_dashboard/dashboard.html',
                 workers=Worker.all(),
                 queue=queue,
                 page=page,
                 queues=Queue.all(),
                 rq_url_prefix=options.url_prefix,
                 poll_interval=options.poll_interval,)
Example #60
0
def list_queues():
    """Return all known queues, serialized, in sorted order."""
    sorted_queues = sorted(Queue.all())
    return {'queues': serialize_queues(sorted_queues)}