Example #1
def restart(path_q_name, file_q_name, target, root, path, hdlr):
    try:
        logger.info("***************** restart *****************")
        r = StrictRedis()
        path_q = Queue(path_q_name, connection=r)
        file_q = Queue(file_q_name, connection=r)
        path_q_workers = Worker.all(queue=path_q)
        file_q_workers = Worker.all(queue=file_q)

        job_id = get_current_job().id

        def all_not_busy(ws):
            return all(w.get_state() != WorkerStatus.BUSY
                       or w.get_current_job_id() == job_id for w in ws)

        # This doesn't guarantee that only one tree walk runs, but it
        # prevents starting a tree walk while the file queue is not empty.
        if path_q.is_empty() and file_q.is_empty() and all_not_busy(
                path_q_workers) and all_not_busy(file_q_workers):
            logger.info("queue empty and worker not busy")
            path_q.enqueue(sync_path, path_q_name, file_q_name, target, root,
                           path, hdlr)
        else:
            logger.info("queue not empty or worker busy")

    except OSError as err:
        logger.warning("Warning: " + str(err))
    except Exception as err:
        logger.error("Unexpected error: " + str(err))
        raise
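
A minimal sketch of how restart could itself be enqueued on the path queue, so that get_current_job() inside it resolves to this job and all_not_busy() ignores the worker executing it (the queue names and sync arguments below are illustrative assumptions):

r = StrictRedis()
path_q = Queue('path_q', connection=r)
# Enqueue restart on the queue it inspects; the job_id comparison in
# all_not_busy() keeps this running job from blocking itself.
path_q.enqueue(restart, 'path_q', 'file_q', target, root, path, hdlr)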
Example #2
def show_workers(queues, raw, by_queue):
    if queues:
        qs = list(map(Queue, queues))

        def any_matching_queue(worker):
            def queue_matches(q):
                return q in qs

            return any(map(queue_matches, worker.queues))

        # Filter out workers that don't match the queue filter
        ws = [w for w in Worker.all() if any_matching_queue(w)]

        def filter_queues(queue_names):
            return [qname for qname in queue_names if Queue(qname) in qs]

    else:
        qs = Queue.all()
        ws = Worker.all()
        filter_queues = (lambda x: x)

    if not by_queue:
        for w in ws:
            worker_queues = filter_queues(w.queue_names())
            if not raw:
                click.echo('%s %s: %s' % (w.name, state_symbol(
                    w.get_state()), ', '.join(worker_queues)))
            else:
                click.echo('worker %s %s %s' %
                           (w.name, w.get_state(), ','.join(worker_queues)))
    else:
        # Create reverse lookup table
        queues = dict([(q, []) for q in qs])
        for w in ws:
            for q in w.queues:
                if q not in queues:
                    continue
                queues[q].append(w)

        max_qname = max(map(lambda q: len(q.name),
                            queues.keys())) if queues else 0
        for q in queues:
            if queues[q]:
                queues_str = ", ".join(
                    sorted(
                        map(
                            lambda w: '%s (%s)' %
                            (w.name, state_symbol(w.get_state())),
                            queues[q])))  # noqa
            else:
                queues_str = '–'
            click.echo('%s %s' %
                       (pad(q.name + ':', max_qname + 1), queues_str))

    if not raw:
        click.echo('%d workers, %d queues' % (len(ws), len(qs)))
Example #3
def test_start_worker():
    ctx = reload.context()
    p = Process(target = reload.startWorker)
    workers = Worker.all(connection=reload.redisQueue())
    assert len(list(workers)) == 0
    p.start()
    time.sleep(WAIT_PERIOD)
    workers = Worker.all(connection=reload.redisQueue())
    assert len(list(workers)) == 1
    p.terminate()
Example #4
    def check_workers(self, check_queues: typing.Union[str,
                                                       typing.Sequence[str]]):
        """Check that workers are alive.

        :param check_queues:  List or sequence of strings indicating queues
                              to check. See queue_name_list for format if
                              you want to pass a string instead of list.

        ~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-

        :return:  The string 'OK' if workers are alive or raises ValueError
                  if something is wrong.

        ~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-

        PURPOSE:  Go through queues in check_queues and see if there is
                  a worker in python rq to work on that queue.

        """
        queue_counts = {}
        worker_list = Worker.all(connection=Redis())
        for worker in worker_list:
            for qname in worker.queue_names():
                queue_counts[qname] = 1 + queue_counts.get(qname, 0)
        for qname in self.queue_name_list(check_queues):
            if not queue_counts.get(qname, None):
                raise ValueError('No workers found for queue "%s"' % qname)
        return 'OK'
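
A usage sketch for the method above (the owning class name is a hypothetical placeholder; queue_name_list is whatever parser the instance provides):

checker = HealthChecker()  # hypothetical class that defines check_workers
try:
    print(checker.check_workers(['high', 'default']))  # prints 'OK'
except ValueError as err:
    print('worker check failed: %s' % err)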
Example #5
def main(machine, instances, queues=['high', 'default', 'low']):
    r = StrictRedis.from_url(REDIS_URL_RQ)
    machine_workers = [worker
            for worker in Worker.all(connection=r)
            if is_local(machine, worker.name) and \
                any(works_on(worker, queue) for queue in queues)]

    print "%d workers running on %s" % (len(machine_workers), machine)
    if len(machine_workers):
        print '\n'.join(
            map(
                lambda m: "%s\t%s\t%s" % (m.name, m.get_state(), "stopped"
                                          if m.stopped else "running"),
                machine_workers))

    machine_info = workers(machine)
    rem = SshMachine(machine_info['hostname'],
                     ssh_opts=SSH_OPTS,
                     **machine_info.get('kwargs', {}))
    dir = rem.path(machine_info['rqdir'])

    with rem.cwd(dir):
        for i in range(instances - len(machine_workers)):
            rem["./worker.sh"](' '.join(queues))
            print("Worker spawned")
Example #6
def get_info(show_failed=False):
	conn = get_redis_conn()
	queues = Queue.all(conn)
	workers = Worker.all(conn)
	jobs = []

	def add_job(j, name):
		if j.kwargs.get('site')==frappe.local.site:
			jobs.append({
				'job_name': j.kwargs.get('kwargs', {}).get('playbook_method') \
					or str(j.kwargs.get('job_name')),
				'status': j.status, 'queue': name,
				'creation': format_datetime(j.created_at),
				'color': colors[j.status]
			})
			if j.exc_info:
				jobs[-1]['exc_info'] = j.exc_info

	for w in workers:
		j = w.get_current_job()
		if j:
			add_job(j, w.name)

	for q in queues:
		if q.name != 'failed':
			for j in q.get_jobs(): add_job(j, q.name)

	if cint(show_failed):
		for q in queues:
			if q.name == 'failed':
				for j in q.get_jobs()[:10]: add_job(j, q.name)

	return jobs
Example #7
def list_workers():
    def serialize_queue_names(worker):
        return [q.name for q in worker.queues]

    workers = [dict(name=worker.name, queues=serialize_queue_names(worker),
        state=worker.state) for worker in Worker.all()]
    return dict(workers=workers)
Example #8
 def deregister_worker(self, container):
     """finds and deregisters an rq worker"""
     # purge all workers still running on this container
     workers = Worker.all(connection=self.base_connection)
     for worker in workers:
         if worker.hostname == f"{container}":
             worker.register_death()
Example #9
def runWorker():
    p = Process(target=startWorker)
    workers = Worker.all(connection=redisQueue())
    assert len(list(workers)) == 0
    p.start()
    time.sleep(10)
    p.terminate()
Example #10
 def get_workers(self):
     """returns stats about all running rq workers"""
     try:
         workers = Worker.all(connection=self.base_connection)
         result = []
         for w in workers:
             w_bd = str(w.birth_date)
             w_lhb = str(w.last_heartbeat)
             birth_d = datetime.datetime.strptime(w_bd,
                                                  "%Y-%m-%d %H:%M:%S.%f")
             last_hb = datetime.datetime.strptime(w_lhb,
                                                  "%Y-%m-%d %H:%M:%S.%f")
             result.append(
                 WorkerResponse(
                     hostname=w.hostname,
                     pid=w.pid,
                     name=w.name,
                     last_heartbeat=last_hb,
                     birth_date=birth_d,
                     successful_job_count=w.successful_job_count,
                     failed_job_count=w.failed_job_count,
                     total_working_time=w.total_working_time).dict())
         return result
     except Exception as e:
         log.error(f"get_workers: {e}")
         return e
Example #11
def get_rqinfo(options):
    """获取rq队列信息
    """
    redis_conn = Redis.from_url(options.connection)
    push_connection(redis_conn)
    # RQ queue information operations
    if options.status:
        workers = Worker.all()
        queues = Queue.all()
        return workers, queues
    if options.queue:
        queue = Queue(options.queue)
        return queue
    if options.cancel_job:
        cancel_job(options.cancel_job)
        return 'OK'
    if options.requeue_job:
        requeue_job(options.requeue_job)
        return 'OK'
    if options.requeue_all:
        return requeue_all()
    if options.empty_queue:
        empty_queue(options.empty_queue)
        return 'OK'
    if options.compact_queue:
        compact_queue(options.compact_queue)
        return 'OK'
    if options.queues:
        return list_queues()
    if options.jobs:
        return list_jobs(options.jobs)
    if options.workers:
        return list_workers()
    pop_connection()
Example #12
def health(request):
    secret = settings.HEALTH_SECRET
    if secret is not None and not request.user.is_superuser:
        token = request.META.get('HTTP_X_TOKEN', None)
        if token is None or not constant_time_compare(token, secret):
            raise PermissionDenied()
    conn = get_redis_connection()
    workers = Worker.all(connection=conn)

    queues = defaultdict(lambda: defaultdict(int))
    for worker in workers:
        for queue in worker.queues:
            queues[queue.name]['workers'] += 1
            queues[queue.name]['tasks'] = queue.count

    data = {
        'queues': queues,
        'users': {
            'total': User.objects.all().count(),
            'active': User.objects.filter(is_suspended=False).count(),
        },
        'feeds': {
            'total': Feed.objects.all().count(),
            'unique': UniqueFeed.objects.all().count(),
        },
    }
    response = HttpResponse(json.dumps(data))
    response['Content-Type'] = 'application/json'
    return response
Example #13
def collect_workers_by_connection(queues):
    """
    Collects, into a list, dictionaries of connections_config and its workers.

    This function makes an association between a connection configuration and
    the workers that use it.
    The return value may look like:

        workers_collection = [
            {
                'config': {'DB': 0, 'PORT': 6379, 'HOST': 'localhost'},
                'all_workers': [worker1, worker2],
            },
            {
                'config': {'DB': 1, 'PORT': 6379, 'HOST': 'localhost'},
                'all_workers': [worker1]
            }
        ]

    Use `get_all_workers_by_configuration()` to select a worker group from the
    collection returned by this function.
    """
    workers_collections = []
    for item in queues:
        connection_params = filter_connection_params(item['connection_config'])
        if connection_params not in [c['config'] for c in workers_collections]:
            connection = get_connection(item['name'])
            collection = {
                'config': connection_params,
                'all_workers': Worker.all(connection=connection)
            }
            workers_collections.append(collection)
    return workers_collections
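
The docstring mentions get_all_workers_by_configuration() as the companion selector. A minimal sketch of it, assuming it takes a raw connection config and the collection built above (an illustration, not the project's actual implementation):

def get_all_workers_by_configuration(config, workers_collections):
    # Normalize the raw config exactly as the collector did, then return
    # the worker group that was registered under that configuration.
    params = filter_connection_params(config)
    for collection in workers_collections:
        if collection['config'] == params:
            return collection['all_workers']
    return []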
Example #14
def get_workers(request):

    workers = Worker.all(connection=tasks.redis_conn)

    workerdata = list()
    # serialize workers
    for w in workers:
        cj = w.get_current_job()

        if cj:
            cjinfo = {
                'id': cj.id,
                'args': cj.args,
                'enqueued_at': cj.enqueued_at.strftime("%a, %d %b %Y %H:%M:%S +0000"),
                'description': cj.description}
        else:
            cjinfo = None

        worker_dict = {
            'pid': w.pid,
            'name': w.name,
            'state': w.get_state(),
            'current_job': cjinfo}

        workerdata.append(worker_dict)
    data = json.dumps(workerdata)
    return HttpResponse(data, content_type='application/json')
Example #16
def overview(queue_name, page, state=None):
    if queue_name == 'failed':
        queue = get_failed_queue()
    elif queue_name is None:
        # Show the failed queue by default if it contains any jobs
        failed = get_failed_queue()
        if not failed.is_empty():
            queue = failed
        else:
            queue = Queue()
    else:
        queue = Queue(queue_name)

    r = make_response(
        render_template('rq_dashboard/dashboard.html',
                        workers=Worker.all(),
                        queue=queue,
                        page=page,
                        state=state,
                        queues=get_all_queues(),
                        rq_url_prefix=url_for('.overview'),
                        newest_top=current_app.config.get(
                            'RQ_DASHBOARD_JOB_SORT_ORDER') == '-age'))
    r.headers.set('Cache-Control', 'no-store')
    return r
Example #17
def get_info(show_failed=False):
	conn = get_redis_conn()
	queues = Queue.all(conn)
	workers = Worker.all(conn)
	jobs = []

	def add_job(j, name):
		if j.kwargs.get('site')==frappe.local.site:
			jobs.append({
				'job_name': j.kwargs.get('kwargs', {}).get('playbook_method') \
					or str(j.kwargs.get('job_name')),
				'status': j.status, 'queue': name,
				'creation': format_datetime(convert_utc_to_user_timezone(j.created_at)),
				'color': colors[j.status]
			})
			if j.exc_info:
				jobs[-1]['exc_info'] = j.exc_info

	for w in workers:
		j = w.get_current_job()
		if j:
			add_job(j, w.name)

	for q in queues:
		if q.name != 'failed':
			for j in q.get_jobs(): add_job(j, q.name)

	if cint(show_failed):
		for q in queues:
			if q.name == 'failed':
				for j in q.get_jobs()[:10]: add_job(j, q.name)

	return jobs
Example #18
def hello():
    with connect_to_redis(REDIS_URL_RQ) as r:
        with Connection(connection=r):
            ### redis info
            client_list = r.client_list()
            local_clients = [cl for cl in client_list if is_local(cl['addr'])]
            remote_clients = [cl for cl in client_list if not is_local(cl['addr'])]

            ws = Worker.all()
            q_2_w = defaultdict(int)
            for w in ws:
                for qn in w.queue_names():
                    q_2_w[qn] += 1

            timestamp = current_timestamp()

            return render_template(
                'home.html',
                queues=get_queue_info(r, q_2_w, timestamp),
                info={
                    'timestamp': timestamp,
                    'local_clients': len(local_clients),
                    'remote_clients': len(remote_clients),
                    'workers': len(ws)
                },
            )
Example #19
def print_status(status_message=""):
    global ypos
    global maxyx

    ypos = 0
    stdscr.clear()
    if curses.is_term_resized(*maxyx):
        maxyx = stdscr.getmaxyx()
        curses.resizeterm(*maxyx)

    print_autoy(datetime.now().strftime("%c"))
    print_autoy("")

    if status_message:
        for msg in status_message.split('\n'):
            print_autoy(msg)


    ws = Worker.all(connection=conn)
    print_autoy("WORKERS (%s): " % len(ws), yadd=1)
    if ws:
        for w in sorted(ws, key=lambda x: x.name):
            print_autoy("worker %s: %s" % (w.name, job_string(w.get_current_job())), xadd=2)
    else:
        print_autoy("no workers", xadd=2)

    qs = Queue.all(connection=conn)
    print_autoy("QUEUES: ", yadd=1)
    for q in sorted(qs, key=lambda x: x.name):
        print_autoy("%s (%s):" % (q.name, len(q)), xadd=2)

        for j in sorted(q.get_jobs(), key=lambda x: x.enqueued_at):
            print_autoy(job_string(j), xadd=4)

    stdscr.refresh()
Example #20
def start_searchable_activities_task(counter=0):
    workers = Worker.all(connection=redis_conn)
    queue = django_rq.get_queue("parser")

    has_other_jobs = False
    already_running_update = False

    for w in workers:
        if len(w.queues):
            if w.queues[0].name == "parser":
                current_job = w.get_current_job()
                if current_job:
                    if ('start_searchable_activities_task'
                            not in current_job.description):
                        has_other_jobs = True
                    if ('update_searchable_activities'
                            in current_job.description):
                        already_running_update = True

    if already_running_update:
        # update_searchable_activities is already running, or another
        # start_searchable_activities_task is running; invalidate this task
        pass
    elif not has_other_jobs:
        queue.enqueue(update_searchable_activities)
    elif counter > 180:
        raise Exception(
            "Waited for 30 min, still jobs running, so invalidating this "
            "task. If this happens please contact OIPA devs!")
    else:
        counter += 1
        time.sleep(120)
        queue.enqueue(start_searchable_activities_task,
                      args=(counter,), timeout=300)
Example #21
def overview(queue_name, page):
    if queue_name == 'failed':
        queue = get_failed_queue()
    elif queue_name is None:
        # Show the failed queue by default if it contains any jobs
        failed = get_failed_queue()
        if not failed.is_empty():
            queue = failed
        else:
            queue = Queue()
    else:
        queue = Queue(queue_name)
    r = make_response(
        render_template(
            'rq_dashboard/dashboard.html',
            workers=Worker.all(),
            queue=queue,
            page=page,
            queues=get_all_queues(),
            rq_url_prefix=url_for('.overview'),
            rq_dashboard_version=rq_dashboard_version,
            rq_version=rq_version,
        ))
    r.headers.set('Cache-Control', 'no-store')
    return r
Example #23
def show_workers(args):
    if len(args.queues):
        qs = list(map(Queue, args.queues))

        def any_matching_queue(worker):
            def queue_matches(q):
                return q in qs

            return any(map(queue_matches, worker.queues))

        # Filter out workers that don't match the queue filter
        ws = [w for w in Worker.all() if any_matching_queue(w)]

        def filter_queues(queue_names):
            return [qname for qname in queue_names if Queue(qname) in qs]

    else:
        qs = Queue.all()
        ws = Worker.all()
        filter_queues = lambda x: x

    if not args.by_queue:
        for w in ws:
            worker_queues = filter_queues(w.queue_names())
            if not args.raw:
                print "%s %s: %s" % (w.name, state_symbol(w.state), ", ".join(worker_queues))
            else:
                print "worker %s %s %s" % (w.name, w.state, ",".join(worker_queues))
    else:
        # Create reverse lookup table
        queues = dict([(q, []) for q in qs])
        for w in ws:
            for q in w.queues:
                if q not in queues:
                    continue
                queues[q].append(w)

        max_qname = max(map(lambda q: len(q.name), queues.keys())) if queues else 0
        for q in queues:
            if queues[q]:
                queues_str = ", ".join(sorted(map(lambda w: "%s (%s)" % (w.name, state_symbol(w.state)), queues[q])))
            else:
                queues_str = "–"
            print "%s %s" % (pad(q.name + ":", max_qname + 1), queues_str)

    if not args.raw:
        print "%d workers, %d queues" % (len(ws), len(qs))
Example #24
def listhosts():
    r = StrictRedis.from_url(REDIS_URL_RQ)
    machines = list(
        set([
            worker.name.partition('.')[0]
            for worker in Worker.all(connection=r)
        ]))
    print('\n'.join(machines))
Example #25
def show_workers(queues, raw, by_queue):
    if queues:
        qs = list(map(Queue, queues))

        def any_matching_queue(worker):
            def queue_matches(q):
                return q in qs
            return any(map(queue_matches, worker.queues))

        # Filter out workers that don't match the queue filter
        ws = [w for w in Worker.all() if any_matching_queue(w)]

        def filter_queues(queue_names):
            return [qname for qname in queue_names if Queue(qname) in qs]

    else:
        qs = Queue.all()
        ws = Worker.all()
        filter_queues = (lambda x: x)

    if not by_queue:
        for w in ws:
            worker_queues = filter_queues(w.queue_names())
            if not raw:
                click.echo('%s %s: %s' % (w.name, state_symbol(w.get_state()), ', '.join(worker_queues)))
            else:
                click.echo('worker %s %s %s' % (w.name, w.get_state(), ','.join(worker_queues)))
    else:
        # Create reverse lookup table
        queues = dict([(q, []) for q in qs])
        for w in ws:
            for q in w.queues:
                if q not in queues:
                    continue
                queues[q].append(w)

        max_qname = max(map(lambda q: len(q.name), queues.keys())) if queues else 0
        for q in queues:
            if queues[q]:
                queues_str = ", ".join(sorted(map(lambda w: '%s (%s)' % (w.name, state_symbol(w.get_state())), queues[q])))  # noqa
            else:
                queues_str = '–'
            click.echo('%s %s' % (pad(q.name + ':', max_qname + 1), queues_str))

    if not raw:
        click.echo('%d workers, %d queues' % (len(ws), len(qs)))
Example #26
    def test_worker_all(self):
        """Worker.all() works properly"""
        foo_queue = Queue('foo')
        bar_queue = Queue('bar')

        w1 = Worker([foo_queue, bar_queue], name='w1')
        w1.register_birth()
        w2 = Worker([foo_queue], name='w2')
        w2.register_birth()

        self.assertEqual(set(Worker.all(connection=foo_queue.connection)),
                         set([w1, w2]))
        self.assertEqual(set(Worker.all(queue=foo_queue)), set([w1, w2]))
        self.assertEqual(set(Worker.all(queue=bar_queue)), set([w1]))

        w1.register_death()
        w2.register_death()
Example #27
 def get_context_data(self, **kwargs):
     ctx = super(Stats, self).get_context_data(**kwargs)
     ctx.update({
         'queues': Queue.all(connection=self.connection),
         'workers': Worker.all(connection=self.connection),
         'title': 'RQ Status',
     })
     return ctx
Example #29
def wait_for_workers(db: Redis[bytes], nworkers: int) -> None:
    """Wait till nworkers are connected."""
    while True:
        workers = Worker.all(connection=db)
        if len(workers) == nworkers:
            break
        else:
            time.sleep(0.1)
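
A short usage sketch (assumes a local Redis and workers started elsewhere, e.g. as `rq worker` processes):

from redis import Redis

db = Redis()
# Blocks, polling every 100 ms, until exactly two workers have registered.
wait_for_workers(db, 2)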
Example #30
def get_active_jobs():
    """Gets the current job for all active rq workers."""
    jobs = []
    for worker in Worker.all(queue=q):
        current_job = worker.get_current_job()
        if current_job:
            jobs.append(current_job)
    return jobs
Example #31
 def list_workers(self):
     with Connection(self.redis_conn):
         l = []
         for w in Worker.all():
             l.append({
                 'name': w.name,
                 'state': w.get_state(),
                 'queues': [q.name for q in w.queues]})
         return l
Example #32
def run_worker():
    redis_url = REDIS_URL
    redis_connection = redis.from_url(redis_url)
    with Connection(redis_connection):
        #worker = Worker.count(connection = redis_connection)
        worker = Worker.all()
        worker = Worker(['default'])
        #print(worker)
        worker.work()
Example #33
 def _get_workers_info(self):
     return sorted(
         [{
             'state': worker.state,
             'name': worker.name,
             'queue_names': ', '.join(worker.queue_names()),
         } for worker in Worker.all(connection=self.redis_connection)],
         key=lambda worker: worker["name"],
     )
Example #34
def main(args, redis_connection):
    command = args.command

    workers_by_name = {
        worker.name: worker
        for worker in Worker.all(redis_connection)
    }
    for gpu in args.gpu_ids:
        for n in args.n_procs:
            worker_name = 'worker_{}_{}'.format(gpu, n)
            log_file = '{}.log'.format(os.path.join(args.logdir, worker_name))

            proc = find_worker_process(worker_name)
            worker = workers_by_name.get(worker_name)
            if proc and worker:
                # Worker registered and process running
                if command == 'start':
                    logging.debug('Worker %s already running as pid %s',
                                  worker_name, proc.pid)
                    continue
                elif command in ('stop', 'restart'):
                    logging.info('Terminating worker %s (pid %s)', worker_name,
                                 proc.pid)
                    proc.kill()
                    stop_worker(worker)
            elif proc is None and worker:
                # Zombie worker: No process running
                logging.error(
                    'Worker %s is not associated with a running process, cleaning up',
                    worker_name)
                stop_worker(worker)
            elif proc and not worker:
                # Zombie process: No worker registered
                logging.error(
                    'Process %s is not associated with a Worker, stopping',
                    worker_name)
                proc.kill()
            elif proc is None and worker is None:
                # No workers / processes running
                pass

            if command in ('start', 'restart'):
                worker_command = f'nohup rq worker -w {args.worker_class} -j {args.job_class} -n {worker_name} >> {log_file} 2>&1 &'
                logging.debug('Starting worker %s with command %s',
                              worker_name, worker_command)
                _ = subprocess.Popen(worker_command,
                                     cwd=args.workdir,
                                     shell=True,
                                     preexec_fn=os.setpgrp)
                time.sleep(1.0)
                proc = find_worker_process(worker_name)
                if proc:
                    logging.info('Started worker %s (pid %s)', worker_name,
                                 proc.pid)
                else:
                    logging.error('Failed to start worker %s, see %s',
                                  worker_name, log_file)
Example #35
    def consume(self,
                data=None,
                cleanup=None,
                split_count=None,
                synchronous=False,
                timeout=None,
                **node_contexts):
        """Setup node contexts and consume data with the pipeline

        Parameters
        ----------
        data : iterable, optional
            Iterable of data to consume
        cleanup : dict, optional
            A mapping of arg names to clean up functions to be run after
            data processing is complete.
        split_count : int, optional
            How many slices to split the data into for parallel processing. Default
            is the number of workers in the provided queue.
        synchronous : bool, optional
            If False, return Jobs. If True, wait for jobs to complete and
            return their results, if any.
        timeout : int or float, optional
            If waiting for results, raise an exception if polling for all
            results takes longer than timeout seconds.
        **node_contexts
            Keyword arguments that are node_name->param_dict

        """
        if not split_count:
            dbg("determining split count from rq worker count")
            workers = Worker.all(queue=self.queue)
            split_count = len(workers)

        split_count = split_count_helper(data, split_count)
        if data is None:
            splits = [None for s in range(split_count)]
        else:
            splits = divide_data(data, split_count)

        dbg("%s: data len: %s, splits: %d" %
            (self.__class__.__name__, size(data, "n/a"), split_count))

        async_results = []
        for split in splits:
            async_results.append(
                self.queue.enqueue(
                    rq_consume,
                    args=(self.pipeline, split),
                    kwargs=dict(cleanup=cleanup, **node_contexts),
                ))

        if synchronous:
            return get_async_results(async_results, timeout=timeout)

        return async_results
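
A hedged usage sketch of consume() (the pipeline instance `p` and its queue wiring are assumptions; only parameters documented in the docstring are used):

# Fire-and-forget: returns the enqueued RQ jobs, one per data split.
jobs = p.consume(data=range(100))

# Synchronous: waits up to 60 seconds for all splits to finish and
# returns their results.
results = p.consume(data=range(100), synchronous=True, timeout=60)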
Example #36
    def test_worker_all(self):
        """Worker.all() works properly"""
        foo_queue = Queue('foo')
        bar_queue = Queue('bar')

        w1 = Worker([foo_queue, bar_queue], name='w1')
        w1.register_birth()
        w2 = Worker([foo_queue], name='w2')
        w2.register_birth()

        self.assertEqual(
            set(Worker.all(connection=foo_queue.connection)),
            set([w1, w2])
        )
        self.assertEqual(set(Worker.all(queue=foo_queue)), set([w1, w2]))
        self.assertEqual(set(Worker.all(queue=bar_queue)), set([w1]))

        w1.register_death()
        w2.register_death()
Example #37
	def start_work(queue_names):

		# Provide queue names to listen to as arguments to this script,
		with Connection():
			working_queues = [queue.name for worker in Worker.all() for queue in worker.queues]
			queues_to_start = [queue for queue in queue_names if queue not in working_queues]

			# if there's not already a worker for that queue name, then start one up!
			if len(queues_to_start) > 0:
				[ProcessWorker(queue).run() for queue in queues_to_start]
Example #38
def are_workers_done():
    workers_working = 0
    for worker in Worker.all(connection=helper.redis_con):
        if worker.get_current_job():
            workers_working = workers_working + 1
    if workers_working > 0:
        print "WE HAVE {} WORKERS WORKING".format(workers_working)
        return False
    else:
        return True
Example #39
def workersrq():
    workers = []
    for worker in Worker.all(connection=conn1):
        workers.append(worker.name)
        print(worker.name, worker.state)
    if len(workers) == 0:
        print("No WORKERS!!!")
        return None
    else:
        return workers[0]
Example #40
    def startWorkers(self):
        # Find the number of current workers
        queues = getattr(settings, 'RQ_QUEUES', {})
        default = queues['default'] if 'default' in queues else None
        variants = queues['variants'] if 'variants' in queues else None

        if not (queues and default and variants):
            log.warning('RQ_QUEUES settings could not be found')
            return

        # Create connections to redis to identify the workers
        def_connection = redis.Redis(host=default['HOST'],
                                     port=default['PORT'],
                                     db=default['DB'])
        var_connection = redis.Redis(host=variants['HOST'],
                                     port=variants['PORT'],
                                     db=variants['DB'])

        # Get all the workers connected with our redis server
        try:
            all_workers = Worker.all(def_connection) + \
                Worker.all(var_connection)
        except ConnectionError:
            log.warning('Could not connect to redis server to create workers. '
                        'Please make sure Redis server is running')
            return

        found_default = False
        found_variant = False

        # Loop through all the workers (even duplicates)
        for worker in all_workers:
            found_default = found_default or 'default' in worker.queue_names()
            found_variant = found_variant or 'variants' in worker.queue_names()

        # Start the required worker
        if not found_variant:
            log.debug('Did not find variants worker. Starting ... ')
            get_worker('variants').work(burst=True)

        if not found_default:
            log.debug('Did not find default worker. Starting ... ')
            get_worker('default').work(burst=True)
Example #41
 def list_workers(self):
     with Connection(self.redis_conn):
         l = []
         for w in Worker.all():
             l.append({
                 'name': w.name,
                 'state': w.get_state(),
                 'queues': [q.name for q in w.queues]
             })
         return l
Example #42
 def form_valid(self, form, ):
     redis_conn = django_rq.get_connection('default')
     if len([x for x in Worker.all(connection=redis_conn) if settings.DJANGO_TEST_RQ_LOW_QUEUE in x.queue_names()]) == 0:
         messages.add_message(self.request, messages.ERROR, 'No active workers for queue!')
         return HttpResponseRedirect(reverse('long_tasks'))
         
     form.instance.result = 'QUEUED'
     long_task = form.save()
     long_runnig_task.delay(long_task)
     messages.info(self.request, 'Long task started.')
     return HttpResponseRedirect(reverse('long_tasks'))
Example #43
 def read(self, cursor, uid, ids, fields=None, context=None):
     """Show connected workers.
     """
     setup_redis_connection()
     workers = [dict(
         id=worker.pid,
         name=worker.name,
         queues=', '.join([q.name for q in worker.queues]),
         state=worker.state,
         __last_update=False
     ) for worker in Worker.all()]
     return workers
Example #44
def overview(queue_name):
    if queue_name is None:
        # Show the failed queue by default if it contains any jobs
        failed = Queue("failed")
        if not failed.is_empty():
            queue = failed
        else:
            queue = Queue()
    else:
        queue = Queue(queue_name)

    return render_template("rq_dashboard/dashboard.html", workers=Worker.all(), queue=queue, queues=Queue.all())
Example #45
def workers(request, queue_index):
    queue_index = int(queue_index)
    queue = get_queue_by_index(queue_index)
    all_workers = Worker.all(queue.connection)
    workers = [worker for worker in all_workers
               if queue.name in worker.queue_names()]

    context_data = {
        'queue': queue,
        'queue_index': queue_index,
        'workers': workers,
    }
    return render(request, 'django_rq/workers.html', context_data)
Example #46
def list_workers():
    def serialize_queue_names(worker):
        return [q.name for q in worker.queues]

    workers = [
        dict(
            name=worker.name,
            queues=serialize_queue_names(worker),
            state=str(worker.get_state()),
            job=worker.get_current_job().get_call_string() if worker.get_current_job() is not None else ' '
        )
        for worker in Worker.all()
    ]
    return dict(workers=workers)
Example #47
def list_workers():
    """rq admin method
    """

    def serialize_queue_names(worker):
        """rq admin method
        """
        return [q.name for q in worker.queues]

    workers = [dict(name=worker.name,
                    queues=serialize_queue_names(worker),
                    state=worker.get_state())
               for worker in Worker.all()]
    return pretty_table(['state', 'queues', 'name'], workers)
Example #48
def list_workers():
    def serialize_queue_names(worker):
        return [q.name for q in worker.queues]

    workers = sorted((
        dict(
            name=worker.name,
            queues=serialize_queue_names(worker),
            state=str(worker.get_state()),
            current_job=serialize_current_job(
                worker.get_current_job()),
        )
        for worker in Worker.all()),
        key=lambda w: (w['state'], w['name']))
    return dict(workers=workers)
Example #49
def stats(request):
    queues = []
    for name in QUEUES:
        queue = get_queue(name)
        connection = get_connection(name)
        all_workers = Worker.all(connection=connection)
        queue_workers = [worker for worker in all_workers if queue in worker.queues]
        stat = {
            'name': name,
            'jobs': queue.count,
            'workers': len(queue_workers)
        }
        queues.append(stat)
    context_data = { 'queues': queues }
    return render(request, 'django_rq/stats.html', context_data)
Example #50
def stats(request):
    queues = []
    for index, config in enumerate(QUEUES_LIST):
        queue = get_queue_by_index(index)
        queue_data = {"name": queue.name, "jobs": queue.count, "index": index}
        if queue.name == "failed":
            queue_data["workers"] = "-"
        else:
            connection = get_connection(queue.name)
            all_workers = Worker.all(connection=connection)
            queue_workers = [worker for worker in all_workers if queue in worker.queues]
            queue_data["workers"] = len(queue_workers)
        queues.append(queue_data)

    context_data = {"queues": queues}
    return render(request, "django_rq/stats.html", context_data)
Example #51
def cleanup_ghosts():
    """
    RQ versions < 0.3.6 suffered from a race condition where workers, when
    abruptly terminated, did not have a chance to clean up their worker
    registration, leading to reports of ghosted workers in `rqinfo`.  Since
    0.3.6, new worker registrations automatically expire, and the worker will
    make sure to refresh the registrations as long as it's alive.

    This function will clean up any of such legacy ghosted workers.
    """
    conn = get_current_connection()
    for worker in Worker.all():
        if conn.ttl(worker.key) == -1:
            ttl = worker.default_worker_ttl
            conn.expire(worker.key, ttl)
            logger.info('Marked ghosted worker {0} to expire in {1} seconds.'.format(worker.name, ttl))
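
The ttl check works because Redis returns -1 for a key that exists without an expiry, which is the fingerprint of a pre-0.3.6 registration. A minimal invocation sketch, assuming an RQ connection is established first:

from rq import Connection

with Connection():  # uses the default local Redis
    cleanup_ghosts()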
Example #52
 def get_context_data(self, **kwargs):
     ctx = super(Stats, self).get_context_data(**kwargs)
     ctx.update({
         'queues': Queue.all(connection=self.connection),
         'workers': Worker.all(connection=self.connection),
         'title': 'RQ Status',
     })
     if Scheduler:
         scheduler = Scheduler(self.connection)
         get_queue = lambda job: job.origin
         all_jobs = sorted(scheduler.get_jobs(), key=get_queue)
         ctx['scheduler'] = scheduler
         ctx['scheduled_queues'] = [
             {'name': queue, 'job_count': len(list(jobs))}
             for queue, jobs in groupby(all_jobs, get_queue)]
     return ctx
Example #53
def split_ajax(fr, to, day, time, time_ret):
    bottle.response.set_header('Cache-Control', 'max-age=0')
    context = context_init(fr, to, day, time, time_ret)

    q = MyQueue(connection=R)
    job = q.fetch_job(get_job_id(context))

    done = job and (job.is_finished or job.is_failed)
    include_me = 1 if job else 0
    busy_workers = len([ w for w in Worker.all(connection=R) if w.get_state() == 'busy' ]) - include_me
    busy_workers += q.count
    return {
        'done': done,
        'refresh': max(1, busy_workers),
        'queue_size': max(0, busy_workers),
    }
Example #54
 def execute(self):
     # It is always possible that the Redis connection is not yet set
     print("ENTER")
     if not get_current_connection():
         conn = Redis('localhost', settings.REDIS_PORT)
         use_connection(conn)
     if not get_current_connection():
         log.error(u'Unable to create redis connection')
     # Use the 'default' queue; we only use this one.
     q = Queue()
     # If the queue is not empty, some old idle workers may have to be cleaned up.
     if not q.is_empty():
         for w in Worker.all():
             if w.state == 'idle' and q in w.queues:
                 log.info(u'Worker %s will die gently' % w.name)
                 w.register_death()