Example #1
File: test_queue.py Project: bradleyy/rq
    def test_all_queues(self):
        """All queues"""
        q1 = Queue('first-queue')
        q2 = Queue('second-queue')
        q3 = Queue('third-queue')

        # Ensure a queue is added only once a job is enqueued
        self.assertEqual(len(Queue.all()), 0)
        q1.enqueue(say_hello)
        self.assertEqual(len(Queue.all()), 1)

        # Ensure this holds true for multiple queues
        q2.enqueue(say_hello)
        q3.enqueue(say_hello)
        names = [q.name for q in Queue.all()]
        self.assertEqual(len(Queue.all()), 3)

        # Verify names
        self.assertTrue('first-queue' in names)
        self.assertTrue('second-queue' in names)
        self.assertTrue('third-queue' in names)

        # Now empty two queues
        w = Worker([q2, q3])
        w.work(burst=True)

        # Queue.all() should still report the empty queues
        self.assertEqual(len(Queue.all()), 3)
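These tests enqueue a say_hello fixture from the project's tests/fixtures.py; a minimal sketch of such a fixture (the body is inferred from the greeting asserted in Example #5):

def say_hello(name=None):
    """A trivial job with an optional argument and a return value."""
    if name is None:
        name = 'Stranger'
    return 'Hi there, %s!' % name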
Example #2
from redis import Redis
from rq import Worker

redis_client = Redis()  # assumed: the original snippet creates redis_client elsewhere


def main():
    worker = Worker(
        ['high', 'default', 'low'],
        connection=redis_client
    )
    worker.work()
Example #3
File: test_cli.py Project: luisbc92/rq
    def test_cli_enqueue_schedule_in(self):
        """rq enqueue -u <url> tests.fixtures.say_hello --schedule-in 1s"""
        queue = Queue(connection=self.connection)
        registry = ScheduledJobRegistry(queue=queue)
        worker = Worker(queue)
        scheduler = RQScheduler(queue, self.connection)

        self.assertTrue(len(queue) == 0)
        self.assertTrue(len(registry) == 0)

        runner = CliRunner()
        result = runner.invoke(main, [
            'enqueue', '-u', self.redis_url, 'tests.fixtures.say_hello',
            '--schedule-in', '10s'
        ])
        self.assert_normal_execution(result)

        scheduler.acquire_locks()
        scheduler.enqueue_scheduled_jobs()

        self.assertTrue(len(queue) == 0)
        self.assertTrue(len(registry) == 1)

        self.assertFalse(worker.work(True))

        sleep(11)

        scheduler.enqueue_scheduled_jobs()

        self.assertTrue(len(queue) == 1)
        self.assertTrue(len(registry) == 0)

        self.assertTrue(worker.work(True))
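The CLI's --schedule-in flag corresponds to Queue.enqueue_in on the Python side; a minimal hedged sketch, reusing the queue and say_hello names from the test:

from datetime import timedelta

# The job sits in ScheduledJobRegistry until a scheduler moves it onto the queue.
job = queue.enqueue_in(timedelta(seconds=10), say_hello)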
Example #4
    def test_job_execution(self):
        """Job is removed from StartedJobRegistry after execution."""
        registry = StartedJobRegistry(connection=self.testconn)
        queue = Queue(connection=self.testconn)
        worker = Worker([queue])

        job = queue.enqueue(say_hello)
        self.assertTrue(job.is_queued)

        worker.prepare_job_execution(job)
        self.assertIn(job.id, registry.get_job_ids())
        self.assertTrue(job.is_started)

        worker.perform_job(job, queue)
        self.assertNotIn(job.id, registry.get_job_ids())
        self.assertTrue(job.is_finished)

        # Job that fails
        job = queue.enqueue(div_by_zero)

        worker.prepare_job_execution(job)
        self.assertIn(job.id, registry.get_job_ids())

        worker.perform_job(job, queue)
        self.assertNotIn(job.id, registry.get_job_ids())
Example #5
File: test_cli.py Project: luisbc92/rq
    def test_cli_enqueue_with_serializer(self):
        """rq enqueue -u <url> -S rq.serializers.JSONSerializer tests.fixtures.say_hello"""
        queue = Queue(connection=self.connection, serializer=JSONSerializer)
        self.assertTrue(queue.is_empty())

        runner = CliRunner()
        result = runner.invoke(main, [
            'enqueue', '-u', self.redis_url, '-S',
            'rq.serializers.JSONSerializer', 'tests.fixtures.say_hello'
        ])
        self.assert_normal_execution(result)

        prefix = 'Enqueued tests.fixtures.say_hello() with job-id \''
        suffix = '\'.\n'

        self.assertTrue(result.output.startswith(prefix))
        self.assertTrue(result.output.endswith(suffix))

        job_id = result.output[len(prefix):-len(suffix)]
        queue_key = 'rq:queue:default'
        self.assertEqual(self.connection.llen(queue_key), 1)
        self.assertEqual(
            self.connection.lrange(queue_key, 0, -1)[0].decode('ascii'),
            job_id)

        worker = Worker(queue, serializer=JSONSerializer)
        worker.work(True)
        self.assertEqual(
            Job(job_id, serializer=JSONSerializer).result,
            'Hi there, Stranger!')
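The serializer has to match everywhere a job is touched; a hedged sketch of the pattern this test depends on (redis here stands for any redis-py connection):

queue = Queue(connection=redis, serializer=JSONSerializer)
worker = Worker([queue], serializer=JSONSerializer)  # must match the queue's serializer
job = Job.fetch(job_id, connection=redis, serializer=JSONSerializer)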
Example #6
File: test_cli.py Project: luisbc92/rq
    def test_cli_enqueue_args(self):
        """rq enqueue -u <url> tests.fixtures.echo hello ':[1, {"key": "value"}]' json:=["abc"] nojson=def"""
        queue = Queue(connection=self.connection)
        self.assertTrue(queue.is_empty())

        runner = CliRunner()
        result = runner.invoke(main, [
            'enqueue', '-u', self.redis_url, 'tests.fixtures.echo', 'hello',
            ':[1, {"key": "value"}]', ':@tests/test.json', '%1, 2',
            'json:=[3.0, true]', 'nojson=abc', 'file=@tests/test.json'
        ])
        self.assert_normal_execution(result)

        job_id = self.connection.lrange('rq:queue:default', 0,
                                        -1)[0].decode('ascii')

        worker = Worker(queue)
        worker.work(True)

        args, kwargs = Job(job_id).result

        self.assertEqual(
            args,
            ('hello', [1, {'key': 'value'}], {'test': True}, (1, 2)))
        self.assertEqual(kwargs, {
            'json': [3.0, True],
            'nojson': 'abc',
            'file': '{\n    "test": true\n}\n'
        })
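How the enqueue CLI parses each argument above, inferred from the test's assertions:

#   'hello'                  -> positional string 'hello'
#   ':[1, {"key": "value"}]' -> positional JSON value
#   ':@tests/test.json'      -> positional JSON loaded from a file
#   '%1, 2'                  -> positional Python literal, here the tuple (1, 2)
#   'json:=[3.0, true]'      -> keyword argument json=[3.0, True] (JSON)
#   'nojson=abc'             -> keyword argument nojson='abc' (plain string)
#   'file=@tests/test.json'  -> keyword argument file=<raw file contents>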
Example #7
File: test_job.py Project: friedcell/rq
    def test_job_access_within_job_function(self):
        """The current job is accessible within the job function."""
        q = Queue()
        q.enqueue(fixtures.access_self)  # access_self calls get_current_job() and asserts
        w = Worker([q])
        w.work(burst=True)
        assert get_failed_queue(self.testconn).count == 0
Example #9
    def test_job_access_within_job_function(self):
        """The current job is accessible within the job function."""
        q = Queue()
        q.enqueue(fixtures.access_self)  # access_self calls get_current_job() and asserts
        w = Worker([q])
        w.work(burst=True)
Example #10
File: test_job.py Project: nvie/rq
    def test_job_access_within_job_function(self):
        """The current job is accessible within the job function."""
        q = Queue()
        job = q.enqueue(fixtures.access_self)
        w = Worker([q])
        w.work(burst=True)
        # access_self calls get_current_job() and executes successfully
        self.assertEqual(job.get_status(), JobStatus.FINISHED)
Example #12
    def test_can_enqueue_job_if_dependency_is_deleted(self):
        queue = Queue(connection=self.testconn)

        dependency_job = queue.enqueue(fixtures.say_hello, result_ttl=0)

        w = Worker([queue])
        w.work(burst=True)

        assert queue.enqueue(fixtures.say_hello, depends_on=dependency_job)
Example #13
File: fixtures.py Project: luisbc92/rq
def start_worker(queue_name, conn_kwargs, worker_name, burst):
    """
    Start a worker. We accept only serializable args, so that this can be
    executed via multiprocessing.
    """
    # Silence stdout (thanks to <https://stackoverflow.com/a/28321717/14153673>)
    with open(os.devnull, 'w') as devnull:
        with contextlib.redirect_stdout(devnull):
            w = Worker([queue_name], name=worker_name, connection=Redis(**conn_kwargs))
            w.work(burst=burst)
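Since start_worker takes only serializable arguments, it can be launched in a child process; a hedged usage sketch (argument values assumed):

from multiprocessing import Process

p = Process(target=start_worker,
            args=('default', {'host': 'localhost', 'port': 6379}, 'test-worker', True))
p.start()
p.join()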
Example #14
    def __init__(self):
        load_dotenv(verbose=True)

        listen = ['high', 'default', 'low']
        redis_host = os.getenv("REDIS_HOST", "localhost")
        redis_port = int(os.getenv("REDIS_PORT", "6379"))
        redis_password = os.getenv("REDIS_PASSWORD", "")
        conn = Redis(host=redis_host, port=redis_port, password=redis_password, db=0)
        with Connection(conn):
            self.worker = Worker(map(Queue, listen))
Example #15
    def test_requeue_with_serializer(self):
        """FailedJobRegistry.requeue works properly (with serializer)"""
        queue = Queue(connection=self.testconn, serializer=JSONSerializer)
        job = queue.enqueue(div_by_zero, failure_ttl=5)

        worker = Worker([queue], serializer=JSONSerializer)
        worker.work(burst=True)

        registry = FailedJobRegistry(connection=worker.connection,
                                     serializer=JSONSerializer)
        self.assertTrue(job in registry)

        registry.requeue(job.id)
        self.assertFalse(job in registry)
        self.assertIn(job.id, queue.get_job_ids())

        job.refresh()
        self.assertEqual(job.get_status(), JobStatus.QUEUED)
        self.assertEqual(job.started_at, None)
        self.assertEqual(job.ended_at, None)

        worker.work(burst=True)
        self.assertTrue(job in registry)

        # Should also work with job instance
        registry.requeue(job)
        self.assertFalse(job in registry)
        self.assertIn(job.id, queue.get_job_ids())

        job.refresh()
        self.assertEqual(job.get_status(), JobStatus.QUEUED)

        worker.work(burst=True)
        self.assertTrue(job in registry)

        # requeue_job should work the same way
        requeue_job(job.id,
                    connection=self.testconn,
                    serializer=JSONSerializer)
        self.assertFalse(job in registry)
        self.assertIn(job.id, queue.get_job_ids())

        job.refresh()
        self.assertEqual(job.get_status(), JobStatus.QUEUED)

        worker.work(burst=True)
        self.assertTrue(job in registry)

        # And so does job.requeue()
        job.requeue()
        self.assertFalse(job in registry)
        self.assertIn(job.id, queue.get_job_ids())

        job.refresh()
        self.assertEqual(job.get_status(), JobStatus.QUEUED)
Example #17
    def test_dependents_are_met_if_dependency_is_deleted(self):
        queue = Queue(connection=self.testconn)

        dependency_job = queue.enqueue(fixtures.say_hello, result_ttl=0)
        dependent_job = queue.enqueue(fixtures.say_hello, depends_on=dependency_job)

        w = Worker([queue])
        w.work(burst=True, max_jobs=1)

        assert dependent_job.dependencies_are_met()
        assert dependent_job.get_status() == JobStatus.QUEUED
Example #18
File: test_job.py Project: f0cker/rq
    def test_dependencies_are_met_at_execution_time(self):
        queue = Queue(connection=self.testconn)

        queue.enqueue(fixtures.say_hello, job_id="A")
        queue.enqueue(fixtures.say_hello, job_id="B")
        job_C = queue.enqueue(fixtures.check_dependencies_are_met, job_id="C", depends_on=["A", "B"])

        w = Worker([queue])
        w.work(burst=True)

        assert job_C.result
Example #19
    def test_work(self):
        queue = Queue(connection=self.testconn)
        worker = Worker(queues=[queue], connection=self.testconn)
        p = Process(target=kill_worker, args=(os.getpid(), False, 5))

        p.start()
        queue.enqueue_at(datetime(2019, 1, 1, tzinfo=timezone.utc), say_hello)
        worker.work(burst=False, with_scheduler=True)
        p.join(1)
        self.assertIsNotNone(worker.scheduler)
        registry = FinishedJobRegistry(queue=queue)
        self.assertEqual(len(registry), 1)
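kill_worker here is a test helper that signals the worker from a separate process so the blocking work() call returns; a hedged sketch of such a helper (the real fixture lives in rq's tests/fixtures.py, exact timing assumed):

import os
import signal
import time

def kill_worker(pid, double_kill, interval=0.5):
    # Give the worker time to start, then request a warm shutdown.
    time.sleep(interval)
    os.kill(pid, signal.SIGTERM)
    if double_kill:
        # A second signal during shutdown forces a cold shutdown.
        time.sleep(0.5)
        os.kill(pid, signal.SIGTERM)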
Example #20
    def test_info_only_workers(self):
        """rq info -u <url> --only-workers (-W)"""
        runner = CliRunner()
        result = runner.invoke(
            main, ['info', '-u', self.redis_url, '--only-workers'])
        self.assert_normal_execution(result)
        self.assertIn('0 workers, 0 queue', result.output)

        result = runner.invoke(
            main,
            ['info', '--by-queue', '-u', self.redis_url, '--only-workers'])
        self.assert_normal_execution(result)
        self.assertIn('0 workers, 0 queue', result.output)

        queue = Queue(connection=self.connection)
        queue.enqueue(say_hello)
        result = runner.invoke(
            main, ['info', '-u', self.redis_url, '--only-workers'])
        self.assert_normal_execution(result)
        self.assertIn('0 workers, 1 queues', result.output)

        foo_queue = Queue(name='foo', connection=self.connection)
        foo_queue.enqueue(say_hello)

        bar_queue = Queue(name='bar', connection=self.connection)
        bar_queue.enqueue(say_hello)

        worker = Worker([foo_queue, bar_queue], connection=self.connection)
        worker.register_birth()

        worker_2 = Worker([foo_queue, bar_queue], connection=self.connection)
        worker_2.register_birth()
        worker_2.set_state(WorkerStatus.BUSY)

        result = runner.invoke(
            main,
            ['info', 'foo', 'bar', '-u', self.redis_url, '--only-workers'])

        self.assert_normal_execution(result)
        self.assertIn('2 workers, 2 queues', result.output)

        result = runner.invoke(main, [
            'info', 'foo', 'bar', '--by-queue', '-u', self.redis_url,
            '--only-workers'
        ])

        self.assert_normal_execution(result)
        # Ensure both queues' workers are shown
        self.assertIn('foo:', result.output)
        self.assertIn('bar:', result.output)
        self.assertIn('2 workers, 2 queues', result.output)
Example #21
    def test_job_deletion(self):
        """Ensure job is removed from StartedJobRegistry when deleted."""
        registry = StartedJobRegistry(connection=self.testconn)
        queue = Queue(connection=self.testconn)
        worker = Worker([queue])

        job = queue.enqueue(say_hello)
        self.assertTrue(job.is_queued)

        worker.prepare_job_execution(job)
        self.assertIn(job.id, registry.get_job_ids())

        job.delete()
        self.assertNotIn(job.id, registry.get_job_ids())
Example #23
def worker(worker_num, backend):
    import subprocess
    print('Worker %i started' % worker_num)
    if backend == 'pq':
        subprocess.call('django-admin.py pqworker benchmark -b', shell=True)
    elif backend == 'rq':
        from rq.worker import Worker
        from redis import Redis
        from rq import Queue
        q = Queue('benchmark', connection=Redis())
        w = Worker(q, connection=Redis())
        w.work(burst=False)
    print('Worker %i fin' % worker_num)
    return
Example #24
    def test_jobs_are_put_in_registry(self):
        """Completed jobs are added to FinishedJobRegistry."""
        self.assertEqual(self.registry.get_job_ids(), [])
        queue = Queue(connection=self.testconn)
        worker = Worker([queue])

        # Completed jobs are put in FinishedJobRegistry
        job = queue.enqueue(say_hello)
        worker.perform_job(job)
        self.assertEqual(self.registry.get_job_ids(), [job.id])

        # Failed jobs are not put in FinishedJobRegistry
        failed_job = queue.enqueue(div_by_zero)
        worker.perform_job(failed_job)
        self.assertEqual(self.registry.get_job_ids(), [job.id])
Example #25
File: bp.py Project: eyecat/rqmonitor
def list_workers_api():
    workers_list = Worker.all()
    rq_workers = []
    for worker in workers_list:
        host_ip_using_name = "N/A"
        try:
            host_ip_using_name = socket.gethostbyname(worker.hostname)
        except socket.gaierror:
            pass

        rq_workers.append({
            'worker_name': worker.name,
            'listening_on': ', '.join(queue.name for queue in worker.queues),
            'status': worker.get_state() if not is_suspended(get_current_connection()) else "suspended",
            'host_ip': host_ip_using_name,
            'current_job_id': worker.get_current_job_id(),
            'failed_jobs': worker.failed_job_count,
        })
    return {
        'data': rq_workers,
    }
Example #26
File: views.py Project: SIDERMIT/backend
    def cancel_optimization(self, request, public_id=None):
        transport_network_obj = self.get_object()
        if transport_network_obj.optimization_status in [
                TransportNetwork.STATUS_ERROR, TransportNetwork.STATUS_FINISHED
        ]:
            raise ValidationError('Optimization is not running or queued')

        redis_conn = get_connection()
        workers = Worker.all(redis_conn)
        for worker in workers:
            if worker.state == WorkerStatus.BUSY and \
                    worker.get_current_job_id() == str(transport_network_obj.job_id):
                send_kill_horse_command(redis_conn, worker.name)

        # remove from queue
        cancel_job(str(transport_network_obj.job_id), connection=redis_conn)

        transport_network_obj.optimization_status = None
        transport_network_obj.optimization_ran_at = None
        transport_network_obj.optimization_error_message = None
        transport_network_obj.save()

        return Response(
            TransportNetworkSerializer(transport_network_obj).data,
            status.HTTP_200_OK)
Example #27
def worker_details(request, queue_index, key):
    queue_index = int(queue_index)
    queue = get_queue_by_index(queue_index)
    worker = Worker.find_by_key(key, connection=queue.connection)

    try:
        # Convert microseconds to milliseconds
        worker.total_working_time = worker.total_working_time / 1000
    except AttributeError:
        # older version of rq do not have `total_working_time`
        worker.total_working_time = "-"

    queue_names = ', '.join(worker.queue_names())

    def get_job_graceful(worker):
        if not worker:
            return None
        try:
            return worker.get_current_job()
        except NoSuchJobError:
            return None

    context_data = {
        'queue': queue,
        'queue_index': queue_index,
        'worker': worker,
        'queue_names': queue_names,
        'job': get_job_graceful(worker)
    }
    return render(request, 'django_rq/worker_details.html', context_data)
Example #28
def workers():
    counter = Counter()
    for w in Worker.all(connection=worker.connection):
        for q in w.queues:
            counter[q.name] += 1
    import pprint
    pprint.pprint(dict(counter))
Example #29
    def get(self, request):
        # Gather the version numbers from all installed Django apps
        installed_apps = {}
        for app_config in apps.get_app_configs():
            app = app_config.module
            version = getattr(app, "VERSION", getattr(app, "__version__",
                                                      None))
            if version:
                if type(version) is tuple:
                    version = ".".join(str(n) for n in version)
                installed_apps[app_config.name] = version
        installed_apps = {k: v for k, v in sorted(installed_apps.items())}

        return Response({
            "django-version": DJANGO_VERSION,
            "installed-apps": installed_apps,
            "peering-manager-version": settings.VERSION,
            "python-version": platform.python_version(),
            "rq-workers-running": Worker.count(get_connection("default")),
        })
Example #30
File: views.py Project: matt852/nautobot
    def get(self, request):
        # Gather the version numbers from all installed Django apps
        installed_apps = {}
        for app_config in apps.get_app_configs():
            app = app_config.module
            version = getattr(app, "VERSION", getattr(app, "__version__", None))
            if version:
                if isinstance(version, tuple):
                    version = ".".join(str(n) for n in version)
            installed_apps[app_config.name] = version
        installed_apps = {k: v for k, v in sorted(installed_apps.items())}

        # Gather installed plugins
        plugins = {}
        for plugin_name in settings.PLUGINS:
            plugin_name = plugin_name.rsplit(".", 1)[-1]
            plugin_config = apps.get_app_config(plugin_name)
            plugins[plugin_name] = getattr(plugin_config, "version", None)
        plugins = {k: v for k, v in sorted(plugins.items())}

        # Gather Celery workers
        workers = celery_app.control.inspect().active()  # dict keyed by worker, or None
        worker_count = len(workers) if workers is not None else 0

        return Response(
            {
                "django-version": DJANGO_VERSION,
                "installed-apps": installed_apps,
                "nautobot-version": settings.VERSION,
                "plugins": plugins,
                "python-version": platform.python_version(),
                "rq-workers-running": RQWorker.count(get_rq_connection("default")),
                "celery-workers-running": worker_count,
            }
        )
Example #31
def rq_workers(queue=None):
    # type: (Queue) -> List[Worker]
    """
    Returns the list of current rq ``Worker``s.
    """

    return Worker.all(connection=Redis(), queue=queue)
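A hedged usage sketch, filtering the result by worker state (get_current_job is part of rq's Worker API):

workers = rq_workers()
busy = [w for w in workers if w.get_current_job() is not None]
print('%d of %d workers are busy' % (len(busy), len(workers)))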
Example #32
    def get(self, request):
        # Gather the version numbers from all installed Django apps
        installed_apps = {}
        for app_config in apps.get_app_configs():
            app = app_config.module
            version = getattr(app, 'VERSION', getattr(app, '__version__', None))
            if version:
                if type(version) is tuple:
                    version = '.'.join(str(n) for n in version)
                installed_apps[app_config.name] = version
        installed_apps = {k: v for k, v in sorted(installed_apps.items())}

        # Gather installed plugins
        plugins = {}
        for plugin_name in settings.PLUGINS:
            plugin_name = plugin_name.rsplit('.', 1)[-1]
            plugin_config = apps.get_app_config(plugin_name)
            plugins[plugin_name] = getattr(plugin_config, 'version', None)
        plugins = {k: v for k, v in sorted(plugins.items())}

        return Response({
            'django-version': DJANGO_VERSION,
            'installed-apps': installed_apps,
            'netbox-version': settings.VERSION,
            'plugins': plugins,
            'python-version': platform.python_version(),
            'rq-workers-running': Worker.count(get_connection('default')),
        })
Example #33
def worker(worker_num, backend):
    import subprocess

    print("Worker %i started" % worker_num)
    if backend == "pq":
        subprocess.call("django-admin.py pqworker benchmark -b", shell=True)
    elif backend == "rq":
        from rq.worker import Worker
        from redis import Redis
        from rq import Queue

        q = Queue("benchmark", connection=Redis())
        w = Worker(q, connection=Redis())
        w.work(burst=False)
    print("Worker %i fin" % worker_num)
    return
Example #34
def delete_workers(worker_ids, signal_to_pass=signal.SIGINT):
    """
    Expect worker ID without RQ REDIS WORKER NAMESPACE PREFIX of rq:worker:
    By default performs warm shutdown

    :param worker_id: list of worker id's to delete
    :param signal_to_pass:
    :return:
    """

    # find worker instance by key, refreshes worker implicitly
    def attach_rq_worker_prefix(worker_id):
        return Worker.redis_worker_namespace_prefix + worker_id

    try:
        for worker_instance in [
                Worker.find_by_key(attach_rq_worker_prefix(worker_id))
                for worker_id in worker_ids
        ]:
            # kill if on same instance
            if socket.gethostname() == worker_instance.hostname.decode(
                    'utf-8'):
                os.kill(worker_instance.pid, signal_to_pass)
    except ValueError:
        logger.warning('Problem in deleting workers {0}'.format(worker_ids))
        return False

    return True
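For workers on other hosts, rq 1.6+ also offers a command channel over Redis, which avoids OS signals entirely; a hedged sketch (worker_name assumed):

from redis import Redis
from rq.command import send_shutdown_command

# Requests a warm shutdown through the shared Redis connection;
# no access to the worker's host is required.
send_shutdown_command(Redis(), worker_name)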
Example #36
File: utils.py Project: matt852/nautobot
def get_worker_count(request=None):
    """
    Return a count of the active Celery workers.
    """
    # Inner imports so we don't risk circular imports
    from nautobot.core.celery import app  # noqa
    from rq.worker import Worker  # noqa
    from django_rq.queues import get_connection  # noqa

    # Try RQ first, since it's faster.
    rq_count = Worker.count(get_connection("default"))

    # Celery next, since it's slower.
    inspect = app.control.inspect()
    active = inspect.active()  # None if no active workers
    celery_count = len(active) if active is not None else 0

    if rq_count and not celery_count:
        if request:
            messages.warning(
                request,
                "RQ workers are deprecated. Please migrate your workers to Celery."
            )

    return celery_count
Example #37
    def get(self, request):
        # Gather the version numbers from all installed Django apps
        installed_apps = {}
        for app_config in apps.get_app_configs():
            app = app_config.module
            version = getattr(app, "VERSION", getattr(app, "__version__",
                                                      None))
            if version:
                if type(version) is tuple:
                    version = ".".join(str(n) for n in version)
                installed_apps[app_config.name] = version
        installed_apps = {k: v for k, v in sorted(installed_apps.items())}

        # Gather installed plugins
        plugins = {}
        for plugin_name in settings.PLUGINS:
            plugin_name = plugin_name.rsplit(".", 1)[-1]
            plugin_config = apps.get_app_config(plugin_name)
            plugins[plugin_name] = getattr(plugin_config, "version", None)
        plugins = {k: v for k, v in sorted(plugins.items())}

        return Response({
            "django-version": DJANGO_VERSION,
            "installed-apps": installed_apps,
            "nautobot-version": settings.VERSION,
            "plugins": plugins,
            "python-version": platform.python_version(),
            "rq-workers-running": Worker.count(get_connection("default")),
        })
Example #38
def delete_workers(worker_ids, signal_to_pass=signal.SIGINT):
    """
    Expect worker ID without RQ REDIS WORKER NAMESPACE PREFIX of rq:worker:
    By default performs warm shutdown

    :param worker_id: list of worker id's to delete
    :param signal_to_pass:
    :return:
    """
    # find worker instance by key, refreshes worker implicitly
    def attach_rq_worker_prefix(worker_id):
        return Worker.redis_worker_namespace_prefix + worker_id

    for worker_instance in [Worker.find_by_key(attach_rq_worker_prefix(worker_id))
                            for worker_id in worker_ids]:
        requested_hostname = worker_instance.hostname
        requested_hostname = requested_hostname.decode('utf-8')
        # kill if on same instance
        if socket.gethostname() == requested_hostname:
            os.kill(worker_instance.pid, signal_to_pass)
        else:
            required_host_ip = socket.gethostbyname(requested_hostname)
            fabric_config_wrapper = Config()
            # loads from user level ssh config (~/.ssh/config) and system level
            # config /etc/ssh/ssh_config
            fabric_config_wrapper.load_ssh_config()
            # to use its ssh_config parser abilities
            paramiko_ssh_config = fabric_config_wrapper.base_ssh_config
            for hostname in paramiko_ssh_config.get_hostnames():
                ssh_info = paramiko_ssh_config.lookup(hostname)
                available_host_ip = ssh_info.get('hostname')
                if available_host_ip == required_host_ip:
                    process_owner = None
                    # make a connection via fabric and send the requested signal
                    ssh_connection = Connection(hostname)
                    try:
                        # Find the owner of the process:
                        # https://unix.stackexchange.com/questions/284934/return-owner-of-process-given-pid
                        process_owner = ssh_connection.run('ps -o user= -p {0}'.format(worker_instance.pid))
                        # Works without sudo as long as we have permission to kill;
                        # other cases still need to be handled.
                        process_owner = process_owner.stdout.strip(' \n\t')
                        result_kill = ssh_connection.run('kill -{0} {1}'.format(int(signal_to_pass), worker_instance.pid), hide=True)
                        if result_kill.failed:
                            raise RQMonitorException("Some issue occurred while running command {0.command!r} "
                                                     "on {0.connection.host}, we got stdout:\n{0.stdout}\n"
                                                     "and stderr:\n{0.stderr}".format(result_kill))
                    except UnexpectedExit as e:
                        stdout, stderr = e.streams_for_display()
                        # Plan to accept a password from the user and proceed with sudo in the future
                        if "Operation not permitted" in stderr.strip(' \n\t'):
                            raise RQMonitorException('Logged in user {0} does not have permission to kill worker'
                                                     ' process with pid {1} on {2} because it is owned'
                                                     ' by user {3}'.format(ssh_info.get('user'), worker_instance.pid,
                                                                           required_host_ip, process_owner))
                        raise RQMonitorException('Invoke\'s UnexpectedExit occurred with '
                                                 'stdout: {0}\nstderr: {1}\nresult: {2}\nreason: {3}'.format(stdout.strip(' \n\t'),
                                                                                                             stderr.strip(' \n\t'),
                                                                                                             e.result, e.reason))
                    return
Example #39
def workers():
    """Show information on salactus workers. (slow)"""
    counter = Counter()
    for w in Worker.all(connection=worker.connection):
        for q in w.queues:
            counter[q.name] += 1
    import pprint
    pprint.pprint(dict(counter))
Example #41
def worker(worker_num, backend):
    import subprocess

    print('Worker %i started' % worker_num)
    if backend == 'pq':
        subprocess.call(
            'django-admin.py pqworker benchmark -b', shell=True)
    elif backend == 'rq':
        from rq.worker import Worker
        from redis import Redis
        from rq import Queue

        q = Queue('benchmark', connection=Redis())
        w = Worker(q, connection=Redis())
        w.work(burst=False)
    print('Worker %i fin' % worker_num)
    return
Example #42
File: test_registry.py Project: nvie/rq
    def test_requeue(self):
        """FailedJobRegistry.requeue works properly"""
        queue = Queue(connection=self.testconn)
        job = queue.enqueue(div_by_zero, failure_ttl=5)

        worker = Worker([queue])
        worker.work(burst=True)

        registry = FailedJobRegistry(connection=worker.connection)
        self.assertTrue(job in registry)

        registry.requeue(job.id)
        self.assertFalse(job in registry)
        self.assertIn(job.id, queue.get_job_ids())

        job.refresh()
        self.assertEqual(job.get_status(), JobStatus.QUEUED)

        worker.work(burst=True)
        self.assertTrue(job in registry)

        # Should also work with job instance
        registry.requeue(job)
        self.assertFalse(job in registry)
        self.assertIn(job.id, queue.get_job_ids())

        job.refresh()
        self.assertEqual(job.get_status(), JobStatus.QUEUED)

        worker.work(burst=True)
        self.assertTrue(job in registry)

        # requeue_job should work the same way
        requeue_job(job.id, connection=self.testconn)
        self.assertFalse(job in registry)
        self.assertIn(job.id, queue.get_job_ids())

        job.refresh()
        self.assertEqual(job.get_status(), JobStatus.QUEUED)

        worker.work(burst=True)
        self.assertTrue(job in registry)

        # And so does job.requeue()
        job.requeue()
        self.assertFalse(job in registry)
        self.assertIn(job.id, queue.get_job_ids())

        job.refresh()
        self.assertEqual(job.get_status(), JobStatus.QUEUED)
Example #43
    def test_jobs_are_put_in_registry(self):
        """Completed jobs are added to FinishedJobRegistry."""
        self.assertEqual(self.registry.get_job_ids(), [])
        queue = Queue(connection=self.testconn)
        worker = Worker([queue])

        # Completed jobs are put in FinishedJobRegistry
        job = queue.enqueue(say_hello)
        worker.perform_job(job, queue)
        self.assertEqual(self.registry.get_job_ids(), [job.id])

        # When job is deleted, it should be removed from FinishedJobRegistry
        self.assertEqual(job.get_status(), JobStatus.FINISHED)
        job.delete()
        self.assertEqual(self.registry.get_job_ids(), [])

        # Failed jobs are not put in FinishedJobRegistry
        failed_job = queue.enqueue(div_by_zero)
        worker.perform_job(failed_job, queue)
        self.assertEqual(self.registry.get_job_ids(), [])
Example #44
File: utils.py Project: ui/django-rq
def get_statistics():
    queues = []
    for index, config in enumerate(QUEUES_LIST):

        queue = get_queue_by_index(index)
        connection = queue.connection
        connection_kwargs = connection.connection_pool.connection_kwargs

        # Raw access to the first item from the left of the Redis list.
        # This might not be accurate, since a new job can be added at the
        # front with the `at_front` parameter.
        # Ideally rq should support Queue.oldest_job
        last_job_id = connection.lindex(queue.key, 0)
        last_job = queue.fetch_job(last_job_id.decode('utf-8')) if last_job_id else None
        if last_job:
            oldest_job_timestamp = to_localtime(last_job.enqueued_at)\
                .strftime('%Y-%m-%d, %H:%M:%S')
        else:
            oldest_job_timestamp = "-"

        # parser_class and connection_pool are not needed and not JSON serializable
        connection_kwargs.pop('parser_class', None)
        connection_kwargs.pop('connection_pool', None)

        queue_data = {
            'name': queue.name,
            'jobs': queue.count,
            'oldest_job_timestamp': oldest_job_timestamp,
            'index': index,
            'connection_kwargs': connection_kwargs
        }

        if queue.name == 'failed':
            queue_data['workers'] = '-'
            queue_data['finished_jobs'] = '-'
            queue_data['started_jobs'] = '-'
            queue_data['deferred_jobs'] = '-'

        else:
            connection = get_connection(queue.name)
            queue_data['workers'] = Worker.count(queue=queue)

            finished_job_registry = FinishedJobRegistry(queue.name, connection)
            started_job_registry = StartedJobRegistry(queue.name, connection)
            deferred_job_registry = DeferredJobRegistry(queue.name, connection)
            failed_job_registry = FailedJobRegistry(queue.name, connection)
            queue_data['finished_jobs'] = len(finished_job_registry)
            queue_data['started_jobs'] = len(started_job_registry)
            queue_data['deferred_jobs'] = len(deferred_job_registry)
            queue_data['failed_jobs'] = len(failed_job_registry)

        queues.append(queue_data)
    return {'queues': queues}
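Newer rq versions also expose these registries as properties on Queue, which avoids building them by name; a hedged sketch of the equivalent lookups:

queue_data['finished_jobs'] = len(queue.finished_job_registry)
queue_data['started_jobs'] = len(queue.started_job_registry)
queue_data['deferred_jobs'] = len(queue.deferred_job_registry)
queue_data['failed_jobs'] = len(queue.failed_job_registry)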
Example #45
File: test_registry.py Project: nvie/rq
    def test_worker_handle_job_failure(self):
        """Failed jobs are added to FailedJobRegistry"""
        q = Queue(connection=self.testconn)

        w = Worker([q])
        registry = FailedJobRegistry(connection=w.connection)

        timestamp = current_timestamp()

        job = q.enqueue(div_by_zero, failure_ttl=5)
        w.handle_job_failure(job)
        # job is added to FailedJobRegistry with default failure ttl
        self.assertIn(job.id, registry.get_job_ids())
        self.assertLess(self.testconn.zscore(registry.key, job.id),
                        timestamp + DEFAULT_FAILURE_TTL + 5)

        # job is added to FailedJobRegistry with specified ttl
        job = q.enqueue(div_by_zero, failure_ttl=5)
        w.handle_job_failure(job)
        self.assertLess(self.testconn.zscore(registry.key, job.id),
                        timestamp + 7)
Example #46
    def quantity(self):
        """
        Returns the aggregated number of tasks across the proc queues.
        """
        count = sum(client.count for client in self.clients)

        # Add any workers which are currently working jobs
        all_workers = Worker.all(connection=self.connection)
        for worker in all_workers:
            if worker.get_current_job():
                count += 1

        return count
Example #47
def workers(request, queue_index):
    queue_index = int(queue_index)
    queue = get_queue_by_index(queue_index)
    all_workers = Worker.all(queue.connection)
    workers = [worker for worker in all_workers
               if queue.name in worker.queue_names()]

    context_data = {
        'queue': queue,
        'queue_index': queue_index,
        'workers': workers,
    }
    return render(request, 'django_rq/workers.html', context_data)
Example #48
def remove_ghost_workers():
    if not OPTIONS.get('remove_ghost_workers', False):
        return

    if not redis_runs_on_same_machine():
        logger.warning('Cannot remove Ghost Workers, because the configured Redis Server is not running on localhost!')
        return

    setup_rq_connection()

    for w in Worker.all():
        if not worker_running(w):
            w.register_death()
Example #49
File: test_cli.py Project: nvie/rq
    def test_requeue(self):
        """rq requeue -u <url> --all"""
        connection = Redis.from_url(self.redis_url)
        queue = Queue('requeue', connection=connection)
        registry = queue.failed_job_registry

        runner = CliRunner()

        job = queue.enqueue(div_by_zero)
        job2 = queue.enqueue(div_by_zero)
        job3 = queue.enqueue(div_by_zero)

        worker = Worker([queue])
        worker.work(burst=True)

        self.assertIn(job, registry)
        self.assertIn(job2, registry)
        self.assertIn(job3, registry)

        result = runner.invoke(
            main,
            ['requeue', '-u', self.redis_url, '--queue', 'requeue', job.id]
        )
        self.assert_normal_execution(result)

        # Only the first specified job is requeued
        self.assertNotIn(job, registry)
        self.assertIn(job2, registry)
        self.assertIn(job3, registry)

        result = runner.invoke(
            main,
            ['requeue', '-u', self.redis_url, '--queue', 'requeue', '--all']
        )
        self.assert_normal_execution(result)
        # With --all flag, all failed jobs are requeued
        self.assertNotIn(job2, registry)
        self.assertNotIn(job3, registry)
Example #50
    def test_job_execution(self):
        """Job is removed from StartedJobRegistry after execution."""
        registry = self.conn.get_started_registry()
        queue = self.conn.mkqueue()
        worker = Worker([queue], connection=self.conn)

        job = queue.enqueue(say_hello)

        worker.prepare_job_execution(job)
        self.assertIn(job.id, registry.get_job_ids())

        worker.perform_job(job)
        self.assertNotIn(job.id, registry.get_job_ids())

        # Job that fails
        job = queue.enqueue(div_by_zero)

        worker.prepare_job_execution(job)
        self.assertIn(job.id, registry.get_job_ids())

        worker.perform_job(job)
        self.assertNotIn(job.id, registry.get_job_ids())
Example #51
File: views.py Project: ui/django-rq
def worker_details(request, queue_index, key):
    queue_index = int(queue_index)
    queue = get_queue_by_index(queue_index)
    worker = Worker.find_by_key(key, connection=queue.connection)
    # Convert microseconds to milliseconds
    worker.total_working_time = worker.total_working_time / 1000

    queue_names = ', '.join(worker.queue_names())

    context_data = {
        'queue': queue,
        'queue_index': queue_index,
        'worker': worker,
        'queue_names': queue_names,
        'job': worker.get_current_job(),
        'total_working_time': worker.total_working_time * 1000
    }
    return render(request, 'django_rq/worker_details.html', context_data)
Example #52
File: test_cli.py Project: nvie/rq
    def test_info_only_workers(self):
        """rq info -u <url> --only-workers (-W)"""
        runner = CliRunner()
        result = runner.invoke(main, ['info', '-u', self.redis_url, '--only-workers'])
        self.assert_normal_execution(result)
        self.assertIn('0 workers, 0 queue', result.output)

        queue = Queue(connection=self.connection)
        queue.enqueue(say_hello)
        result = runner.invoke(main, ['info', '-u', self.redis_url, '--only-workers'])
        self.assert_normal_execution(result)
        self.assertIn('0 workers, 1 queues', result.output)

        foo_queue = Queue(name='foo', connection=self.connection)
        foo_queue.enqueue(say_hello)

        bar_queue = Queue(name='bar', connection=self.connection)
        bar_queue.enqueue(say_hello)

        worker = Worker([foo_queue, bar_queue], connection=self.connection)
        worker.register_birth()

        worker_2 = Worker([foo_queue, bar_queue], connection=self.connection)
        worker_2.register_birth()
        worker_2.set_state(WorkerStatus.BUSY)

        result = runner.invoke(main, ['info', 'foo', 'bar',
                                      '-u', self.redis_url, '--only-workers'])

        self.assert_normal_execution(result)
        self.assertIn('2 workers, 2 queues', result.output)

        result = runner.invoke(main, ['info', 'foo', 'bar', '--by-queue',
                                      '-u', self.redis_url, '--only-workers'])

        self.assert_normal_execution(result)
        # Ensure both queues' workers are shown
        self.assertIn('foo:', result.output)
        self.assertIn('bar:', result.output)
        self.assertIn('2 workers, 2 queues', result.output)
Example #53
import os
from urllib.parse import urlparse

from redis import Redis
from rq import Queue, Connection
from rq.worker import Worker

listen = ['high', 'default', 'low']

redis_url = os.getenv('REDISTOGO_URL', 'redis://localhost:6379')

url = urlparse(redis_url)
conn = Redis(host=url.hostname, port=url.port, db=0, password=url.password)

if __name__ == '__main__':
    with Connection(conn):
        worker = Worker(map(Queue, listen))
        worker.work()
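A hedged modern variant of the same boilerplate: redis-py parses the URL directly via Redis.from_url, and recent rq releases deprecate the Connection context manager in favor of passing the connection explicitly:

conn = Redis.from_url(redis_url)

if __name__ == '__main__':
    queues = [Queue(name, connection=conn) for name in listen]
    Worker(queues, connection=conn).work()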
Example #54
    @classmethod
    def workers(cls):
        cls.connect()
        remove_ghost_workers()
        return [{'name': w.name, 'key': w.key,
                 'pid': w.pid, 'state': w.state, 'stopped': w.stopped,
                 'queues': w.queue_names()} for w in Worker.all()]
Example #55
def GetWorksByQueueName(name, host=None, port=None):
    from rq.worker import Worker
    # Worker.all() is a classmethod, so no throwaway Worker instance is needed.
    # GetQueue and default_redis are assumed to be defined elsewhere in this module;
    # newer rq accepts queue= to restrict the result to that queue's workers.
    Q = GetQueue(name, host, port)
    return Worker.all(queue=Q, connection=default_redis)