Example #1
    def get(self, request):
        # Gather the version numbers from all installed Django apps
        installed_apps = {}
        for app_config in apps.get_app_configs():
            app = app_config.module
            version = getattr(app, "VERSION", getattr(app, "__version__",
                                                      None))
            if version:
                if type(version) is tuple:
                    version = ".".join(str(n) for n in version)
                installed_apps[app_config.name] = version
        installed_apps = {k: v for k, v in sorted(installed_apps.items())}

        return Response({
            "django-version":
            DJANGO_VERSION,
            "installed-apps":
            installed_apps,
            "peering-manager-version":
            settings.VERSION,
            "python-version":
            platform.python_version(),
            "rq-workers-running":
            Worker.count(get_connection("default")),
        })
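A pattern repeated throughout this listing is counting live RQ workers before reporting status or enqueueing work. A minimal standalone sketch of that check, with the imports the view snippets above leave out:

from django_rq.queues import get_connection
from rq.worker import Worker

def rq_workers_running(queue_name="default"):
    # Count the RQ workers registered on this queue's Redis connection
    return Worker.count(get_connection(queue_name))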
Example #2
    def post(self, request, pk):
        """
        Run a Script identified as "<module>.<script>" and return the pending JobResult as the result
        """
        script = self._get_script(pk)()
        input_serializer = serializers.ScriptInputSerializer(data=request.data)

        # Check that at least one RQ worker is running
        if not Worker.count(get_connection('default')):
            raise RQWorkerNotRunningException()

        if input_serializer.is_valid():
            data = input_serializer.data['data']
            commit = input_serializer.data['commit']

            script_content_type = ContentType.objects.get(app_label='extras',
                                                          model='script')
            job_result = JobResult.enqueue_job(
                run_script,
                script.full_name,
                script_content_type,
                request.user,
                data=data,
                request=copy_safe_request(request),
                commit=commit)
            script.result = job_result
            serializer = serializers.ScriptDetailSerializer(
                script, context={'request': request})

            return Response(serializer.data)

        return Response(input_serializer.errors,
                        status=status.HTTP_400_BAD_REQUEST)
Example #3
 def register_all_dirty(self):
     """Register current TreeItem and all parent paths as dirty
     (should be called before RQ job adding)
     """
     r_con = get_connection()
     for p in self.all_pootle_paths():
         r_con.zincrby(POOTLE_DIRTY_TREEITEMS, p)
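Note the argument order here: the Pootle snippets in this listing were written against redis-py 2.x, where the signature was zincrby(name, value, amount=1); redis-py 3.0 swapped it to zincrby(name, amount, value), the order Example #24 below uses. A sketch of the same registration under redis-py 3.x:

from django_rq.queues import get_connection

POOTLE_DIRTY_TREEITEMS = 'pootle:dirty:treeitems'

def register_dirty_path(path):
    # redis-py 3.x argument order: (name, amount, value)
    get_connection().zincrby(POOTLE_DIRTY_TREEITEMS, 1, path)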
Example #4
    def get(self, request):
        # Gather the version numbers from all installed Django apps
        installed_apps = {}
        for app_config in apps.get_app_configs():
            app = app_config.module
            version = getattr(app, 'VERSION', getattr(app, '__version__',
                                                      None))
            if version:
                if type(version) is tuple:
                    version = '.'.join(str(n) for n in version)
                installed_apps[app_config.name] = version
        installed_apps = {k: v for k, v in sorted(installed_apps.items())}

        # Gather installed plugins
        plugins = {}
        for plugin_name in settings.PLUGINS:
            plugin_name = plugin_name.rsplit('.', 1)[-1]
            plugin_config = apps.get_app_config(plugin_name)
            plugins[plugin_name] = getattr(plugin_config, 'version', None)
        plugins = {k: v for k, v in sorted(plugins.items())}

        return Response({
            'django-version':
            DJANGO_VERSION,
            'installed-apps':
            installed_apps,
            'netbox-version':
            settings.VERSION,
            'plugins':
            plugins,
            'python-version':
            platform.python_version(),
            'rq-workers-running':
            Worker.count(get_connection('default')),
        })
Example #5
    def run(self, request, pk):
        """
        Run a Report identified as "<module>.<report>" and return the pending JobResult as the result
        """
        # Check that the user has permission to run reports.
        if not request.user.has_perm('extras.run_report'):
            raise PermissionDenied(
                "This user does not have permission to run reports.")

        # Check that at least one RQ worker is running
        if not Worker.count(get_connection('default')):
            raise RQWorkerNotRunningException()

        # Retrieve and run the Report. This will create a new JobResult.
        report = self._retrieve_report(pk)
        report_content_type = ContentType.objects.get(app_label='extras',
                                                      model='report')
        job_result = JobResult.enqueue_job(run_report, report.full_name,
                                           report_content_type, request.user)
        report.result = job_result

        serializer = serializers.ReportDetailSerializer(
            report, context={'request': request})

        return Response(serializer.data)
Example #6
    def post(self, request, module, name):

        # Permissions check
        if not request.user.has_perm('extras.run_script'):
            return HttpResponseForbidden()

        script = self._get_script(name, module)
        form = script.as_form(request.POST, request.FILES)

        # Allow execution only if RQ worker process is running
        if not Worker.count(get_connection('default')):
            messages.error(request, "Unable to run script: RQ worker process not running.")

        elif form.is_valid():
            commit = form.cleaned_data.pop('_commit')

            script_content_type = ContentType.objects.get(app_label='extras', model='script')
            job_result = JobResult.enqueue_job(
                run_script,
                script.full_name,
                script_content_type,
                request.user,
                data=form.cleaned_data,
                request=copy_safe_request(request),
                commit=commit
            )

            return redirect('extras:script_result', job_result_pk=job_result.pk)

        return render(request, 'extras/script.html', {
            'module': module,
            'script': script,
            'form': form,
        })
Example #7
    def post(self, request, module, name):

        # Permissions check
        if not request.user.has_perm('extras.run_report'):
            return HttpResponseForbidden()

        report = get_report(module, name)
        if report is None:
            raise Http404

        # Allow execution only if RQ worker process is running
        if not Worker.count(get_connection('default')):
            messages.error(
                request,
                "Unable to run report: RQ worker process not running.")
            return render(request, 'extras/report.html', {
                'report': report,
            })

        # Run the Report. A new JobResult is created.
        report_content_type = ContentType.objects.get(app_label='extras',
                                                      model='report')
        job_result = JobResult.enqueue_job(run_report, report.full_name,
                                           report_content_type, request.user)

        return redirect('extras:report_result', job_result_pk=job_result.pk)
Example #8
def enqueue_task(index, func, *args, **kwargs):
    '''Adds function to RQ with tracking by index

    If index is not None, then a Redis list will manage the queued/active jobs
    related to the unique index. Jobs will be removed immediately upon completion.

    Args:
        index (str): Unique key to track queued jobs by
        func (callable): Function to be queued for background worker
        *args (list): List of parameters for func
        **kwargs (dict): Dictionary of parameters for func

    Returns:
        Job: The enqueued RQ job
    '''
    queue = kwargs.pop('queue', 'default')
    redis_queue = django_rq.get_queue(queue)
    # Queue the job like normal
    job = redis_queue.enqueue(func, *args, **kwargs)

    job_position = None
    # If an index was provided, track the job; otherwise just return it.
    if index is not None:
        conn = get_connection(use_strict_redis=True)

        # Add the job to the list for the index
        job_position = conn.lpush(index, job.get_id())
        del conn

        # Enqueue the follow up to clean the queue when work is completed
        redis_queue.enqueue(cleanup_task, index, job.get_id(), depends_on=job)

    return job
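A hedged usage sketch for enqueue_task; send_weekly_report and the 'reports:team:42' index key are hypothetical stand-ins, not names from the source project:

def send_weekly_report(team_id):
    ...  # hypothetical task body

job = enqueue_task(
    'reports:team:42',   # index: Redis list that tracks this object's jobs
    send_weekly_report,  # func, forwarded to redis_queue.enqueue
    42,                  # *args for the task
    queue='default',     # popped from kwargs to select the RQ queue
)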
Example #9
    def cancel_optimization(self, request, public_id=None):
        transport_network_obj = self.get_object()
        if transport_network_obj.optimization_status in [
                TransportNetwork.STATUS_ERROR, TransportNetwork.STATUS_FINISHED
        ]:
            raise ValidationError('Optimization is not running or queued')

        redis_conn = get_connection()
        workers = Worker.all(redis_conn)
        for worker in workers:
            if worker.state == WorkerStatus.BUSY and \
                    worker.get_current_job_id() == str(transport_network_obj.job_id):
                send_kill_horse_command(redis_conn, worker.name)

        # remove from queue
        cancel_job(str(transport_network_obj.job_id), connection=redis_conn)

        transport_network_obj.optimization_status = None
        transport_network_obj.optimization_ran_at = None
        transport_network_obj.optimization_error_message = None
        transport_network_obj.save()

        return Response(
            TransportNetworkSerializer(transport_network_obj).data,
            status.HTTP_200_OK)
Example #10
    def test_get_connection_sentinel(self, sentinel_class_mock):
        """
        Test that get_connection returns the right connection for the
        `sentinel` queue.
        """
        sentinel_mock = MagicMock()
        sentinel_mock.master_for.return_value = sentinel_mock
        sentinel_class_mock.side_effect = [sentinel_mock]

        config = QUEUES['sentinel']
        connection = get_connection('sentinel')

        self.assertEqual(connection, sentinel_mock)
        sentinel_class_mock.assert_called_once()
        sentinel_mock.master_for.assert_called_once()

        sentinel_instances = sentinel_class_mock.call_args[0][0]
        self.assertListEqual(config['SENTINELS'], sentinel_instances)

        connection_kwargs = sentinel_mock.master_for.call_args[1]
        self.assertEqual(connection_kwargs['service_name'],
                         config['MASTER_NAME'])
        self.assertEqual(connection_kwargs['db'], config['DB'])
        self.assertEqual(connection_kwargs['password'], config['PASSWORD'])
        self.assertEqual(connection_kwargs['socket_timeout'],
                         config['SOCKET_TIMEOUT'])
Example #11
def get_worker_count(request=None):
    """
    Return a count of the active Celery workers.
    """
    # Inner imports so we don't risk circular imports
    from nautobot.core.celery import app  # noqa
    from rq.worker import Worker  # noqa
    from django_rq.queues import get_connection  # noqa

    # Try RQ first, since it's faster.
    rq_count = Worker.count(get_connection("default"))

    # Celery next, since it's slower.
    inspect = app.control.inspect()
    active = inspect.active()  # None if no active workers
    celery_count = len(active) if active is not None else 0

    if rq_count and not celery_count:
        if request:
            messages.warning(
                request,
                "RQ workers are deprecated. Please migrate your workers to Celery."
            )

    return celery_count
Example #13
 def unregister_all_dirty(self):
     """Unregister current TreeItem and all parent paths as dirty
     (should be called from RQ job procedure after cache is updated)
     """
     r_con = get_connection()
     for p in self.all_pootle_paths():
         r_con.zincrby(POOTLE_DIRTY_TREEITEMS, p, -1)
Example #14
    def get(self, request):
        # Gather the version numbers from all installed Django apps
        installed_apps = {}
        for app_config in apps.get_app_configs():
            app = app_config.module
            version = getattr(app, "VERSION", getattr(app, "__version__",
                                                      None))
            if version:
                if type(version) is tuple:
                    version = ".".join(str(n) for n in version)
                installed_apps[app_config.name] = version
        installed_apps = {k: v for k, v in sorted(installed_apps.items())}

        # Gather installed plugins
        plugins = {}
        for plugin_name in settings.PLUGINS:
            plugin_name = plugin_name.rsplit(".", 1)[-1]
            plugin_config = apps.get_app_config(plugin_name)
            plugins[plugin_name] = getattr(plugin_config, "version", None)
        plugins = {k: v for k, v in sorted(plugins.items())}

        return Response({
            "django-version":
            DJANGO_VERSION,
            "installed-apps":
            installed_apps,
            "nautobot-version":
            settings.VERSION,
            "plugins":
            plugins,
            "python-version":
            platform.python_version(),
            "rq-workers-running":
            Worker.count(get_connection("default")),
        })
Example #15
 def handle(self, *args, **options):
     try:
         queues = [Queue(name, connection=get_connection(name)) for name in settings.QUEUES]
         for queue in queues:
             w = Worker([queue], connection=queue.connection)
             w.work(burst=settings.BURST)
     except ConnectionError as e:
         print(e)
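django_rq also ships a helper that covers the common case of the command above; a sketch, assuming a queue named 'default' in RQ_QUEUES (burst mode drains the queue and exits instead of blocking):

import django_rq

# Build a worker for the 'default' queue and work until it is empty
django_rq.get_worker('default').work(burst=True)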
Example #16
 def unregister_dirty(self, decrement=1):
     """Unregister current TreeItem as dirty
     (should be called from RQ job procedure after cache is updated)
     """
     r_con = get_connection()
     job = get_current_job()
     logger.debug('UNREGISTER %s (-%s) where job_id=%s' %
                  (self.get_cachekey(), decrement, job.id))
     r_con.zincrby(POOTLE_DIRTY_TREEITEMS, self.get_cachekey(), 0 - decrement)
Example #17
 def unregister_all_dirty(self, decrement=1):
     """Unregister current TreeItem and all parent paths as dirty
     (should be called from RQ job procedure after cache is updated)
     """
     r_con = get_connection()
     job = get_current_job()
     for p in self.all_pootle_paths():
         logger.debug('UNREGISTER %s (-%s) where job_id=%s' %
                      (p, decrement, job.id))
         r_con.zincrby(POOTLE_DIRTY_TREEITEMS, p, 0 - decrement)
Example #18
def edit_scheduling_job(request, job_id):
    import django_rq
    connection = get_connection()
    job = Job.fetch(job_id, connection=connection)
    queue = django_rq.get_queue('default')
    scheduler = django_rq.get_scheduler('default')
    job = queue.fetch_job(job_id)

    with scheduler.connection._pipeline() as pipe:
        pipe.watch(scheduler.scheduled_jobs_key)
        _unixtime = pipe.zscore(scheduler.scheduled_jobs_key, job.id)
        if _unixtime is not None:
            _time = datetime.datetime.fromtimestamp(_unixtime)
        else:
            _time = None

    if request.method == 'POST':
        form = JobSchedulingForm(request.POST)
        if form.is_valid():
            _next_time = form.cleaned_data['next_start']
            _next_unixtime = _next_time.timestamp()

            new_repeat = form.cleaned_data['repeat'] * 60
            new_timeout = form.cleaned_data['timeout'] * 60

            if _next_unixtime != _unixtime:
                with scheduler.connection._pipeline() as pipe:
                    pipe.watch(scheduler.scheduled_jobs_key)
                    pipe.zadd(scheduler.scheduled_jobs_key, _next_unixtime,
                              job.id)
            if new_repeat != job.meta.get('interval'):
                job.meta['interval'] = new_repeat
                job.save()
            if new_timeout != job.timeout:
                job.timeout = new_timeout
                job.save()
            return redirect(reverse('scheduler_edit_scheduling',
                                    args=[job.id]))
    else:
        form = JobSchedulingForm(
            initial={
                'next_start': _time,
                'repeat': int(job.meta.get('interval') / 60),
                'timeout': int(job.timeout / 60),
            })

    context_data = {
        'scheduler': scheduler,
        'job': job,
        'time': _time,
        'form': form,
        'timeout_min': int(job.timeout / 60),
        'interval_min': int(job.meta.get('interval') / 60),
    }
    return render(request, 'scheduler/edit_scheduling.html', context_data)
Example #19
def scheduler_perform_job(request, job_id):
    import django_rq
    connection = get_connection()
    job = Job.fetch(job_id, connection=connection)
    queue = django_rq.get_queue('default')
    queue.enqueue_call(
        func=job.func_name,
        args=job.args,
        kwargs=job.kwargs,
    )
    return redirect(reverse('scheduler_home'))
Example #20
 def test_get_connection_default(self):
     """
     Test that get_connection returns the right connection for the
     `default` queue.
     """
     config = CONNECTIONS['default']
     connection = get_connection()
     connection_kwargs = connection.connection_pool.connection_kwargs
     self.assertEqual(connection_kwargs['host'], config['HOST'])
     self.assertEqual(connection_kwargs['port'], config['PORT'])
     self.assertEqual(connection_kwargs['db'], config['DB'])
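The QUEUES/CONNECTIONS fixtures these connection tests read mirror the documented RQ_QUEUES settings shape; a sketch with illustrative values only:

# Illustrative values; the real test settings may differ
RQ_QUEUES = {
    'default': {'HOST': 'localhost', 'PORT': 6379, 'DB': 0},
    'test': {'HOST': 'localhost', 'PORT': 6379, 'DB': 1},
    'sentinel': {
        'SENTINELS': [('localhost', 26379)],
        'MASTER_NAME': 'redismaster',
        'DB': 0,
        'PASSWORD': '',
        'SOCKET_TIMEOUT': 0.3,
    },
}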
Example #21
 def test_get_connection_test(self):
     """
     Test that get_connection returns the right connection for the
     `test` queue.
     """
     config = QUEUES["test"]
     connection = get_connection("test")
     connection_kwargs = connection.connection_pool.connection_kwargs
     self.assertEqual(connection_kwargs["host"], config["HOST"])
     self.assertEqual(connection_kwargs["port"], config["PORT"])
     self.assertEqual(connection_kwargs["db"], config["DB"])
Example #22
 def test_get_connection_test(self):
     """
     Test that get_connection returns the right connection for the
     `test` queue.
     """
     config = QUEUES['test']
     connection = get_connection('test')
     connection_kwargs = connection.connection_pool.connection_kwargs
     self.assertEqual(connection_kwargs['host'], config['HOST'])
     self.assertEqual(connection_kwargs['port'], config['PORT'])
     self.assertEqual(connection_kwargs['db'], config['DB'])
Example #24
 def unregister_dirty(self, decrement=1):
     """Unregister current TreeItem as dirty
     (should be called from RQ job procedure after cache is updated)
     """
     r_con = get_connection()
     job = get_current_job()
     if job:
         logger.debug("UNREGISTER %s (-%s) where job_id=%s", self.cache_key,
                      decrement, job.id)
     else:
         logger.debug("UNREGISTER %s (-%s)", self.cache_key, decrement)
     r_con.zincrby(KEY_DIRTY_TREEITEMS, 0 - decrement, self.cache_key)
Example #25
def cleanup_task(index, job_id):
    '''Follow-up job to clean up Redis job tracker

    Args:
        index (str): Unique key where the specified job_id is located
        job_id (str): Job UUID which should be dequeued
    '''
    conn = get_connection(use_strict_redis=True)

    # Remove the first instance of the job
    conn.lrem(index, 1, job_id)
    del conn
Example #26
def info(request, token):
    """
    Return the HireFire JSON data needed to scale worker dynos
    """
    if not settings.HIREFIRE_TOKEN:
        return HttpResponseBadRequest('HireFire not configured. Set the HIREFIRE_TOKEN environment variable on the app to use HireFire for dyno scaling')
    if token != settings.HIREFIRE_TOKEN:
        return HttpResponseForbidden('Invalid token')

    current_tasks = 0

    queues = []
    for index, config in enumerate(QUEUES_LIST):

        queue = get_queue_by_index(index)
        connection = queue.connection

        # Only look at the default queue
        if queue.name != 'default':
            continue

        queue_data = {
            'name': queue.name,
            'jobs': queue.count,
            'index': index,
            'connection_kwargs': connection.connection_pool.connection_kwargs
        }

        connection = get_connection(queue.name)
        all_workers = Worker.all(connection=connection)
        queue_workers = [worker for worker in all_workers if queue in worker.queues]
        queue_data['workers'] = len(queue_workers)

        finished_job_registry = FinishedJobRegistry(queue.name, connection)
        started_job_registry = StartedJobRegistry(queue.name, connection)
        deferred_job_registry = DeferredJobRegistry(queue.name, connection)
        queue_data['finished_jobs'] = len(finished_job_registry)
        queue_data['started_jobs'] = len(started_job_registry)
        queue_data['deferred_jobs'] = len(deferred_job_registry)

        current_tasks += queue_data['jobs']
        current_tasks += queue_data['started_jobs']

        queues.append(queue_data)

    payload = [{
        'quantity': current_tasks,
        'name': 'worker',
    }]

    payload = json.dumps(payload)
    return HttpResponse(payload, content_type='application/json')
Example #27
def test_azure_sync_resource_list_create(
        get_subscription_and_session, get_resource_by_id, get_resources_list,
        mock_response_class, json_file, subscription, resource_group,
        require_resource_types, django_rq_worker):
    """Check sync Multiple Azure Resource - Create"""

    subscription_id = subscription.subscription_id

    data_resource_list = json_file("resource-list.json")
    data_resource = json_file("resource-vm.json")

    count = len(data_resource_list['value'])
    get_subscription_and_session.return_value = (subscription,
                                                 requests.Session())
    get_resources_list.return_value = data_resource_list['value']
    get_resource_by_id.return_value = data_resource

    job = django_rq.enqueue('mce_tasks_rq.azure.sync_resource_list',
                            args=[subscription_id])
    django_rq_worker.work()

    assert job.get_status() == JobStatus.FINISHED
    """
    print('!!! job._dependency_ids : ', job._dependency_ids)
    print('!!! job._dependency_id : ', job._dependency_id)
    print('!!! job.dependency : ', job.dependency)
    """
    # , connection=queue.connection
    assert "jobs_ids" in job.result
    assert len(job.result["jobs_ids"]) == 1

    connection = get_connection('default')

    job_id = job.result["jobs_ids"][0]
    job = Job.fetch(job_id, connection=connection)
    """
    print('!!! job._dependency_ids : ', job._dependency_ids)
    print('!!! job._dependency_id : ', job._dependency_id)
    print('!!! job.dependency : ', job.dependency)
    !!! job._dependency_ids :  ['5531ce25-058f-41e9-8262-04905135201e']
    !!! job._dependency_id :  5531ce25-058f-41e9-8262-04905135201e
    !!! job.dependency :  <Job 5531ce25-058f-41e9-8262-04905135201e: mce_tasks_rq.azure.sync_resource_list('00000000-0000-0000-0000-000000000000')>

    """

    assert job.func_name == "mce_tasks_rq.azure.sync_resource"

    assert job.result == dict(
        pk=ResourceAzure.objects.first().pk,
        created=True,
        changes=None,
    )
Example #28
    def get_context_data(self, *args, **kwargs):
        context = super(QueuedJobsMixin, self).get_context_data(*args, **kwargs)

        # Add a list of job items currently active for the object
        obj = self.object
        lname = generate_object_index(obj)

        # Retrieve a list of jobs
        conn = get_connection()
        queue = django_rq.get_queue()
        context['queued_jobs'] = [queue.fetch_job(job_id) for job_id in conn.lrange(lname, 0, 10)]

        return context
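generate_object_index is not shown in this listing; paired with enqueue_task and cleanup_task above, it presumably derives the per-object Redis list key. A hypothetical sketch:

def generate_object_index(obj):
    # Hypothetical key scheme: one Redis list per model instance
    return 'rq:index:%s:%s' % (obj._meta.label_lower, obj.pk)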
Example #29
    def sync(self, request, pk):
        """
        Enqueue a job to pull the Git repository and refresh its data.
        """
        if not request.user.has_perm("extras.change_gitrepository"):
            raise PermissionDenied("This user does not have permission to make changes to Git repositories.")

        if not Worker.count(get_connection("default")):
            raise RQWorkerNotRunningException()

        repository = get_object_or_404(GitRepository, id=pk)
        enqueue_pull_git_repository_and_refresh_data(repository, request)
        return Response({"message": f"Repository {repository} sync job added to queue."})
Example #30
def get_queue_index(name='default'):
    """
    Returns the position of the named queue in QUEUES_LIST
    """
    queue_index = None
    connection = get_connection(name)
    connection_kwargs = connection.connection_pool.connection_kwargs
    for i in range(0, 100):
        q = get_queue_by_index(i)
        if q.name == name and q.connection.connection_pool.connection_kwargs == connection_kwargs:
            queue_index = i
            break
    return queue_index
Example #31
def delete_job(job_id):
    redis_conn = get_connection()
    workers = Worker.all(redis_conn)
    for worker in workers:
        if worker.state == WorkerStatus.BUSY and \
                worker.get_current_job_id() == str(job_id):
            send_kill_horse_command(redis_conn, worker.name)

    try:
        # remove from queue
        cancel_job(str(job_id), connection=redis_conn)
    except NoSuchJobError:
        pass
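For recent rq releases (send_kill_horse_command arrived in rq 1.6), the imports this helper likely assumes are:

from django_rq.queues import get_connection
from rq.command import send_kill_horse_command
from rq.exceptions import NoSuchJobError
from rq.job import cancel_job
from rq.worker import Worker, WorkerStatus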
Example #32
 def test_get_scheduler(self):
     """
     Ensure get_scheduler creates a scheduler instance with the right
     connection params for the `test` queue.
     """
     connection = get_connection('test')
     config = QUEUES['test']
     scheduler = get_scheduler(connection, 'test')
     connection_kwargs = scheduler.connection.connection_pool.connection_kwargs
     self.assertEqual(scheduler.queue_name, 'test')
     self.assertEqual(connection_kwargs['host'], config['HOST'])
     self.assertEqual(connection_kwargs['port'], config['PORT'])
     self.assertEqual(connection_kwargs['db'], config['DB'])
Example #33
def info(request, token):
    """
    Return the HireFire JSON data needed to scale worker dynos
    """
    if not settings.HIREFIRE_TOKEN:
        return HttpResponseBadRequest(
            "HireFire not configured. Set the HIREFIRE_TOKEN environment variable on the app to use HireFire for dyno scaling"
        )
    if token != settings.HIREFIRE_TOKEN:
        raise PermissionDenied("Invalid token")

    current_tasks = 0

    queues = []
    for index, config in enumerate(QUEUES_LIST):

        queue = get_queue_by_index(index)
        connection = queue.connection

        # Only look at the default queue
        if queue.name != "default":
            continue

        queue_data = {
            "name": queue.name,
            "jobs": queue.count,
            "index": index,
            "connection_kwargs": connection.connection_pool.connection_kwargs,
        }

        connection = get_connection(queue.name)
        all_workers = Worker.all(connection=connection)
        queue_workers = [worker for worker in all_workers if queue in worker.queues]
        queue_data["workers"] = len(queue_workers)

        finished_job_registry = FinishedJobRegistry(queue.name, connection)
        started_job_registry = StartedJobRegistry(queue.name, connection)
        deferred_job_registry = DeferredJobRegistry(queue.name, connection)
        queue_data["finished_jobs"] = len(finished_job_registry)
        queue_data["started_jobs"] = len(started_job_registry)
        queue_data["deferred_jobs"] = len(deferred_job_registry)

        current_tasks += queue_data["jobs"]
        current_tasks += queue_data["started_jobs"]

        queues.append(queue_data)

    payload = [{"quantity": current_tasks, "name": "worker"}]

    payload = json.dumps(payload)
    return HttpResponse(payload, content_type="application/json")
Example #34
    def is_being_refreshed(self):
        """Checks if current TreeItem is being refreshed"""
        r_con = get_connection()
        path = r_con.get(POOTLE_REFRESH_STATS)

        if path is not None:
            if path == '/':
                return True

            lang, prj, dir, file = split_pootle_path(path)
            key = self.get_cachekey()

            return key in path or path in key or key in '/projects/%s/' % prj

        return False
Example #35
    def is_being_refreshed(self):
        """Checks if current TreeItem is being refreshed"""
        r_con = get_connection()
        path = r_con.get(POOTLE_REFRESH_STATS)

        if path is None:
            return False

        if path == '/':
            return True

        proj_code = split_pootle_path(path)[1]
        key = self.cache_key

        return key in path or path in key or key in '/projects/%s/' % proj_code
Example #36
def get_failed_queue_index(name="default"):
    """
    Returns the position of FailedQueue for the named queue in QUEUES_LIST
    """
    # Find the FailedQueue that shares connection params with the named queue
    queue_index = None
    connection = get_connection(name)
    connection_kwargs = connection.connection_pool.connection_kwargs
    for i in range(0, 100):
        q = get_queue_by_index(i)
        if q.name == "failed" and q.connection.connection_pool.connection_kwargs == connection_kwargs:
            queue_index = i
            break

    return queue_index
Example #38
def stats(request):
    queues = []
    for index, config in enumerate(QUEUES_LIST):
        queue = get_queue_by_index(index)
        queue_data = {"name": queue.name, "jobs": queue.count, "index": index}
        if queue.name == "failed":
            queue_data["workers"] = "-"
        else:
            connection = get_connection(queue.name)
            all_workers = Worker.all(connection=connection)
            queue_workers = [worker for worker in all_workers if queue in worker.queues]
            queue_data["workers"] = len(queue_workers)
        queues.append(queue_data)

    context_data = {"queues": queues}
    return render(request, "django_rq/stats.html", context_data)
Example #39
    def test_get_connection_sentinel(self, sentinel_class_mock):
        """
        Test that get_connection returns the right connection for the
        `sentinel` queue.
        """
        sentinel_mock = MagicMock()
        sentinel_mock.master_for.return_value = sentinel_mock
        sentinel_class_mock.side_effect = [sentinel_mock]

        config = QUEUES['sentinel']
        connection = get_connection('sentinel')

        self.assertEqual(connection, sentinel_mock)
        sentinel_class_mock.assert_called_once()
        sentinel_mock.master_for.assert_called_once()

        sentinel_instances = sentinel_class_mock.call_args[0][0]
        self.assertListEqual(config['SENTINELS'], sentinel_instances)

        connection_kwargs = sentinel_mock.master_for.call_args[1]
        self.assertEqual(connection_kwargs['service_name'],
                         config['MASTER_NAME'])
Example #40
def queues(request):
    queues = []
    for index, config in enumerate(QUEUES_LIST):
        queue = get_queue_by_index(index)
        connection = queue.connection

        queue_data = {
            "name": queue.name,
            "jobs": queue.count,
            "index": index,
            "connection_kwargs": connection.connection_pool.connection_kwargs,
        }

        if queue.name == "failed":
            queue_data["workers"] = "-"
            queue_data["finished_jobs"] = "-"
            queue_data["started_jobs"] = "-"
            queue_data["deferred_jobs"] = "-"

        else:
            connection = get_connection(queue.name)
            all_workers = Worker.all(connection=connection)
            queue_workers = [worker for worker in all_workers if queue in worker.queues]
            queue_data["workers"] = len(queue_workers)

            finished_job_registry = FinishedJobRegistry(queue.name, connection)
            started_job_registry = StartedJobRegistry(queue.name, connection)
            deferred_job_registry = DeferredJobRegistry(queue.name, connection)
            queue_data["finished_jobs"] = len(finished_job_registry)
            queue_data["started_jobs"] = len(started_job_registry)
            queue_data["deferred_jobs"] = len(deferred_job_registry)

        queues.append(queue_data)

    context_data = {"title": "RQ Queues", "queues": queues}
    return render(request, "django_rq/stats.html", context_data)
Example #41
 def unregister_dirty(self):
     """Unregister current TreeItem as dirty
     (should be called from RQ job procedure after cache is updated)
     """
     r_con = get_connection()
     r_con.zincrby(POOTLE_DIRTY_TREEITEMS, self.get_cachekey(), -1)
Example #42
# pootle shell
from django_rq.queues import get_connection

POOTLE_DIRTY_TREEITEMS = 'pootle:dirty:treeitems'
c = get_connection()
keys = c.zrangebyscore(POOTLE_DIRTY_TREEITEMS, 1, 1000000)
updates = {k: 0.0 for k in keys}
c.zadd(POOTLE_DIRTY_TREEITEMS, **updates)
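The keyword form zadd(name, **updates) is redis-py 2.x; redis-py 3.0 changed zadd to take a single mapping. The same reset under 3.x would look roughly like:

# redis-py 3.x: pass member->score pairs as one mapping argument
c.zadd(POOTLE_DIRTY_TREEITEMS, {k: 0.0 for k in keys})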
Example #43
    return get_redis_connection(QUEUES_LIST[index]["connection_config"])


def get_queue(name="default", default_timeout=None, async=None, autocommit=None):
    """
    Returns an rq Queue using parameters defined in ``RQ_QUEUES``
    """
    from .settings import QUEUES

    # If async is provided, use it, otherwise, get it from the configuration
    if async is None:
        async = QUEUES[name].get("ASYNC", True)

    return DjangoRQ(
        name, default_timeout=default_timeout, connection=get_connection(name), async=async, autocommit=autocommit
    )


def get_queue_by_index(index):
    """
    Returns an rq Queue using parameters defined in ``QUEUES_LIST``
    """
    from .settings import QUEUES_LIST

    config = QUEUES_LIST[int(index)]
    if config["name"] == "failed":
        return FailedQueue(connection=get_redis_connection(config["connection_config"]))
    return DjangoRQ(
        config["name"], connection=get_redis_connection(config["connection_config"]), async=config.get("ASYNC", True)
    )
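This snippet predates Python 3.7, where async became a reserved keyword; django_rq later renamed the parameter to is_async. A sketch of the equivalent call on current versions:

import django_rq

# is_async=False makes jobs run synchronously (handy in tests)
queue = django_rq.get_queue('default', is_async=False)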
Example #44
def get_failed_queue(name="default"):
    """
    Returns the rq failed Queue using parameters defined in ``RQ_QUEUES``
    """
    return FailedQueue(connection=get_connection(name))
Example #45
 def get_scheduler(name="default", interval=60):
     """
     Returns an RQ Scheduler instance using parameters defined in
     ``RQ_QUEUES``
     """
     return Scheduler(name, interval=interval, connection=get_connection(name))
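A short usage sketch with rq-scheduler's API; my_task is a hypothetical, importable function:

from datetime import timedelta

def my_task():
    ...  # hypothetical job body

scheduler = get_scheduler('default')
# Enqueue my_task roughly five minutes from now
scheduler.enqueue_in(timedelta(minutes=5), my_task)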
Example #46
 def get_dirty_score(self):
     r_con = get_connection()
     return r_con.zscore(POOTLE_DIRTY_TREEITEMS, self.get_cachekey())
Example #47
import pytest
from django_rq.queues import get_connection
import redis

try:
    get_connection().ping()
except redis.exceptions.ConnectionError:
    cant_connect = True
else:
    cant_connect = False


rq_redis = pytest.mark.skipif(cant_connect, reason="can't connect to Redis")
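A test can then opt out cleanly when Redis is unreachable; a trivial usage sketch:

@rq_redis
def test_redis_roundtrip():
    # Runs only when the Redis behind get_connection() answers PING
    assert get_connection().ping()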
Example #48
 def count_queue_workers(self, queue):
     connection = queues.get_connection(queue.name)
     workers = Worker.all(connection=connection)
     return len([_ for _ in workers if queue in _.queues])