def get(self, request):
    """Return deployment metadata: Django/Python/Nautobot versions, per-app
    versions, installed plugins, and running RQ/Celery worker counts."""

    def _render(raw):
        # App modules may expose VERSION as a tuple; render it dotted.
        return ".".join(str(part) for part in raw) if isinstance(raw, tuple) else raw

    # Collect a version for every installed Django app, sorted by app name.
    versions = {}
    for config in apps.get_app_configs():
        module = config.module
        raw = getattr(module, "VERSION", getattr(module, "__version__", None))
        if raw:
            versions[config.name] = _render(raw)
    installed_apps = dict(sorted(versions.items()))

    # Collect plugin versions, keyed by the trailing component of the dotted path.
    plugin_versions = {}
    for dotted_path in settings.PLUGINS:
        short_name = dotted_path.rsplit(".", 1)[-1]
        plugin_versions[short_name] = getattr(apps.get_app_config(short_name), "version", None)
    plugins = dict(sorted(plugin_versions.items()))

    # inspect().active() returns None when no Celery workers are reachable.
    active_workers = celery_app.control.inspect().active()
    worker_count = 0 if active_workers is None else len(active_workers)

    return Response(
        {
            "django-version": DJANGO_VERSION,
            "installed-apps": installed_apps,
            "nautobot-version": settings.VERSION,
            "plugins": plugins,
            "python-version": platform.python_version(),
            "rq-workers-running": RQWorker.count(get_rq_connection("default")),
            "celery-workers-running": worker_count,
        }
    )
def get(self, request):
    """Return deployment metadata: Django/Python/NetBox versions, per-app
    versions, installed plugins, and the number of running RQ workers."""
    # Gather the version numbers from all installed Django apps
    installed_apps = {}
    for app_config in apps.get_app_configs():
        app = app_config.module
        version = getattr(app, 'VERSION', getattr(app, '__version__', None))
        if version:
            # FIX: use isinstance() rather than a strict type(...) check so
            # tuple subclasses (e.g. a namedtuple VERSION) are handled too.
            if isinstance(version, tuple):
                version = '.'.join(str(n) for n in version)
            installed_apps[app_config.name] = version
    # Present apps in a stable, alphabetical order.
    installed_apps = dict(sorted(installed_apps.items()))

    # Gather installed plugins; PLUGINS holds dotted paths, the app config is
    # registered under the trailing component.
    plugins = {}
    for plugin_name in settings.PLUGINS:
        plugin_name = plugin_name.rsplit('.', 1)[-1]
        plugin_config = apps.get_app_config(plugin_name)
        plugins[plugin_name] = getattr(plugin_config, 'version', None)
    plugins = dict(sorted(plugins.items()))

    return Response({
        'django-version': DJANGO_VERSION,
        'installed-apps': installed_apps,
        'netbox-version': settings.VERSION,
        'plugins': plugins,
        'python-version': platform.python_version(),
        'rq-workers-running': Worker.count(get_connection('default')),
    })
def get(self, request):
    """Return deployment metadata: Django/Python/Peering Manager versions,
    per-app versions, and the number of running RQ workers."""
    # Gather the version numbers from all installed Django apps
    installed_apps = {}
    for app_config in apps.get_app_configs():
        app = app_config.module
        version = getattr(app, "VERSION", getattr(app, "__version__", None))
        if version:
            # FIX: use isinstance() rather than a strict type(...) check so
            # tuple subclasses (e.g. a namedtuple VERSION) are handled too.
            if isinstance(version, tuple):
                version = ".".join(str(n) for n in version)
            installed_apps[app_config.name] = version
    # Present apps in a stable, alphabetical order.
    installed_apps = dict(sorted(installed_apps.items()))

    return Response({
        "django-version": DJANGO_VERSION,
        "installed-apps": installed_apps,
        "peering-manager-version": settings.VERSION,
        "python-version": platform.python_version(),
        "rq-workers-running": Worker.count(get_connection("default")),
    })
def get(self, request):
    """Return deployment metadata: Django/Python/Nautobot versions, per-app
    versions, installed plugins, and the number of running RQ workers."""
    # Gather the version numbers from all installed Django apps
    installed_apps = {}
    for app_config in apps.get_app_configs():
        app = app_config.module
        version = getattr(app, "VERSION", getattr(app, "__version__", None))
        if version:
            # FIX: use isinstance() rather than a strict type(...) check so
            # tuple subclasses (e.g. a namedtuple VERSION) are handled too.
            if isinstance(version, tuple):
                version = ".".join(str(n) for n in version)
            installed_apps[app_config.name] = version
    # Present apps in a stable, alphabetical order.
    installed_apps = dict(sorted(installed_apps.items()))

    # Gather installed plugins; PLUGINS holds dotted paths, the app config is
    # registered under the trailing component.
    plugins = {}
    for plugin_name in settings.PLUGINS:
        plugin_name = plugin_name.rsplit(".", 1)[-1]
        plugin_config = apps.get_app_config(plugin_name)
        plugins[plugin_name] = getattr(plugin_config, "version", None)
    plugins = dict(sorted(plugins.items()))

    return Response({
        "django-version": DJANGO_VERSION,
        "installed-apps": installed_apps,
        "nautobot-version": settings.VERSION,
        "plugins": plugins,
        "python-version": platform.python_version(),
        "rq-workers-running": Worker.count(get_connection("default")),
    })
def get_worker_count(request=None):
    """
    Return a count of the active Celery workers.
    """
    # Inner imports so we don't risk circular imports
    from nautobot.core.celery import app  # noqa
    from rq.worker import Worker  # noqa
    from django_rq.queues import get_connection  # noqa

    # Try RQ first since, it's faster.
    rq_count = Worker.count(get_connection("default"))

    # Celery next, since it's slower.
    active = app.control.inspect().active()  # None if no active workers
    celery_count = 0 if active is None else len(active)

    # Warn (when we have a request to attach the message to) if only the
    # deprecated RQ workers are running.
    if rq_count and not celery_count and request:
        messages.warning(
            request, "RQ workers are deprecated. Please migrate your workers to Celery."
        )

    return celery_count
def get_statistics():
    """Build a JSON-serializable summary of every configured RQ queue.

    For each queue: its name, pending job count, the enqueue timestamp of the
    oldest job (best effort), the queue index, sanitized Redis connection
    kwargs, and — except for the special 'failed' queue — worker and registry
    counts.

    Returns:
        dict: ``{'queues': [<queue_data>, ...]}``
    """
    queues = []
    for index, _config in enumerate(QUEUES_LIST):
        queue = get_queue_by_index(index)
        connection = queue.connection
        # FIX: copy before mutating. connection_pool.connection_kwargs is the
        # pool's live configuration dict; popping keys from it directly would
        # alter the shared Redis connection pool for all subsequent users.
        connection_kwargs = dict(connection.connection_pool.connection_kwargs)

        # Raw access to the first item from left of the redis list.
        # This might not be accurate since new job can be added from the left
        # with `at_front` parameters.
        # Ideally rq should supports Queue.oldest_job
        last_job_id = connection.lindex(queue.key, 0)
        last_job = queue.fetch_job(last_job_id.decode('utf-8')) if last_job_id else None
        if last_job:
            oldest_job_timestamp = to_localtime(last_job.enqueued_at).strftime('%Y-%m-%d, %H:%M:%S')
        else:
            oldest_job_timestamp = "-"

        # parser_class and connection_pool are not needed and not JSON serializable
        connection_kwargs.pop('parser_class', None)
        connection_kwargs.pop('connection_pool', None)

        queue_data = {
            'name': queue.name,
            'jobs': queue.count,
            'oldest_job_timestamp': oldest_job_timestamp,
            'index': index,
            'connection_kwargs': connection_kwargs,
        }
        if queue.name == 'failed':
            # The failed queue has no workers or registries of its own.
            queue_data['workers'] = '-'
            queue_data['finished_jobs'] = '-'
            queue_data['started_jobs'] = '-'
            queue_data['deferred_jobs'] = '-'
        else:
            connection = get_connection(queue.name)
            queue_data['workers'] = Worker.count(queue=queue)
            finished_job_registry = FinishedJobRegistry(queue.name, connection)
            started_job_registry = StartedJobRegistry(queue.name, connection)
            deferred_job_registry = DeferredJobRegistry(queue.name, connection)
            failed_job_registry = FailedJobRegistry(queue.name, connection)
            queue_data['finished_jobs'] = len(finished_job_registry)
            queue_data['started_jobs'] = len(started_job_registry)
            queue_data['deferred_jobs'] = len(deferred_job_registry)
            queue_data['failed_jobs'] = len(failed_job_registry)
        queues.append(queue_data)

    return {'queues': queues}
def get_statistics():
    """Build a JSON-serializable summary of every configured RQ queue.

    For each queue: its name, pending job count, the enqueue timestamp of the
    oldest job (best effort), the queue index, sanitized Redis connection
    kwargs, and — except for the special 'failed' queue — worker and registry
    counts.

    Returns:
        dict: ``{'queues': [<queue_data>, ...]}``
    """
    queues = []
    for index, _config in enumerate(QUEUES_LIST):
        queue = get_queue_by_index(index)
        connection = queue.connection
        # FIX: copy before mutating. connection_pool.connection_kwargs is the
        # pool's live configuration dict; popping keys from it directly would
        # alter the shared Redis connection pool for all subsequent users.
        connection_kwargs = dict(connection.connection_pool.connection_kwargs)

        # Raw access to the first item from left of the redis list.
        # This might not be accurate since new job can be added from the left
        # with `at_front` parameters.
        # Ideally rq should supports Queue.oldest_job
        last_job_id = connection.lindex(queue.key, 0)
        last_job = queue.fetch_job(last_job_id.decode('utf-8')) if last_job_id else None
        if last_job:
            oldest_job_timestamp = to_localtime(last_job.enqueued_at).strftime('%Y-%m-%d, %H:%M:%S')
        else:
            oldest_job_timestamp = "-"

        # parser_class and connection_pool are not needed and not JSON serializable
        connection_kwargs.pop('parser_class', None)
        connection_kwargs.pop('connection_pool', None)

        queue_data = {
            'name': queue.name,
            'jobs': queue.count,
            'oldest_job_timestamp': oldest_job_timestamp,
            'index': index,
            'connection_kwargs': connection_kwargs,
        }
        if queue.name == 'failed':
            # The failed queue has no workers or registries of its own.
            queue_data['workers'] = '-'
            queue_data['finished_jobs'] = '-'
            queue_data['started_jobs'] = '-'
            queue_data['deferred_jobs'] = '-'
        else:
            connection = get_connection(queue.name)
            queue_data['workers'] = Worker.count(queue=queue)
            finished_job_registry = FinishedJobRegistry(queue.name, connection)
            started_job_registry = StartedJobRegistry(queue.name, connection)
            deferred_job_registry = DeferredJobRegistry(queue.name, connection)
            failed_job_registry = FailedJobRegistry(queue.name, connection)
            queue_data['finished_jobs'] = len(finished_job_registry)
            queue_data['started_jobs'] = len(started_job_registry)
            queue_data['deferred_jobs'] = len(deferred_job_registry)
            queue_data['failed_jobs'] = len(failed_job_registry)
        queues.append(queue_data)

    return {'queues': queues}
def get(self):
    """Return the list of RQ workers together with a total count."""
    total = Worker.count(connection=rq2.connection)
    worker_list = []
    for worker in Worker.all(connection=rq2.connection):
        worker_list.append({
            "queue_names": worker.queue_names(),
            # NOTE(review): get_current_job() returns a Job object, which may
            # not be serializable by the response layer — confirm downstream.
            "current_job": worker.get_current_job(),
            # FIX: Worker.heartbeat() *refreshes* the worker's heartbeat key
            # (a write) and returns None; the timestamp of the most recent
            # heartbeat is exposed via the `last_heartbeat` attribute.
            "heartbeat": worker.last_heartbeat,
            "name": worker.name,
            "state": worker.get_state(),
        })
    return {
        "code": StatesCode.SUCCESS,
        "total": total,
        "data": worker_list,
    }
def get_worker_count(request):
    """
    Return a count of the active Celery workers.
    """
    # Inner imports so we don't risk circular imports
    from nautobot.core.celery import app  # noqa
    from rq.worker import Worker  # noqa
    from django_rq.queues import get_connection  # noqa

    # Try RQ first since, it's faster.
    rq_count = Worker.count(get_connection("default"))

    # FIXME(jathan): If both RQ/Celery workers are running, this warning is
    # displayed but barely seen because of the redirect after task execution.
    if rq_count:
        messages.warning(
            request,
            "RQ workers are deprecated. Please migrate your worker to Celery.")

    # Celery next, since it's slower.
    active = app.control.inspect().active()  # None if no active workers
    if active is None:
        return 0
    return len(active)
def run_auto_worker(self):
    """Spawn one additional AutoWorker for this queue, respecting the cap.

    FIX: the original comparison used ``<=``, which spawned a new worker even
    when the pool was already at ``max_workers``, overshooting the cap by one.
    A strict ``<`` only spawns while the current count is below the cap.
    """
    if Worker.count(queue=self) < self.max_workers:
        worker = AutoWorker(self.name, max_procs=1)
        worker.work()