def test_system_status(db):
    """system_status must yield exactly these ClickHouse rows, in this order."""
    rows = [row for row in system_status()]

    expected_keys = [
        "clickhouse_alive",
        "clickhouse_disk_0_free_space",
        "clickhouse_disk_0_total_space",
        "clickhouse_table_sizes",
        "clickhouse_system_metrics",
    ]
    assert [row["key"] for row in rows] == expected_keys

    # The last two rows (table sizes, system metrics) carry tabular detail.
    for detailed_row in rows[-2:]:
        assert len(detailed_row["subrows"]["rows"]) > 0
def test_system_status(db):
    """system_status must yield exactly these rows, in this order."""
    rows = [row for row in system_status()]

    expected_keys = [
        "clickhouse_alive",
        "clickhouse_event_count",
        "clickhouse_event_count_last_month",
        "clickhouse_event_count_month_to_date",
        "clickhouse_disk_0_free_space",
        "clickhouse_disk_0_total_space",
        "clickhouse_table_sizes",
        "clickhouse_system_metrics",
        "last_event_ingested_timestamp",
        "dead_letter_queue_size",
        "dead_letter_queue_events_last_day",
        "dead_letter_queue_ratio_ok",
    ]
    assert [row["key"] for row in rows] == expected_keys

    # Rows 6 and 7 (table sizes, system metrics) carry tabular detail.
    for index in (6, 7):
        assert len(rows[index]["subrows"]["rows"]) > 0
def system_status(request):
    """Return a JSON health summary of the instance.

    Reports PostHog/plugin-server versions, Postgres and Redis liveness plus
    assorted size/usage metrics, and — when EE is enabled — delegates the
    analytics-database metrics to ``ee.clickhouse.system_status``.

    On multi-tenant deployments only staff users may access this endpoint;
    anyone else gets an ``AuthenticationFailed``.
    """
    is_multitenancy: bool = getattr(settings, "MULTI_TENANCY", False)
    if is_multitenancy and not request.user.is_staff:
        raise AuthenticationFailed(detail="You're not authorized.")
    # Imported lazily — presumably to avoid a circular import at module load.
    from .models import Element, Event, SessionRecordingEvent

    redis_alive = is_redis_alive()
    postgres_alive = is_postgres_alive()

    metrics: List[Dict[str, Union[str, bool, int, float]]] = []

    metrics.append({"key": "posthog_version", "metric": "PostHog version", "value": VERSION})
    metrics.append(
        {
            "key": "analytics_database",
            "metric": "Analytics database in use",
            "value": "ClickHouse" if is_ee_enabled() else "Postgres",
        }
    )
    # NOTE(review): the "plugin_sever_*" keys look like typos for "plugin_server_*",
    # but they are part of the response contract — renaming would break consumers.
    metrics.append({"key": "plugin_sever_alive", "metric": "Plugin server alive", "value": is_plugin_server_alive()})
    metrics.append(
        {
            "key": "plugin_sever_version",
            "metric": "Plugin server version",
            "value": get_plugin_server_version() or "unknown",
        }
    )
    metrics.append({"key": "db_alive", "metric": "Postgres database alive", "value": postgres_alive})
    if postgres_alive:
        # Context manager closes the cursor; the original leaked it.
        with connection.cursor() as cursor:
            postgres_version = cursor.connection.server_version
        metrics.append(
            {
                "key": "pg_version",
                "metric": "Postgres version",
                # server_version is an int such as 120003 -> "12.0.3"
                "value": f"{postgres_version // 10000}.{(postgres_version // 100) % 100}.{postgres_version % 100}",
            }
        )
        if not is_ee_enabled():
            # Postgres is the analytics store: report approximate table sizes.
            event_table_count = get_table_approx_count(Event._meta.db_table)
            event_table_size = get_table_size(Event._meta.db_table)
            element_table_count = get_table_approx_count(Element._meta.db_table)
            element_table_size = get_table_size(Element._meta.db_table)
            session_recording_event_table_count = get_table_approx_count(SessionRecordingEvent._meta.db_table)
            session_recording_event_table_size = get_table_size(SessionRecordingEvent._meta.db_table)
            metrics.append(
                {
                    "metric": "Postgres elements table size",
                    "value": f"{element_table_count} rows (~{element_table_size})",
                }
            )
            metrics.append(
                {"metric": "Postgres events table size", "value": f"{event_table_count} rows (~{event_table_size})"}
            )
            metrics.append(
                {
                    "metric": "Postgres session recording table size",
                    "value": f"{session_recording_event_table_count} rows (~{session_recording_event_table_size})",
                }
            )
    if is_ee_enabled():
        from ee.clickhouse.system_status import system_status

        metrics.extend(list(system_status()))
    metrics.append({"key": "redis_alive", "metric": "Redis alive", "value": redis_alive})
    if redis_alive:
        import redis

        try:
            redis_info = get_redis_info()
            redis_queue_depth = get_redis_queue_depth()
            metrics.append({"metric": "Redis version", "value": f"{redis_info.get('redis_version')}"})
            metrics.append({"metric": "Redis current queue depth", "value": f"{redis_queue_depth}"})
            metrics.append(
                {"metric": "Redis connected client count", "value": f"{redis_info.get('connected_clients')}"}
            )
            metrics.append({"metric": "Redis memory used", "value": f"{redis_info.get('used_memory_human', '?')}B"})
            metrics.append(
                {"metric": "Redis memory peak", "value": f"{redis_info.get('used_memory_peak_human', '?')}B"}
            )
            metrics.append(
                {
                    "metric": "Redis total memory available",
                    "value": f"{redis_info.get('total_system_memory_human', '?')}B",
                }
            )
        except redis.exceptions.ConnectionError as e:
            # Redis answered the liveness probe but failed mid-query; degrade gracefully.
            metrics.append(
                {"metric": "Redis metrics", "value": f"Redis connected but then failed to return metrics: {e}"}
            )
    return JsonResponse({"results": metrics})
def list(self, request: Request) -> Response:
    """Return the instance-status overview plus internal metrics dashboards.

    Reports PostHog/plugin-server versions and job queues, Postgres and Redis
    liveness and usage metrics, and — when ClickHouse is enabled — delegates
    analytics-database metrics to ``ee.clickhouse.system_status``.
    """
    redis_alive = is_redis_alive()
    postgres_alive = is_postgres_alive()

    metrics: List[Dict[str, Union[str, bool, int, float]]] = []

    metrics.append({"key": "posthog_version", "metric": "PostHog version", "value": VERSION})
    metrics.append(
        {
            "key": "analytics_database",
            "metric": "Analytics database in use",
            "value": "ClickHouse" if is_clickhouse_enabled() else "Postgres",
        }
    )
    # NOTE(review): the "plugin_sever_*" keys look like typos for "plugin_server_*",
    # but they are part of the response contract — renaming would break consumers.
    metrics.append({"key": "plugin_sever_alive", "metric": "Plugin server alive", "value": is_plugin_server_alive()})
    metrics.append(
        {
            "key": "plugin_sever_version",
            "metric": "Plugin server version",
            "value": get_plugin_server_version() or "unknown",
        }
    )
    plugin_server_queues = get_plugin_server_job_queues()
    metrics.append(
        {
            "key": "plugin_sever_job_queues",
            "metric": "Job queues enabled in plugin server",
            "value": ", ".join([q.capitalize() for q in plugin_server_queues]) if plugin_server_queues else "unknown",
        }
    )
    metrics.append({"key": "db_alive", "metric": "Postgres database alive", "value": postgres_alive})
    if postgres_alive:
        # Context manager closes the cursor; the original leaked it.
        with connection.cursor() as cursor:
            postgres_version = cursor.connection.server_version
        metrics.append(
            {
                "key": "pg_version",
                "metric": "Postgres version",
                # server_version is an int such as 120003 -> "12.0.3"
                "value": f"{postgres_version // 10000}.{(postgres_version // 100) % 100}.{postgres_version % 100}",
            }
        )
        if not is_clickhouse_enabled():
            # Postgres is the analytics store: report approximate table sizes.
            event_table_count = get_table_approx_count(Event._meta.db_table)
            event_table_size = get_table_size(Event._meta.db_table)
            element_table_count = get_table_approx_count(Element._meta.db_table)
            element_table_size = get_table_size(Element._meta.db_table)
            session_recording_event_table_count = get_table_approx_count(SessionRecordingEvent._meta.db_table)
            session_recording_event_table_size = get_table_size(SessionRecordingEvent._meta.db_table)
            metrics.append(
                {
                    "metric": "Postgres elements table size",
                    "value": f"{element_table_count} rows (~{element_table_size})",
                }
            )
            metrics.append(
                {"metric": "Postgres events table size", "value": f"{event_table_count} rows (~{event_table_size})"}
            )
            metrics.append(
                {
                    "metric": "Postgres session recording table size",
                    "value": f"{session_recording_event_table_count} rows (~{session_recording_event_table_size})",
                }
            )
    if is_clickhouse_enabled():
        from ee.clickhouse.system_status import system_status

        metrics.extend(list(system_status()))
    metrics.append({"key": "redis_alive", "metric": "Redis alive", "value": redis_alive})
    if redis_alive:
        import redis

        try:
            redis_info = get_redis_info()
            redis_queue_depth = get_redis_queue_depth()
            metrics.append({"metric": "Redis version", "value": f"{redis_info.get('redis_version')}"})
            metrics.append({"metric": "Redis current queue depth", "value": f"{redis_queue_depth}"})
            metrics.append(
                {"metric": "Redis connected client count", "value": f"{redis_info.get('connected_clients')}"}
            )
            metrics.append({"metric": "Redis memory used", "value": f"{redis_info.get('used_memory_human', '?')}B"})
            metrics.append(
                {"metric": "Redis memory peak", "value": f"{redis_info.get('used_memory_peak_human', '?')}B"}
            )
            metrics.append(
                {
                    "metric": "Redis total memory available",
                    "value": f"{redis_info.get('total_system_memory_human', '?')}B",
                }
            )
        except redis.exceptions.ConnectionError as e:
            # Redis answered the liveness probe but failed mid-query; degrade gracefully.
            metrics.append(
                {"metric": "Redis metrics", "value": f"Redis connected but then failed to return metrics: {e}"}
            )
    return Response({"results": {"overview": metrics, "internal_metrics": get_internal_metrics_dashboards()}})
def list(self, request: Request) -> Response:
    """Return the instance-status overview plus internal metrics dashboards.

    Reports PostHog version/SHA, Helm deployment info when present, plugin
    server status, Postgres and Redis liveness and usage metrics, async
    migration status, and ClickHouse metrics from ``ee.clickhouse.system_status``.
    """
    redis_alive = is_redis_alive()
    postgres_alive = is_postgres_alive()

    metrics: List[Dict[str, Union[str, bool, int, float, Dict[str, Any]]]] = []

    metrics.append({"key": "posthog_version", "metric": "PostHog version", "value": VERSION})
    metrics.append({"key": "posthog_git_sha", "metric": "PostHog Git SHA", "value": GIT_SHA})

    helm_info = get_helm_info_env()
    if helm_info:  # only present on Helm-managed deployments
        metrics.append(
            {
                "key": "helm",
                "metric": "Helm Info",
                "value": "",
                "subrows": {"columns": ["key", "value"], "rows": list(helm_info.items())},
            }
        )

    metrics.append(
        {"key": "analytics_database", "metric": "Analytics database in use", "value": "ClickHouse",}
    )
    # NOTE(review): the "plugin_sever_*" keys look like typos for "plugin_server_*",
    # but they are part of the response contract — renaming would break consumers.
    metrics.append(
        {"key": "plugin_sever_alive", "metric": "Plugin server alive", "value": is_plugin_server_alive()}
    )
    metrics.append(
        {
            "key": "plugin_sever_version",
            "metric": "Plugin server version",
            "value": get_plugin_server_version() or "unknown",
        }
    )
    plugin_server_queues = get_plugin_server_job_queues()
    metrics.append(
        {
            "key": "plugin_sever_job_queues",
            "metric": "Job queues enabled in plugin server",
            "value": ", ".join([q.capitalize() for q in plugin_server_queues]) if plugin_server_queues else "unknown",
        }
    )
    metrics.append({"key": "db_alive", "metric": "Postgres database alive", "value": postgres_alive})
    if postgres_alive:
        # Context manager closes the cursor; the original leaked it.
        with connection.cursor() as cursor:
            postgres_version = cursor.connection.server_version
        metrics.append(
            {
                "key": "pg_version",
                "metric": "Postgres version",
                # server_version is an int such as 120003 -> "12.0.3"
                "value": f"{postgres_version // 10000}.{(postgres_version // 100) % 100}.{postgres_version % 100}",
            }
        )
    metrics.append(
        {"key": "async_migrations_ok", "metric": "Async migrations up-to-date", "value": async_migrations_ok()},
    )

    from ee.clickhouse.system_status import system_status

    metrics.extend(list(system_status()))

    metrics.append({"key": "redis_alive", "metric": "Redis alive", "value": redis_alive})
    if redis_alive:
        import redis

        try:
            redis_info = get_redis_info()
            redis_queue_depth = get_redis_queue_depth()
            metrics.append({"metric": "Redis version", "value": f"{redis_info.get('redis_version')}"})
            metrics.append({"metric": "Redis current queue depth", "value": f"{redis_queue_depth}"})
            metrics.append(
                {"metric": "Redis connected client count", "value": f"{redis_info.get('connected_clients')}"}
            )
            metrics.append({"metric": "Redis memory used", "value": f"{redis_info.get('used_memory_human', '?')}B"})
            metrics.append(
                {"metric": "Redis memory peak", "value": f"{redis_info.get('used_memory_peak_human', '?')}B"}
            )
            metrics.append(
                {
                    "metric": "Redis total memory available",
                    "value": f"{redis_info.get('total_system_memory_human', '?')}B",
                }
            )
        except redis.exceptions.ConnectionError as e:
            # Redis answered the liveness probe but failed mid-query; degrade gracefully.
            metrics.append(
                {"metric": "Redis metrics", "value": f"Redis connected but then failed to return metrics: {e}"}
            )
    return Response({"results": {"overview": metrics, "internal_metrics": get_internal_metrics_dashboards()}})