def wf_event_recording() -> typing.List[Graph]:
    return [
        Graph(
            title="wf event recording latency success",
            dataSource=DATASOURCE,
            targets=[
                Target(
                    expr='sum(flyte:propeller:all:workflow:event_recording:success_duration_ms) by (quantile, wf)',
                    refId='A',
                ),
            ],
            yAxes=single_y_axis(format=MILLISECONDS_FORMAT),
        ),
        Graph(
            title="wf event recording count",
            dataSource=DATASOURCE,
            targets=[
                Target(
                    expr='sum(rate(flyte:propeller:all:workflow:event_recording:success_duration_ms_count[5m])) by (wf)',
                    legendFormat="success",
                    refId='A',
                ),
                Target(
                    expr='sum(rate(flyte:propeller:all:workflow:event_recording:failure_duration_ms_count[5m])) by (wf)',
                    legendFormat="failure",
                    refId='B',
                ),
            ],
            yAxes=single_y_axis(format=NO_FORMAT),
        ),
    ]
def errors(collapse: bool) -> Row:
    return Row(
        title="Error (System vs user)",
        collapse=collapse,
        panels=[
            Graph(
                title="User errors",
                dataSource=DATASOURCE,
                targets=[
                    Target(
                        expr='sum(rate(flyte:propeller:all:node:user_error_duration_ms_count{project=~"$project",domain=~"$domain",wf=~"$project:$domain:$workflow"}[5m]))',
                        refId='A',
                    ),
                ],
                yAxes=single_y_axis(format=SHORT_FORMAT),
            ),
            Graph(
                title="System errors",
                dataSource=DATASOURCE,
                targets=[
                    Target(
                        expr='sum(rate(flyte:propeller:all:node:system_error_duration_ms_count{project=~"$project",domain=~"$domain",wf=~"$project:$domain:$workflow"}[5m]))',
                        refId='A',
                    ),
                ],
                yAxes=single_y_axis(format=SHORT_FORMAT),
            ),
        ])
def dynamic_wf_build() -> typing.List[Graph]:
    return [
        Graph(
            title="Dynamic workflow build latency",
            dataSource=DATASOURCE,
            targets=[
                Target(
                    # metric is recorded in microseconds; divide by 1000 to plot milliseconds
                    expr='sum(flyte:propeller:all:node:build_dynamic_workflow_us) by (quantile, wf) / 1000',
                    refId='A',
                ),
            ],
            yAxes=single_y_axis(format=MILLISECONDS_FORMAT),
        ),
        Graph(
            title="Dynamic workflow build count",
            dataSource=DATASOURCE,
            targets=[
                Target(
                    expr='sum(rate(flyte:propeller:all:node:build_dynamic_workflow_us_count[5m])) by (wf)',
                    refId='A',
                ),
            ],
            yAxes=single_y_axis(format=NO_FORMAT),
        ),
    ]
def wf_store_latency(collapse: bool) -> Row:
    return Row(
        title="etcd write metrics",
        collapse=collapse,
        panels=[
            Graph(
                title="wf update etcd latency",
                dataSource=DATASOURCE,
                targets=[
                    Target(
                        expr='sum(flyte:propeller:all:wf_update_latency_ms) by (quantile)',
                        refId='A',
                    ),
                ],
                yAxes=single_y_axis(format=MILLISECONDS_FORMAT),
            ),
            Graph(
                title="etcd writes",
                dataSource=DATASOURCE,
                targets=[
                    Target(
                        expr='sum(rate(flyte:propeller:all:wf_update_latency_ms_count[5m]))',
                        refId='A',
                    ),
                ],
                yAxes=single_y_axis(format=NO_FORMAT),
            ),
            Graph(
                title="etcd write conflicts",
                dataSource=DATASOURCE,
                targets=[
                    Target(
                        expr='sum(rate(flyte:propeller:all:wf_update_conflict[5m]))',
                        refId='A',
                    ),
                ],
                yAxes=single_y_axis(format=NO_FORMAT),
            ),
            Graph(
                title="etcd write failures",
                dataSource=DATASOURCE,
                targets=[
                    Target(
                        expr='sum(rate(flyte:propeller:all:wf_update_failed[5m]))',
                        refId='A',
                    ),
                ],
                yAxes=single_y_axis(format=NO_FORMAT),
            ),
        ])
def quota_stats(collapse: bool) -> Row:
    return Row(
        title="Kubernetes Quota Usage stats",
        collapse=collapse,
        panels=[
            Graph(
                title="CPU Limits vs usage",
                dataSource=DATASOURCE,
                targets=[
                    Target(
                        expr='kube_resourcequota{resource="limits.cpu", namespace="$project-$domain", type="hard"}',
                        refId='A',
                        legendFormat="max cpu",
                    ),
                    Target(
                        expr='kube_resourcequota{resource="limits.cpu", namespace="$project-$domain", type="used"}',
                        refId='B',
                        legendFormat="used cpu",
                    ),
                ],
                yAxes=YAxes(
                    YAxis(format=OPS_FORMAT),
                    YAxis(format=SHORT_FORMAT),
                ),
            ),
            Graph(
                title="Mem Limits vs usage",
                dataSource=DATASOURCE,
                targets=[
                    Target(
                        expr='kube_resourcequota{resource="limits.memory", namespace="$project-$domain", type="hard"}',
                        refId='A',
                        legendFormat="max mem",
                    ),
                    Target(
                        expr='kube_resourcequota{resource="limits.memory", namespace="$project-$domain", type="used"}',
                        refId='B',
                        legendFormat="used mem",
                    ),
                ],
                yAxes=YAxes(
                    YAxis(format=OPS_FORMAT),
                    YAxis(format=SHORT_FORMAT),
                ),
            ),
        ])
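# Hypothetical usage sketch (not part of the original file): the Flyte helpers
# above return grafanalib Graph/Row objects and are presumably assembled into a
# Dashboard elsewhere. A minimal composition could look like the following;
# `flyte_propeller_dashboard` and the dashboard/row titles are illustrative
# names, while Dashboard and Row are the standard grafanalib.core classes.
from grafanalib.core import Dashboard, Row


def flyte_propeller_dashboard() -> Dashboard:
    # Collects the graph lists into rows and reuses the Row helpers directly.
    return Dashboard(
        title="Flyte propeller (sketch)",
        rows=[
            Row(title="Workflow event recording", panels=wf_event_recording()),
            Row(title="Dynamic workflow build", panels=dynamic_wf_build()),
            errors(collapse=True),
            wf_store_latency(collapse=True),
            quota_stats(collapse=True),
        ],
    ).auto_panel_ids()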
def number_of_active_processes_graph(grr_component):
    return Graph(
        title="Number of Active Processes",
        targets=[
            Target(
                expr='sum(up{{job="grr_{}"}})'.format(grr_component),
                legendFormat="Active Processes",
            ),
        ],
        alert=Alert(
            name="Number of Active Processes alert",
            message="The number of active {} processes is below {}".format(
                grr_component.capitalize(),
                config.ACTIVE_PROCESSES_ALERTING_CONDITION),
            alertConditions=[
                AlertCondition(
                    Target(
                        expr='sum(up{{job="grr_{}"}})'.format(grr_component),
                        legendFormat="Active Processes",
                    ),
                    timeRange=TimeRange("10s", "now"),
                    evaluator=LowerThan(config.ACTIVE_PROCESSES_ALERTING_CONDITION),
                    operator=OP_AND,
                    reducerType=RTYPE_SUM)
            ],
        ))
def generate_rds_free_storage_space_graph(name: str, cloudwatch_data_source: str):
    """Generate RDS free storage space graph"""
    y_axes = single_y_axis(format=BYTES)
    targets = [
        CloudwatchMetricsTarget(
            alias="Free storage",
            metricName="FreeStorageSpace",
            statistics=["Minimum"],
            namespace=NAMESPACE,
            dimensions={"DBInstanceIdentifier": name},
            period="1m",
            refId=ALERT_REF_ID,
        ),
    ]

    return Graph(
        title="Free storage",
        dataSource=cloudwatch_data_source,
        targets=targets,
        yAxes=y_axes,
        transparent=TRANSPARENT,
        editable=EDITABLE,
        bars=False,
        lines=True,
    ).auto_ref_ids()
def node_errors() -> Graph:
    return Graph(
        title="node errors",
        dataSource=DATASOURCE,
        targets=[
            Target(
                expr='sum(rate(flyte:propeller:all:node:perma_system_error_duration_unlabeled_ms_count[5m]))',
                legendFormat="system error",
                refId='A',
            ),
            Target(
                expr='sum(rate(flyte:propeller:all:node:perma_user_error_duration_unlabeled_ms[5m]))',
                legendFormat="user error",
                refId='B',
            ),
            Target(
                expr='sum(rate(flyte:propeller:all:node:perma_unknown_error_duration_unlabeled_ms[5m]))',
                legendFormat="unknown error",
                refId='C',
            ),
        ],
        yAxes=single_y_axis(format=NO_FORMAT),
    )
def error_codes(api: str, interval: int = 1) -> Graph:
    return Graph(
        title=f"{api} return codes",
        dataSource=DATASOURCE,
        targets=[
            Target(
                expr=f'sum(irate(flyte:admin:{api}:codes:OK[{interval}m]))',
                legendFormat="ok",
                refId='A',
            ),
            Target(
                expr=f'sum(irate(flyte:admin:{api}:codes:InvalidArgument[{interval}m]))',
                legendFormat="invalid-args",
                refId='B',
            ),
            Target(
                expr=f'sum(irate(flyte:admin:{api}:codes:AlreadyExists[{interval}m]))',
                legendFormat="already-exists",
                refId='C',
            ),
            Target(
                expr=f'sum(irate(flyte:admin:{api}:codes:FailedPrecondition[{interval}m]))',
                legendFormat="failed-precondition",
                refId='D',
            ),
        ],
        yAxes=YAxes(
            YAxis(format=OPS_FORMAT),
            YAxis(format=SHORT_FORMAT),
        ),
    )
def create_lambda_sqs_graph(name: str, cloudwatch_data_source: str, fifo: bool):
    """Create SQS graph"""
    if fifo:
        name += ".fifo"

    targets = [
        CloudwatchMetricsTarget(
            alias="Number of messages sent to the queue",
            namespace="AWS/SQS",
            statistics=["Sum"],
            metricName="NumberOfMessagesSent",
            dimensions={"QueueName": name},
        )
    ]

    yAxes = single_y_axis(format=SHORT_FORMAT)

    return Graph(
        title="SQS: {}".format(name),
        dataSource=cloudwatch_data_source,
        targets=targets,
        yAxes=yAxes,
        transparent=TRANSPARENT,
        editable=EDITABLE,
    ).auto_ref_ids()
def generate_firehose_graph(influxdb_data_source: str) -> Graph:
    """Generate Firehose graph"""
    y_axes = single_y_axis(format=SHORT_FORMAT)
    targets = [
        InfluxDBTarget(
            alias=FIREHOSE_INCOMING_RECORDS_ALIAS,
            query='SELECT sum("incoming_records_sum") FROM "{}"."{}" WHERE ("delivery_stream_name" =~ /^$firehose$/) AND $timeFilter GROUP BY time(5m), "delivery_stream_name" fill(0)'.format(  # noqa: E501
                RETENTION_POLICY, FIREHOSE_MEASUREMENT
            ),
            rawQuery=RAW_QUERY,
        ),
        InfluxDBTarget(
            alias=FIREHOSE_DELIVERY_TO_S3_SUCCESS_ALIAS,
            query='SELECT sum("delivery_to_s3._success_sum") FROM "{}"."{}" WHERE ("delivery_stream_name" =~ /^$firehose$/) AND $timeFilter GROUP BY time(5m), "delivery_stream_name" fill(0)'.format(  # noqa: E501
                RETENTION_POLICY, FIREHOSE_MEASUREMENT
            ),
            rawQuery=RAW_QUERY,
        ),
        InfluxDBTarget(
            alias=FIREHOSE_DELIVERY_TO_S3_ALIAS,
            query='SELECT sum("delivery_to_s3._records_sum") FROM "{}"."{}" WHERE ("delivery_stream_name" =~ /^$firehose$/) AND $timeFilter GROUP BY time(5m), "delivery_stream_name" fill(0)'.format(  # noqa: E501
                RETENTION_POLICY, FIREHOSE_MEASUREMENT
            ),
            rawQuery=RAW_QUERY,
        ),
    ]

    series_overrides = [
        {
            "alias": FIREHOSE_INCOMING_RECORDS_ALIAS,
            "color": colors.ORANGE,
        },
        {
            "alias": FIREHOSE_DELIVERY_TO_S3_ALIAS,
            "color": colors.YELLOW,
        },
        {
            "alias": FIREHOSE_DELIVERY_TO_S3_SUCCESS_ALIAS,
            "color": colors.GREEN,
            "zindex": 1,
        },
    ]

    return Graph(
        title="Firehose: $firehose",
        dataSource=influxdb_data_source,
        targets=targets,
        yAxes=y_axes,
        seriesOverrides=series_overrides,
        transparent=TRANSPARENT,
        editable=EDITABLE,
        bars=True,
        lines=False,
    ).auto_ref_ids()
def lambda_generate_memory_utilization_graph(
    name: str,
    cloudwatch_data_source: str,
    lambda_insights_namespace: str,
    *args,
    **kwargs,
) -> Graph:
    """Generate Lambda memory utilization graph"""
    targets = [
        CloudwatchMetricsTarget(
            alias="used_memory_max",
            namespace=lambda_insights_namespace,
            statistics=["Maximum"],
            metricName="used_memory_max",
            dimensions={"function_name": name},
        ),
        CloudwatchMetricsTarget(
            alias="allocated_memory",
            namespace=lambda_insights_namespace,
            statistics=["Maximum"],
            metricName="total_memory",
            dimensions={"function_name": name},
        ),
    ]

    yAxes = YAxes(YAxis(format="decmbytes"))

    seriesOverrides = [
        {
            "alias": "used_memory_max",
            "points": False,
            "color": colors.GREEN,
        },
        {
            "alias": "allocated_memory",
            "points": False,
            "color": colors.RED,
            "fill": 0,
        },
    ]

    alert = None

    return Graph(
        title="Lambda Memory Utilization",
        dataSource=cloudwatch_data_source,
        targets=targets,
        seriesOverrides=seriesOverrides,
        yAxes=yAxes,
        transparent=TRANSPARENT,
        editable=EDITABLE,
        alert=alert,
        alertThreshold=ALERT_THRESHOLD,
        # gridPos=GridPos(8, 12, 12, 0)
    ).auto_ref_ids()
def generate_req_count_graph(
    cloudwatch_data_source: str,
    loadbalancer: str,
    target_group: str,
    grid_pos: GridPos,
) -> Graph:
    """Generate request count graph"""
    request_count_alias = "RequestCount Per Service"
    request_count_per_target_alias = "RequestCount Per Container"

    targets = [
        CloudwatchMetricsTarget(
            alias=request_count_alias,
            namespace="AWS/ApplicationELB",
            statistics=["Sum"],
            metricName="RequestCount",
            dimensions={
                "LoadBalancer": loadbalancer,
                "TargetGroup": target_group,
            },
        ),
        CloudwatchMetricsTarget(
            alias=request_count_per_target_alias,
            namespace="AWS/ApplicationELB",
            statistics=["Sum"],
            metricName="RequestCountPerTarget",
            dimensions={
                "LoadBalancer": loadbalancer,
                "TargetGroup": target_group,
            },
        ),
    ]

    seriesOverrides = [
        {
            "alias": request_count_alias,
            "color": colors.YELLOW,
            "fill": 0,
        },
        {
            "alias": request_count_per_target_alias,
            "color": colors.GREEN,
            "fill": 0,
        },
    ]

    return Graph(
        title="Requests",
        dataSource=cloudwatch_data_source,
        targets=targets,
        seriesOverrides=seriesOverrides,
        transparent=TRANSPARENT,
        editable=EDITABLE,
        gridPos=grid_pos,
    ).auto_ref_ids()
def generate_elasticsearch_status_red_alert_graph(
    name: str,
    client_id: str,
    cloudwatch_data_source: str,
    notifications: List[str],
) -> Graph:
    """Generate Elasticsearch status red alert graph"""
    y_axes = YAxes(
        YAxis(format=SHORT_FORMAT),
        YAxis(format=SHORT_FORMAT),
    )

    targets = [
        CloudwatchMetricsTarget(
            alias="Red status",
            namespace=NAMESPACE,
            period="1m",
            statistics=["Maximum"],
            dimensions={
                "DomainName": name,
                "ClientId": client_id,
            },
            metricName="ClusterStatus.red",
        ),
    ]

    alert = None
    if notifications:
        alert = Alert(
            name="Elasticsearch is in status red",
            message="Elasticsearch is in status red",
            executionErrorState="alerting",
            alertConditions=[
                AlertCondition(
                    Target(refId=ALERT_REF_ID),
                    timeRange=TimeRange("5m", "now"),
                    evaluator=GreaterThan(0),
                    reducerType=RTYPE_MAX,
                    operator=OP_OR,
                ),
            ],
            frequency="2m",
            gracePeriod="2m",
            notifications=notifications,
        )

    return Graph(
        title="Status RED alerts",
        dataSource=cloudwatch_data_source,
        targets=targets,
        yAxes=y_axes,
        transparent=TRANSPARENT,
        editable=EDITABLE,
        bars=True,
        lines=False,
        alert=alert,
    ).auto_ref_ids()
def generate_elasticache_redis_swap_and_memory_usage_graph(
    cache_cluster_id: str, cloudwatch_data_source: str
) -> Graph:
    """Generate ElastiCache Redis swap and memory usage graph"""
    y_axes = single_y_axis(format=BYTES)
    aliases = {
        "bytes": "Bytes used for cache",
        "swap": "Swap Usage",
    }

    targets = [
        CloudwatchMetricsTarget(
            alias=aliases["bytes"],
            namespace=NAMESPACE,
            period="1m",
            statistics=["Maximum"],
            dimensions={"CacheClusterId": cache_cluster_id},
            metricName="BytesUsedForCache",
        ),
        CloudwatchMetricsTarget(
            alias=aliases["swap"],
            namespace=NAMESPACE,
            period="1m",
            statistics=["Maximum"],
            dimensions={"CacheClusterId": cache_cluster_id},
            metricName="SwapUsage",
        ),
    ]

    series_overrides = [
        {
            "alias": aliases["swap"],
            "color": colors.BLUE,
            "lines": True,
            "bars": False,
        },
        {
            "alias": aliases["bytes"],
            "color": colors.GREEN,
            "lines": True,
            "bars": False,
        },
    ]

    return Graph(
        title="Memory and Swap usage",
        dataSource=cloudwatch_data_source,
        targets=targets,
        yAxes=y_axes,
        seriesOverrides=series_overrides,
        transparent=TRANSPARENT,
        editable=EDITABLE,
        bars=True,
        lines=False,
    ).auto_ref_ids()
def db_operations_errors(grr_component):
    return Graph(
        title="Database Operations Errors Rate by Call",
        targets=[
            Target(
                expr='sum by (call) (rate(db_request_errors_total{{job="grr_{0}"}}[10m]))'.format(grr_component),
                legendFormat="{{call}}",
            ),
        ])
def db_operations_latency(grr_component):
    return Graph(
        title="Database Operations Latency by Call",
        targets=[
            Target(
                expr='sum by (call) (rate(db_request_latency_sum{{job="grr_{0}"}}[10m]) / rate(db_request_latency_count{{job="grr_{0}"}}[10m]))'.format(grr_component),
                legendFormat="{{call}}",
            ),
        ])
def sum_process_memory_bytes(grr_component):
    return Graph(
        title="Sum of Process Memory Bytes (across all instances)",
        targets=[
            Target(
                expr='sum(process_resident_memory_bytes{{job="grr_{}"}})'.format(grr_component),
                legendFormat="Resident Memory",
            ),
        ])
def resource_stats(collapse: bool) -> Row:
    return Row(
        title="Task stats",
        collapse=collapse,
        panels=[
            Graph(
                title="Pending tasks",
                dataSource=DATASOURCE,
                targets=[
                    Target(
                        expr='sum(kube_pod_container_status_waiting * on(pod) group_left(label_execution_id, label_task_name, label_node_id, label_workflow_name) kube_pod_labels{label_execution_id !="",namespace=~"$project-$domain",label_workflow_name=~"$workflow"}) by (namespace, label_execution_id, label_task_name, label_node_id, label_workflow_name) > 0',
                        refId='A',
                    ),
                ],
                yAxes=single_y_axis(format=SHORT_FORMAT),
            ),
            Graph(
                title="Memory Usage Percentage",
                dataSource=DATASOURCE,
                targets=[
                    Target(
                        expr='(max(container_memory_rss{image!=""} * on(pod) group_left(label_execution_id, label_task_name, label_node_id, label_workflow_name) kube_pod_labels{label_execution_id !="",namespace=~"$project-$domain",label_workflow_name=~"$workflow"} * on(pod) group_left(phase) kube_pod_status_phase{phase="Running"}) by (namespace, pod, label_execution_id, label_task_name, label_node_id, label_workflow_name) / max(kube_pod_container_resource_limits_memory_bytes{container!=""} * on(pod) group_left(label_execution_id, label_task_name, label_node_id, label_workflow_name) kube_pod_labels{label_execution_id !=""} * on(pod) group_left(phase) kube_pod_status_phase{phase="Running"}) by (namespace, pod, label_execution_id, label_task_name, label_node_id, label_workflow_name)) > 0',
                        refId='A',
                    ),
                ],
                yAxes=single_y_axis(format=SHORT_FORMAT),
            ),
            Graph(
                title="CPU Usage Percentage",
                dataSource=DATASOURCE,
                targets=[
                    Target(
                        expr='(sum(rate(container_cpu_usage_seconds_total{image!=""}[2m]) * on(pod) group_left(label_execution_id, label_task_name, label_node_id, label_workflow_name) kube_pod_labels{label_execution_id !="",namespace=~"$project-$domain",label_workflow_name=~"$workflow"} * on(pod) group_left(phase) kube_pod_status_phase{phase="Running"}) by (namespace, pod, label_execution_id, label_task_name, label_node_id, label_workflow_name) / sum(kube_pod_container_resource_limits_cpu_cores{container!=""} * on(pod) group_left(label_execution_id, label_task_name, label_node_id, label_workflow_name) kube_pod_labels{label_execution_id !=""} * on(pod) group_left(phase) kube_pod_status_phase{phase="Running"}) by (namespace, pod, label_execution_id, label_task_name, label_node_id, label_workflow_name)) > 0',
                        refId='A',
                    ),
                ],
                yAxes=single_y_axis(format=SHORT_FORMAT),
            ),
        ])
def round_latency(interval: int = 1) -> Graph:
    return Graph(
        title="round Latency by quantile",
        dataSource=DATASOURCE,
        targets=[
            Target(
                expr=f'sum(rate(flyte:propeller:all:round:raw_unlabeled_ms[{interval}m])) by (quantile)',
                refId='A',
            ),
        ],
        yAxes=single_y_axis(format=MILLISECONDS_FORMAT),
    )
def node_input_latency() -> Graph:
    return Graph(
        title="Node Input latency by quantile and workflow",
        dataSource=DATASOURCE,
        targets=[
            Target(
                expr='sum(flyte:propeller:all:node:node_input_latency_ms) by (quantile, wf)',
                refId='A',
            ),
        ],
        yAxes=single_y_axis(format=MILLISECONDS_FORMAT),
    )
def round_latency_per_wf(interval: int = 1) -> Graph:
    return Graph(
        title="round Latency per workflow",
        dataSource=DATASOURCE,
        targets=[
            Target(
                expr=f'sum(rate(flyte:propeller:all:round:raw_ms[{interval}m])) by (wf)',
                refId='A',
            ),
        ],
        yAxes=single_y_axis(format=MILLISECONDS_FORMAT),
    )
def api_latency(api: str, interval: int = 1) -> Graph:
    return Graph(
        title=f"{api} Latency",
        dataSource=DATASOURCE,
        targets=[
            Target(
                expr=f'sum(rate(flyte:admin:{api}:duration_ms[{interval}m])) by (quantile)',
                refId='A',
            ),
        ],
        yAxes=single_y_axis(format=SECONDS_FORMAT),
    )
def generate_rds_transaction_id_graph(
    name: str, cloudwatch_data_source: str, notifications: List[str]
):
    """Generate RDS transaction IDs used graph"""
    y_axes = single_y_axis(format=SHORT_FORMAT)
    targets = [
        CloudwatchMetricsTarget(
            alias="Transaction ids used",
            metricName="MaximumUsedTransactionIDs",
            statistics=["Maximum"],
            namespace=NAMESPACE,
            dimensions={"DBInstanceIdentifier": name},
            period="1m",
            refId=ALERT_REF_ID,
        ),
    ]

    alert = None
    if notifications:
        alert = Alert(
            name="{} transaction ids used Errors".format(name),
            message="{} is having transaction ids used errors".format(name),
            executionErrorState="alerting",
            alertConditions=[
                AlertCondition(
                    Target(refId=ALERT_REF_ID),
                    timeRange=TimeRange("5m", "now"),
                    evaluator=GreaterThan(1000000000),
                    reducerType=RTYPE_MAX,
                    operator=OP_AND,
                )
            ],
            gracePeriod="2m",
            frequency="2m",
            notifications=notifications,
        )

    return Graph(
        title="Transaction ids used",
        dataSource=cloudwatch_data_source,
        targets=targets,
        yAxes=y_axes,
        transparent=TRANSPARENT,
        editable=EDITABLE,
        bars=False,
        lines=True,
        alert=alert,
    ).auto_ref_ids()
def generate_desired_count_graph(
    name: str,
    cluster_name: str,
    max: int,
    cloudwatch_data_source: str,
    notifications: List[str],
    grid_pos: GridPos,
):
    targets = [
        CloudwatchMetricsTarget(
            alias="Containers",
            namespace=CONTAINER_INSIGHTS_NAMESPACE,
            statistics=["Maximum"],
            metricName="DesiredTaskCount",
            dimensions={
                "ServiceName": name,
                "ClusterName": cluster_name,
            },
            refId=ALERT_REF_ID,
        ),
    ]

    alert = None
    if notifications and max > 1:
        alert = Alert(
            name="{} Desired count of containers nearing the max".format(name),
            message="{} is having Desired count of containers nearing the max".format(name),
            executionErrorState="alerting",
            alertConditions=[
                AlertCondition(
                    Target(refId=ALERT_REF_ID),
                    timeRange=TimeRange("15m", "now"),
                    evaluator=GreaterThan(0.9 * max),  # 90% of max
                    reducerType=RTYPE_MAX,
                    operator=OP_AND,
                )
            ],
            gracePeriod="1m",
            notifications=notifications,
        )

    return Graph(
        title="Desired Tasks",
        dataSource=cloudwatch_data_source,
        targets=targets,
        transparent=TRANSPARENT,
        editable=EDITABLE,
        alert=alert,
        gridPos=grid_pos,
        alertThreshold=ALERT_THRESHOLD,
    ).auto_ref_ids()
def generate_rds_database_connections_graph(name: str, cloudwatch_data_source: str):
    """Generate RDS database connections graph"""
    y_axes = single_y_axis(format=SHORT_FORMAT)

    min_alias = "min"
    max_alias = "max"
    mean_alias = "mean"

    targets = [
        CloudwatchMetricsTarget(
            alias=max_alias,
            namespace=NAMESPACE,
            dimensions={"DBInstanceIdentifier": name},
            metricName="DatabaseConnections",
            statistics=["Maximum"],
            period="1m",
            refId=ALERT_REF_ID,
        ),
        CloudwatchMetricsTarget(
            alias=mean_alias,
            metricName="DatabaseConnections",
            statistics=["Average"],
            namespace=NAMESPACE,
            dimensions={"DBInstanceIdentifier": name},
            period="1m",
        ),
        CloudwatchMetricsTarget(
            alias=min_alias,
            metricName="DatabaseConnections",
            statistics=["Minimum"],
            namespace=NAMESPACE,
            dimensions={"DBInstanceIdentifier": name},
            period="1m",
        ),
    ]

    series_overrides = get_series_overrides(min_alias, mean_alias, max_alias)

    return Graph(
        title="Database connections",
        dataSource=cloudwatch_data_source,
        targets=targets,
        yAxes=y_axes,
        seriesOverrides=series_overrides,
        transparent=TRANSPARENT,
        editable=EDITABLE,
        bars=False,
        lines=True,
    ).auto_ref_ids()
def generate_rds_network_throughput_graph(name: str, cloudwatch_data_source: str):
    """Generate RDS network throughput graph"""
    y_axes = single_y_axis(format=BYTES_SEC, min=None)
    targets = [
        CloudwatchMetricsTarget(
            alias="RX",
            metricName="NetworkReceiveThroughput",
            statistics=["Maximum"],
            namespace=NAMESPACE,
            dimensions={"DBInstanceIdentifier": name},
            period="1m",
        ),
        CloudwatchMetricsTarget(
            alias="TX",
            metricName="NetworkTransmitThroughput",
            statistics=["Maximum"],
            namespace=NAMESPACE,
            dimensions={"DBInstanceIdentifier": name},
            period="1m",
        ),
    ]

    series_overrides = [
        {
            "alias": "TX",
            "color": colors.GREEN,
            "transform": "negative-Y",
            "fillGradient": 10,
        },
        {
            "alias": "RX",
            "color": colors.YELLOW,
            "fillGradient": 10,
        },
    ]

    return Graph(
        title="Network throughput",
        dataSource=cloudwatch_data_source,
        targets=targets,
        yAxes=y_axes,
        transparent=TRANSPARENT,
        editable=EDITABLE,
        bars=False,
        lines=True,
        seriesOverrides=series_overrides,
    ).auto_ref_ids()
def avg_cpu_usage_percentage(grr_component):
    return Graph(
        title="CPU Usage",
        targets=[
            Target(
                expr='avg(rate(process_cpu_seconds_total{{job="grr_{}"}}[30s])) * 100'.format(grr_component),
                legendFormat="Average Process CPU Usage in %",
            ),
        ],
        yAxes=YAxes(left=YAxis(max=105)),
    )
def threadpool_outstanding_tasks_vs_threads_num(grr_component):
    return Graph(
        title="Outstanding Tasks vs. Number of Threads",
        targets=[
            Target(
                expr='sum(threadpool_outstanding_tasks{{job="grr_{}"}})'.format(grr_component),
                legendFormat="Outstanding Tasks",
            ),
            Target(
                expr='sum(threadpool_threads{{job="grr_{}"}})'.format(grr_component),
                legendFormat="Threads",
            ),
        ])
def threadpool_cpu_usage(grr_component):
    return Graph(
        title="Threadpool Average CPU Usage",
        targets=[
            Target(
                expr='avg(rate(threadpool_cpu_use{{job="grr_{}"}}[30s])) * 100'.format(grr_component),
                legendFormat="Average Process CPU Usage in % (over all jobs & pools)",
            ),
        ],
        yAxes=YAxes(left=YAxis(max=105)),
    )
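# Hypothetical usage sketch (not part of the original file): each GRR helper
# above takes a `grr_component` job label (e.g. "frontend" or "worker") and
# returns a single Graph, so a per-component row could be built as below.
# `grr_component_row` and the row title are illustrative names; Row is the
# grafanalib.core class already used elsewhere in this code.
def grr_component_row(grr_component):
    # Groups the per-component health graphs into one collapsible row.
    return Row(
        title="GRR {} health".format(grr_component),
        panels=[
            number_of_active_processes_graph(grr_component),
            sum_process_memory_bytes(grr_component),
            avg_cpu_usage_percentage(grr_component),
            threadpool_outstanding_tasks_vs_threads_num(grr_component),
            threadpool_cpu_usage(grr_component),
            db_operations_latency(grr_component),
            db_operations_errors(grr_component),
        ],
    )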