def test_auto_refids_preserves_provided_ids():
    """
    auto_ref_ids() provides refIds for all targets without refIds
    already set.
    """
    dashboard = G.Dashboard(
        title="Test dashboard",
        rows=[
            G.Row(panels=[
                G.Graph(
                    title="CPU Usage by Namespace (rate[5m])",
                    targets=[
                        G.Target(
                            expr='whatever #Q',
                            legendFormat='{{namespace}}',
                        ),
                        G.Target(
                            expr='hidden whatever',
                            legendFormat='{{namespace}}',
                            refId='Q',
                        ),
                        G.Target(expr='another target'),
                    ],
                ).auto_ref_ids()
            ]),
        ],
    )
    assert dashboard.rows[0].panels[0].targets[0].refId == 'A'
    assert dashboard.rows[0].panels[0].targets[1].refId == 'Q'
    assert dashboard.rows[0].panels[0].targets[2].refId == 'B'

    dashboard = G.Dashboard(
        title="Test dashboard",
        panels=[
            G.Graph(
                title="CPU Usage by Namespace (rate[5m])",
                dataSource="My data source",
                targets=[
                    G.Target(
                        expr='whatever #Q',
                        legendFormat='{{namespace}}',
                    ),
                    G.Target(
                        expr='hidden whatever',
                        legendFormat='{{namespace}}',
                        refId='Q',
                    ),
                    G.Target(expr='another target'),
                ],
                yAxes=G.YAxes(
                    G.YAxis(format=G.SHORT_FORMAT, label="CPU seconds"),
                    G.YAxis(format=G.SHORT_FORMAT),
                ),
                gridPos=G.GridPos(h=1, w=24, x=0, y=8),
            ).auto_ref_ids()
        ],
    ).auto_panel_ids()
    assert dashboard.panels[0].targets[0].refId == 'A'
    assert dashboard.panels[0].targets[1].refId == 'Q'
    assert dashboard.panels[0].targets[2].refId == 'B'
def PromGraph(data_source, title, expressions, **kwargs):
    """Create a graph that renders Prometheus data.

    :param str data_source: The name of the data source that provides
        Prometheus data.
    :param title: The title of the graph.
    :param expressions: List of tuples of (legend, expr), where 'expr' is a
        Prometheus expression, or a list of dicts whose keys are Target's
        arguments.
    :param kwargs: Passed on to Graph.
    """
    letters = string.ascii_uppercase
    expressions = list(expressions)
    if len(expressions) > len(letters):
        raise ValueError(
            'Too many expressions. Can support at most {}, but got {}'.format(
                len(letters), len(expressions)))
    if all(isinstance(expr, dict) for expr in expressions):
        targets = [
            G.Target(refId=refId, **args)
            for (args, refId) in zip(expressions, letters)
        ]
    else:
        # Pass legend/expr by keyword: Target's second positional field is
        # not legendFormat, so positional arguments would be misassigned.
        targets = [
            G.Target(expr=expr, legendFormat=legend, refId=refId)
            for ((legend, expr), refId) in zip(expressions, letters)
        ]
    return G.Graph(title=title, dataSource=data_source, targets=targets,
                   **kwargs)
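# A minimal usage sketch for PromGraph above; the data source name and the
# PromQL expressions are invented for illustration, and both accepted shapes
# of `expressions` are shown.
example = PromGraph(
    data_source='Prometheus',
    title='HTTP request rate',
    expressions=[
        ('2xx', 'sum(rate(http_requests_total{code=~"2.."}[5m]))'),
        ('5xx', 'sum(rate(http_requests_total{code=~"5.."}[5m]))'),
    ],
)
# Equivalently, as dicts of Target keyword arguments:
example = PromGraph(
    data_source='Prometheus',
    title='HTTP request rate',
    expressions=[
        {'expr': 'sum(rate(http_requests_total[5m]))',
         'legendFormat': 'total'},
    ],
)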
def filesystem_usage(datasource):
    return G.Graph(
        title="Filesystem Usage",
        dataSource=datasource,
        xAxis=X_TIME,
        yAxes=[
            G.YAxis(
                format="percent",
            ),
            G.YAxis(
                show=False,
            ),
        ],
        targets=[
            G.Target(
                # Get the proportion used of each filesystem on a volume from
                # a PersistentVolumeClaim on each node of the cluster.  It's
                # hard to figure out the role each filesystem serves from
                # this graph (since all we get is the PVC name).  Better than
                # nothing, though.  Hopefully later we can do better.
                expr="""
                100
                * filesystem_used_bytes{volume=~"pvc-.*"}
                / filesystem_size_bytes{volume=~"pvc-.*"}
                """,
                legendFormat="{{volume}}",
                refId="A",
            ),
        ],
    )
def process_open_fds(datasource):
    return G.Graph(
        title="Open File Descriptors",
        dataSource=datasource,
        xAxis=X_TIME,
        yAxes=[
            G.YAxis(
                format="none",
                label="Count",
            ),
            G.YAxis(
                show=False,
            ),
        ],
        targets=[
            G.Target(
                expr="""
                process_open_fds{pod=~".+"}
                """,
                refId="A",
                legendFormat="{{pod}}",
            ),
        ],
    )
def memory_usage(datasource):
    return G.Graph(
        title="Memory Usage",
        dataSource=datasource,
        xAxis=X_TIME,
        yAxes=[
            G.YAxis(
                # 2 ^ 30 bytes
                format="gbytes",
                label="Memory",
            ),
            G.YAxis(
                show=False,
            ),
        ],
        targets=[
            G.Target(
                expr="""
                sum(machine_memory_bytes) / 2 ^ 30
                """,
                legendFormat="Total Physical Memory",
                refId="A",
            ),
            G.Target(
                expr="""
                rss:container_memory:total / 2 ^ 30
                """,
                legendFormat="Total Container RSS",
                refId="B",
            ),
        ],
    )
def test_auto_id():
    """auto_panel_ids() provides IDs for all panels without IDs already set."""
    dashboard = G.Dashboard(
        title="Test dashboard",
        rows=[
            G.Row(panels=[
                G.Graph(
                    title="CPU Usage by Namespace (rate[5m])",
                    dataSource="My data source",
                    targets=[
                        G.Target(
                            expr='whatever',
                            legendFormat='{{namespace}}',
                            refId='A',
                        ),
                    ],
                    yAxes=[
                        G.YAxis(format=G.SHORT_FORMAT, label="CPU seconds"),
                        G.YAxis(format=G.SHORT_FORMAT),
                    ],
                )
            ]),
        ],
    ).auto_panel_ids()
    assert dashboard.rows[0].panels[0].id == 1
def unhandled_errors(datasource):
    return G.Graph(
        title="Unhandled Errors",
        dataSource=datasource,
        xAxis=X_TIME,
        yAxes=[
            G.YAxis(
                format="none",
                label="Count",
            ),
            G.YAxis(
                show=False,
            ),
        ],
        targets=[
            G.Target(
                expr="""
                sum(s4_unhandled_error_counter)
                """,
                refId="A",
                legendFormat="Total Unhandled Errors",
            ),
        ],
    )
def last_convergence(datasource):
    return G.Graph(
        title="Since Last Convergence",
        dataSource=datasource,
        xAxis=X_TIME,
        yAxes=[
            G.YAxis(
                format="none",
                label="Period",
            ),
            G.YAxis(
                show=False,
            ),
        ],
        targets=[
            G.Target(
                expr="""
                time()
                - max(
                    s4_last_convergence_succeeded{
                        pod=~"subscription-converger-.*"
                    }
                )
                """,
                refId="A",
                legendFormat="Time Since Last Convergence Success",
            ),
        ],
    )
def test_graph_panel():
    data_source = 'dummy data source'
    targets = ['dummy_prom_query']
    title = 'dummy title'
    graph = G.Graph(data_source, targets, title)
    data = graph.to_json_data()
    assert data['targets'] == targets
    assert data['datasource'] == data_source
    assert data['title'] == title
    assert 'alert' not in data
def simple_graph(title, exprs, yAxes=None):
    if not isinstance(exprs, (list, tuple)):
        exprs = [exprs]
    return g.Graph(
        title=title,
        dataSource="$source",
        # One graph per row.
        span=g.TOTAL_SPAN,
        targets=[g.Target(expr=expr) for expr in exprs],
        yAxes=yAxes or g.YAxes(),
        tooltip=DECREASING_ORDER_TOOLTIP,
    )
def AddGraphPanel(self, title: Text, raw_sql: Text, y_axis_title: Text):
    self.AddPanel(
        core.Graph(
            title=title,
            targets=[
                core.SqlTarget(
                    rawSql=raw_sql,
                    format=core.TABLE_TARGET_FORMAT,
                ),
            ],
            yAxes=core.YAxes(core.YAxis(format=y_axis_title)),
        ))
def test_graphite_target_full():
    dashboard = G.Graph(
        title="Graphite target full test",
        dataSource="graphite datasource",
        targets=[
            G.GraphiteTarget(refId="A", target="foo.bar"),
            G.GraphiteTarget(refId="B", target="sumSeries(#A,foo2.bar2)"),
        ])
    dashboard.resolve_graphite_targets()
    for target in dashboard.targets:
        assert target.targetFull != ""
        assert not re.findall("#[A-Z]", target.targetFull)
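# Presumably resolve_graphite_targets() expands '#<refId>' references inline,
# so target B's targetFull would read something like
# 'sumSeries(foo.bar,foo2.bar2)'; the assertions above only verify that no
# unresolved references survive.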
def s4_customer_deployments(datasource):
    return G.Graph(
        title="Customer Deployments",
        dataSource=datasource,
        xAxis=X_TIME,
        yAxes=[
            G.YAxis(
                format="none",
                label="Total Customer Deployments",
                min=0,
                max=100,
            ),
            G.YAxis(
                show=False,
            ),
        ],
        targets=[
            G.Target(
                # Each replicaset and pod ends up with its own series.  Label
                # these more succinctly.  Leave them distinct in case it is
                # interesting to see where restarts have happened.
                expr="""
                label_replace(
                    s4_deployment_gauge{pod=~"subscription-converger-.*"},
                    "shortpod",
                    "# Deploys ($1)",
                    "pod",
                    "subscription-converger-(.*)"
                )
                """,
                refId="A",
                legendFormat="{{shortpod}}",
            ),
            G.Target(
                # As above.
                expr="""
                label_replace(
                    s4_running_pod_gauge{pod=~"subscription-converger-.*"},
                    "shortpod",
                    "# Running ($1)",
                    "pod",
                    "subscription-converger-(.*)"
                )
                """,
                refId="B",
                legendFormat="{{shortpod}}",
            ),
        ],
    )
def run(self):
    templateList = [
        G.Template(
            default="",
            dataSource="default",
            name="serverid",
            label="ServerID",
            query="label_values(serverid)"),
    ]
    dashboard = G.Dashboard(title=self.options.title,
                            templating=G.Templating(list=templateList))

    # Simple table processing - could be enhanced to use GridPos etc.
    for metric in metrics:
        if 'section' in metric:
            dashboard.rows.append(
                G.Row(title=metric['section'], showTitle=True))
            continue
        if 'row' in metric:
            dashboard.rows.append(G.Row(title='', showTitle=False))
            continue
        graph = G.Graph(title=metric['title'],
                        dataSource='default',
                        maxDataPoints=1000,
                        legend=G.Legend(
                            show=True, alignAsTable=True,
                            min=True, max=True, avg=True, current=True,
                            total=True, sort='max', sortDesc=True),
                        yAxes=G.single_y_axis())
        ref_id = 'A'
        for texp in metric['expr']:
            graph.targets.append(G.Target(expr=texp, refId=ref_id))
            ref_id = chr(ord(ref_id) + 1)
        dashboard.rows[-1].panels.append(graph)

    # Auto-number panels - returns new dashboard
    dashboard = dashboard.auto_panel_ids()

    s = io.StringIO()
    write_dashboard(dashboard, s)
    print("""{
    "dashboard": %s
}
""" % s.getvalue())
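# A sketch of the `metrics` structure that run() above consumes (names and
# expressions invented for illustration): a dict with 'section' starts a
# titled row, one with 'row' starts an untitled row, and one with
# 'title'/'expr' adds a graph whose expressions get refIds A, B, ...
metrics = [
    {'section': 'CPU'},
    {'title': 'CPU usage', 'expr': ['rate(node_cpu_seconds_total[5m])']},
    {'row': True},
    {'title': 'Load average', 'expr': ['node_load1', 'node_load5']},
]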
def _row(title):
    return core.Row(panels=[
        core.Graph(
            title=title,
            dataSource='prometheus',
            targets=[
                core.Target(
                    expr=title,
                    legendFormat='{{namespace}}',
                ),
            ],
            yAxes=[
                core.YAxis(format=core.NO_FORMAT),
                core.YAxis(format=core.SHORT_FORMAT),
            ]),
    ])
def test_graph_panel_threshold():
    data_source = 'dummy data source'
    targets = ['dummy_prom_query']
    title = 'dummy title'
    thresholds = [
        G.GraphThreshold(20.0),
        G.GraphThreshold(40.2, colorMode="ok"),
    ]
    graph = G.Graph(data_source, targets, title, thresholds=thresholds)
    data = graph.to_json_data()
    assert data['targets'] == targets
    assert data['datasource'] == data_source
    assert data['title'] == title
    assert 'alert' not in data
    assert data['thresholds'] == thresholds
def test_serialization_cloudwatch_metrics_target():
    """Serializing a graph doesn't explode."""
    graph = G.Graph(
        title="Lambda Duration",
        dataSource="Cloudwatch data source",
        targets=[
            C.CloudwatchMetricsTarget(),
        ],
        id=1,
        yAxes=G.YAxes(
            G.YAxis(format=G.SHORT_FORMAT, label="ms"),
            G.YAxis(format=G.SHORT_FORMAT),
        ),
    )
    stream = StringIO()
    _gen.write_dashboard(graph, stream)
    assert stream.getvalue() != ''
def test_serialization_humio_metrics_target():
    """Serializing a graph doesn't explode."""
    graph = G.Graph(
        title="Humio Logs",
        dataSource="Humio data source",
        targets=[
            H.HumioTarget(),
        ],
        id=1,
        yAxes=G.YAxes(
            G.YAxis(format=G.SHORT_FORMAT, label="ms"),
            G.YAxis(format=G.SHORT_FORMAT),
        ),
    )
    stream = StringIO()
    _gen.write_dashboard(graph, stream)
    assert stream.getvalue() != ''
def test_graph_panel_alert():
    data_source = 'dummy data source'
    targets = ['dummy_prom_query']
    title = 'dummy title'
    alert = [
        G.AlertCondition(G.Target(), G.Evaluator('a', 'b'),
                         G.TimeRange('5', '6'), 'd', 'e')
    ]
    thresholds = [
        G.GraphThreshold(20.0),
        G.GraphThreshold(40.2, colorMode="ok"),
    ]
    graph = G.Graph(data_source, targets, title,
                    thresholds=thresholds, alert=alert)
    data = graph.to_json_data()
    assert data['targets'] == targets
    assert data['datasource'] == data_source
    assert data['title'] == title
    assert data['alert'] == alert
    # Panel thresholds are dropped from the JSON when an alert is set.
    assert data['thresholds'] == []
def cpu_usage(datasource, intervals):
    return G.Graph(
        title="CPU usage",
        dataSource=datasource,
        xAxis=X_TIME,
        yAxes=[
            G.YAxis(
                format="percent",
                label="Average",
                min=0,
                max=100,
            ),
            G.YAxis(
                format="percent",
                label="Average",
            ),
        ],
        targets=list(
            G.Target(
                # CPU usage (as a percentage of maximum possible) averaged
                # over a period is given as 100 times the sum (over all
                # containers) of the rate of increase (in seconds) divided
                # by the maximum possible increase (1 second per CPU).
                #
                # The sums are taken from recording rules because recomputing
                # them for every point on the graph for every graph request
                # becomes prohibitively expensive.  Only a few specific rates
                # are "recorded" and the ``interval`` parameter must match
                # one of those. :(
                #
                # See prometheus.yaml for the recording rules.
                expr="""
                100
                * cpu:container_usage_seconds:rate{}
                / cores:machine_cpu:total
                """.format(interval),
                legendFormat="CPU Usage ({} avg)".format(interval),
                refId=refId(n),
            )
            for n, interval in enumerate(intervals)
        ),
    )
def Graph(
    id,
    title,
    targets,
    dashLength=None,
    dashes=False,
    spaceLength=None,
    xAxis=None,
    yAxes=None,
    nullPointMode='connected',
):
    def merge_target(target):
        return {
            **{
                'intervalFactor': 2,
                'legendFormat': '',
                'refId': 'A',
                'step': 600,
            },
            **target,
        }

    targets = [merge_target(t) for t in targets]
    assert isinstance(yAxes, YAxes)
    return core.Graph(
        id=id,
        title=title,
        dashLength=dashLength,
        dashes=dashes,
        spaceLength=spaceLength,
        targets=targets,
        xAxis=xAxis,
        yAxes=yAxes,
        dataSource='prometheus',
        nullPointMode=nullPointMode,
        editable=False,
    )
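# For example (hypothetical target dict), merge_target keeps explicitly
# provided keys and fills in the defaults for the rest:
#
#   merge_target({'expr': 'up', 'refId': 'B'})
#   # => {'intervalFactor': 2, 'legendFormat': '', 'refId': 'B',
#   #     'step': 600, 'expr': 'up'}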
def test_panel_extra_json():
    data_source = 'dummy data source'
    targets = ['dummy_prom_query']
    title = 'dummy title'
    extraJson = {
        'fillGradient': 6,
        'yaxis': {'align': True},
        'legend': {'avg': True},
    }
    graph = G.Graph(data_source, targets, title, extraJson=extraJson)
    data = graph.to_json_data()
    assert data['targets'] == targets
    assert data['datasource'] == data_source
    assert data['title'] == title
    assert 'alert' not in data
    assert data['fillGradient'] == 6
    assert data['yaxis']['align'] is True
    # Nested non-dict object should also be deep-updated
    assert data['legend']['max'] is False
    assert data['legend']['avg'] is True
def test_serialization():
    """Serializing a graph doesn't explode."""
    graph = G.Graph(
        title="CPU Usage by Namespace (rate[5m])",
        dataSource="My data source",
        targets=[
            G.Target(
                expr='namespace:container_cpu_usage_seconds_total:sum_rate',
                legendFormat='{{namespace}}',
                refId='A',
            ),
        ],
        id=1,
        yAxes=[
            G.YAxis(format=G.SHORT_FORMAT, label="CPU seconds / second"),
            G.YAxis(format=G.SHORT_FORMAT),
        ],
    )
    stream = StringIO()
    _gen.write_dashboard(graph, stream)
    assert stream.getvalue() != ''
def test_alert_thresholds():
    some_target_for_alert = G.GraphiteTarget(refId="A", target="foo.bar")
    graph = G.Graph(
        title="Graph with alert",
        targets=[some_target_for_alert],
        alert=G.Alert(
            name="alert name",
            message="alert message",
            alertConditions=[
                G.AlertCondition(
                    some_target_for_alert,
                    timeRange=G.TimeRange("5m", "now"),
                    evaluator=G.GreaterThan(10),
                    reducerType=G.RTYPE_MAX,
                    operator=G.OP_AND,
                )
            ]))
    stream = StringIO()
    _gen.write_dashboard(graph, stream)
    graph_json = json.loads(stream.getvalue())
    print(graph_json.keys())
    # There is a threshold.
    assert graph_json['thresholds'][0] is not None
def test_auto_refids():
    """
    auto_ref_ids() provides refIds for all targets without refIds
    already set.
    """
    dashboard = G.Dashboard(
        title="Test dashboard",
        rows=[
            G.Row(panels=[
                G.Graph(
                    title="CPU Usage by Namespace (rate[5m])",
                    targets=[
                        G.Target(expr="metric %d" % i)
                        for i in range(53)
                    ],
                ).auto_ref_ids()
            ]),
        ],
    )
    assert dashboard.rows[0].panels[0].targets[0].refId == 'A'
    assert dashboard.rows[0].panels[0].targets[25].refId == 'Z'
    assert dashboard.rows[0].panels[0].targets[26].refId == 'AA'
    assert dashboard.rows[0].panels[0].targets[51].refId == 'AZ'
    assert dashboard.rows[0].panels[0].targets[52].refId == 'BA'
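# The refIds asserted above follow a spreadsheet-style sequence: A..Z, then
# AA..AZ, BA, and so on.  A minimal sketch of one way to generate that
# sequence (not necessarily grafanalib's internal implementation):
import itertools
import string

def spreadsheet_ref_ids():
    for length in itertools.count(1):
        for letters in itertools.product(string.ascii_uppercase,
                                         repeat=length):
            yield ''.join(letters)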
def PromGraph(title, expressions, **kwargs):
    """Create a graph that renders Prometheus data.

    :param title: The title of the graph.
    :param expressions: List of tuples of (legend, expr), where 'expr' is a
        Prometheus expression.
    :param kwargs: Passed on to Graph.
    """
    letters = string.ascii_uppercase
    expressions = list(expressions)
    if len(expressions) > len(letters):
        raise ValueError(
            'Too many expressions. Can support at most {}, but got {}'.format(
                len(letters), len(expressions)))
    # Pass legend/expr by keyword: Target's second positional field is not
    # legendFormat, so positional arguments would be misassigned.
    targets = [
        G.Target(expr=expr, legendFormat=legend, refId=refId)
        for ((legend, expr), refId) in zip(expressions, letters)
    ]
    return G.Graph(title=title, dataSource=PROMETHEUS, targets=targets,
                   **kwargs)
def test_serialization_zabbix_target():
    """Serializing a graph doesn't explode."""
    graph = G.Graph(
        title="CPU Usage",
        dataSource="Zabbix data source",
        targets=[
            Z.zabbixMetricTarget(
                group="Zabbix Group",
                host="Zabbix Host",
                application="CPU",
                item="/CPU (load)/",
                functions=[
                    Z.ZabbixSetAliasFunction("View alias"),
                ]),
        ],
        id=1,
        yAxes=[
            G.YAxis(format=G.SHORT_FORMAT, label="CPU seconds / second"),
            G.YAxis(format=G.SHORT_FORMAT),
        ],
    )
    stream = StringIO()
    _gen.write_dashboard(graph, stream)
    assert stream.getvalue() != ''
def network_usage(datasource):
    return G.Graph(
        title="Network Usage",
        dataSource=datasource,
        xAxis=X_TIME,
        yAxes=[
            G.YAxis(
                # 2 ^ 20 bytes / second
                format="MBs",
                label="Transferred",
            ),
            G.YAxis(
                show=False,
            ),
        ],
        targets=[
            G.Target(
                # Get the rate of data received on the public interface
                # (eth0) for each entire node (id="/") over the last minute.
                expr="""
                receive:container_network_bytes:rate1m / 2 ^ 20
                """,
                legendFormat="receive",
                refId="A",
            ),
            G.Target(
                # And the rate of data sent.
                expr="""
                transmit:container_network_bytes:rate1m / 2 ^ 20
                """,
                legendFormat="transmit",
                refId="B",
            ),
        ],
    )
def test_serialization_opentsdb_target():
    """Serializing a graph doesn't explode."""
    graph = G.Graph(
        title="CPU Usage",
        dataSource="OpenTSDB data source",
        targets=[
            O.OpenTSDBTarget(
                metric='cpu',
                alias='$tag_instance',
                filters=[
                    O.OpenTSDBFilter(value='*', tag='instance',
                                     type='wildcard', groupBy=True),
                ]),
        ],
        id=1,
        yAxes=[
            G.YAxis(format=G.SHORT_FORMAT, label="CPU seconds / second"),
            G.YAxis(format=G.SHORT_FORMAT),
        ],
    )
    stream = StringIO()
    _gen.write_dashboard(graph, stream)
    assert stream.getvalue() != ''