def mkdashboard(title: str, datasources: Optional[List[str]] = None):
    """Build a dashboard with one panel per data source and return it as a dict.

    Args:
        title: Dashboard title.
        datasources: Data source names; one full-width panel is created per
            entry. Defaults to an empty list (dashboard with no panels).

    Returns:
        The dashboard rendered to JSON and parsed back into a plain ``dict``.
    """
    datasources = datasources or []

    # Build dashboard object model: one full-width (w=24) panel per source.
    # NOTE(review): every panel uses the same gridPos (y=0); Grafana will
    # stack them on import — confirm this is the intended layout.
    panels = [
        grafanalib.core.Panel(
            dataSource=datasource,
            gridPos={"h": 1, "w": 24, "x": 0, "y": 0},
        ).panel_json(overrides={})
        for datasource in datasources
    ]
    dashboard = grafanalib.core.Dashboard(title=title, panels=panels)

    # Render dashboard to JSON, then parse it back into a dict.
    # getvalue() replaces the original seek(0)/read() round-trip.
    buffer = StringIO()
    write_dashboard(dashboard, buffer)
    return json.loads(buffer.getvalue())
def test_serialization_azure_logs_target():
    """Serializing a graph doesn't explode."""
    # Realistic multi-line Kusto query to exercise string serialization.
    query = """AzureMetrics
| where TimeGenerated > ago(30d)
| extend tail_latency = Maximum / Average
| where MetricName == "Http5xx" or (MetricName == "HttpResponseTime" and Average >= 3) or (MetricName == "HttpResponseTime" and tail_latency >= 10 and Average >= 0.5)
| summarize dcount(TimeGenerated) by Resource
| order by dcount_TimeGenerated"""
    target = A.AzureLogsTarget(
        query=query,
        resource="/subscriptions/3a680d1a-9310-4667-9e6a-9fcd2ecddd86",
        subscription="3a680d1a-9310-4667-9e6a-9fcd2ecddd86",
        refId="Bad Minutes",
    )
    panel = G.GaugePanel(
        title="Test Logs",
        dataSource="default",
        targets=[target],
    )
    out = StringIO()
    _gen.write_dashboard(panel, out)
    assert out.getvalue() != ""
def test_serialization_cloudwatch_logs_insights_target():
    """Serializing a graph doesn't explode."""
    panel = G.Logs(
        title="Lambda Duration",
        dataSource="Cloudwatch data source",
        targets=[C.CloudwatchLogsInsightsTarget()],
        id=1,
        wrapLogMessages=True,
    )
    buf = StringIO()
    _gen.write_dashboard(panel, buf)
    assert buf.getvalue() != ''
def test_serialization_zabbix_trigger_panel():
    """Serializing a graph doesn't explode."""
    trigger = Z.ZabbixTrigger(
        group='Zabbix Group',
        application="",
        trigger="/trigger.regexp/",
        host="/zabbix.host/",
    )
    panel = Z.ZabbixTriggersPanel(
        id=1,
        title="Zabbix Triggers",
        dataSource="Zabbix data source",
        triggers=trigger,
    )
    buf = StringIO()
    _gen.write_dashboard(panel, buf)
    assert buf.getvalue() != ''
def export(self, output_file=''):
    """Render every metric in the registry into a Grafana dashboard and
    write it to *output_file* as JSON.

    Timer metrics become SingleStat gauge panels showing the mean
    (sum / count); all other metrics become Graph panels.

    Args:
        output_file: Path of the JSON file to write.
    """
    from baskerville.models.metrics.registry import metrics_registry

    panels = []
    # BUGFIX(review): the original condition also tested
    # `i == len(metrics_registry.registry)`, which can never be true for
    # enumerate() indices (they stop at len - 1); that dead test is removed.
    # At i == 0 this still appends an empty leading row — preserved to keep
    # the produced layout unchanged.
    for i, metric_name in enumerate(metrics_registry.registry):
        if i % 4 == 0:
            self.rows.append(Row(panels=panels))
            panels = []
        if 'timer' in metric_name:
            g = Gauge()
            # BUGFIX: was `g.maxValue = 0`, which was immediately overwritten
            # by the next line; `minValue` was clearly intended.
            g.minValue = 0
            g.maxValue = 100
            g.show = True
            g.thresholdMarkers = True
            panels.append(
                SingleStat(
                    title=metric_name,
                    dataSource=self.ds,
                    gauge=g,
                    targets=[
                        Target(
                            # Mean duration = accumulated time / sample count.
                            expr=f'({metric_name}_sum / {metric_name}_count)',
                            target=metric_name,
                            refId='A',
                            metric=metric_name,
                            datasource=self.ds,
                        )
                    ]))
        else:
            panels.append(
                Graph(
                    title=metric_name,
                    dataSource=self.ds,
                    targets=[
                        Target(
                            # Counters are exported with a `_total` suffix.
                            expr=f'{metric_name}_total'
                            if 'total' in metric_name else metric_name,
                            target=metric_name,
                            refId='A',
                            metric=metric_name,
                            datasource=self.ds,
                        )
                    ]))
    # Flush leftovers; each remaining panel gets its own row (as before).
    for panel in panels:
        self.rows.append(Row(panels=[panel]))
    self.dashboard = Dashboard(title=self.dash_title,
                               rows=self.rows).auto_panel_ids()
    with open(output_file, 'w') as f:
        write_dashboard(self.dashboard, f)
def run(self):
    """Build a dashboard from the global `metrics` list and print it as a
    JSON payload suitable for posting to the Grafana API."""
    template_list = [
        G.Template(default="",
                   dataSource="default",
                   name="serverid",
                   label="ServerID",
                   query="label_values(serverid)")
    ]
    dashboard = G.Dashboard(title=self.options.title,
                            templating=G.Templating(list=template_list))

    # Simple table processing - could be enhanced to use GridPos etc.
    for metric in metrics:
        # Section/row markers just open a new row; they carry no panels.
        if 'section' in metric:
            dashboard.rows.append(
                G.Row(title=metric['section'], showTitle=True))
            continue
        if 'row' in metric:
            dashboard.rows.append(G.Row(title='', showTitle=False))
            continue
        graph = G.Graph(title=metric['title'],
                        dataSource='default',
                        maxDataPoints=1000,
                        legend=G.Legend(show=True,
                                        alignAsTable=True,
                                        min=True,
                                        max=True,
                                        avg=True,
                                        current=True,
                                        total=True,
                                        sort='max',
                                        sortDesc=True),
                        yAxes=G.single_y_axis())
        # Targets get consecutive refIds: 'A', 'B', 'C', ...
        for offset, expression in enumerate(metric['expr']):
            graph.targets.append(
                G.Target(expr=expression, refId=chr(ord('A') + offset)))
        dashboard.rows[-1].panels.append(graph)

    # Auto-number panels - returns new dashboard
    dashboard = dashboard.auto_panel_ids()
    s = io.StringIO()
    write_dashboard(dashboard, s)
    print("""{
  "dashboard": %s
}
""" % s.getvalue())
def test_serialization_cloudwatch_metrics_target():
    """Serializing a graph doesn't explode."""
    axes = G.YAxes(
        G.YAxis(format=G.SHORT_FORMAT, label="ms"),
        G.YAxis(format=G.SHORT_FORMAT),
    )
    graph = G.Graph(
        title="Lambda Duration",
        dataSource="Cloudwatch data source",
        targets=[C.CloudwatchMetricsTarget()],
        id=1,
        yAxes=axes,
    )
    buf = StringIO()
    _gen.write_dashboard(graph, buf)
    assert buf.getvalue() != ''
def test_serialization_humio_metrics_target():
    """Serializing a graph doesn't explode."""
    axes = G.YAxes(
        G.YAxis(format=G.SHORT_FORMAT, label="ms"),
        G.YAxis(format=G.SHORT_FORMAT),
    )
    graph = G.Graph(
        title="Humio Logs",
        dataSource="Humio data source",
        targets=[H.HumioTarget()],
        id=1,
        yAxes=axes,
    )
    buf = StringIO()
    _gen.write_dashboard(graph, buf)
    assert buf.getvalue() != ''
def test_serialization_azure_graph_target():
    """Serializing a graph doesn't explode."""
    # Azure Resource Graph query spanning multiple lines.
    query = """Resources
| project name, type, location
| order by name asc"""
    panel = G.GaugePanel(
        title="Test Logs",
        dataSource="default",
        targets=[
            A.AzureLogsTarget(
                query=query,
                subscription="3a680d1a-9310-4667-9e6a-9fcd2ecddd86",
                refId="Resources",
            ),
        ],
    )
    out = StringIO()
    _gen.write_dashboard(panel, out)
    assert out.getvalue() != ""
def test_serialization_azure_metrics_target():
    """Serializing a graph doesn't explode."""
    target = A.AzureMonitorMetricsTarget(
        aggregation="Total",
        metricDefinition="Microsoft.Web/sites",
        metricName="Requests",
        metricNamespace="Microsoft.Web/sites",
        resourceGroup="test-grafana",
        resourceName="test-grafana",
        subscription="3a680d1a-9310-4667-9e6a-9fcd2ecddd86",
        refId="Requests",
    )
    panel = G.TimeSeries(
        title="Test Azure Monitor",
        dataSource="default",
        targets=[target],
    )
    out = StringIO()
    _gen.write_dashboard(panel, out)
    assert out.getvalue() != ""
def test_serialization():
    """Serializing a graph doesn't explode."""
    target = G.Target(
        expr='namespace:container_cpu_usage_seconds_total:sum_rate',
        legendFormat='{{namespace}}',
        refId='A',
    )
    graph = G.Graph(
        title="CPU Usage by Namespace (rate[5m])",
        dataSource="My data source",
        targets=[target],
        id=1,
        yAxes=[
            G.YAxis(format=G.SHORT_FORMAT, label="CPU seconds / second"),
            G.YAxis(format=G.SHORT_FORMAT),
        ],
    )
    out = StringIO()
    _gen.write_dashboard(graph, out)
    assert out.getvalue() != ''
def test_alert_thresholds():
    """A graph with an alert serializes with a threshold entry derived from
    the alert's evaluator."""
    some_target_for_alert = G.GraphiteTarget(refId="A", target="foo.bar")
    graph = G.Graph(title="Graph with alert",
                    targets=[some_target_for_alert],
                    alert=G.Alert(name="alert name",
                                  message="alert message",
                                  alertConditions=[
                                      G.AlertCondition(
                                          some_target_for_alert,
                                          timeRange=G.TimeRange("5m", "now"),
                                          evaluator=G.GreaterThan(10),
                                          reducerType=G.RTYPE_MAX,
                                          operator=G.OP_AND)
                                  ]))
    stream = StringIO()
    _gen.write_dashboard(graph, stream)
    graph_json = json.loads(stream.getvalue())
    # There is a threshold (was `!= None` and a stray debug print; fixed to
    # the idiomatic identity check and the print removed).
    assert graph_json['thresholds'][0] is not None
def generate_dashboards(dashboards, output_dir):
    """
    Generate and write dashboard json files into the output_dir.
    For each key in dashboards dict a new subfolder is created.
    If output_dir already exists it will be removed and recreated.
    """
    # Remove the output dir; don't raise errors if the dir doesn't exist.
    shutil.rmtree(output_dir, ignore_errors=True)
    output_path = Path(output_dir)
    # Directory mounted as a Docker volume can't be deleted, so it's fine if
    # it still exists. parents=True also creates any missing intermediate
    # directories (the original failed for nested output paths).
    output_path.mkdir(parents=True, exist_ok=True)
    for key, key_dashboards in dashboards.items():
        subdir = output_path / key
        subdir.mkdir(exist_ok=True)
        for dashboard in key_dashboards:
            # Reuse subdir instead of recomputing output_path / key.
            output_file = subdir / f'{dashboard.title}.json'
            with open(output_file, 'w') as output:
                generator.write_dashboard(dashboard, output)
def test_serialization_zabbix_target():
    """Serializing a graph doesn't explode."""
    zabbix_target = Z.zabbixMetricTarget(
        group="Zabbix Group",
        host="Zabbix Host",
        application="CPU",
        item="/CPU (load)/",
        functions=[Z.ZabbixSetAliasFunction("View alias")],
    )
    graph = G.Graph(
        title="CPU Usage",
        dataSource="Zabbix data source",
        targets=[zabbix_target],
        id=1,
        yAxes=[
            G.YAxis(format=G.SHORT_FORMAT, label="CPU seconds / second"),
            G.YAxis(format=G.SHORT_FORMAT),
        ],
    )
    out = StringIO()
    _gen.write_dashboard(graph, out)
    assert out.getvalue() != ''
def test_serialization_opentsdb_target():
    """Serializing a graph doesn't explode."""
    wildcard_filter = O.OpenTSDBFilter(value='*',
                                       tag='instance',
                                       type='wildcard',
                                       groupBy=True)
    target = O.OpenTSDBTarget(metric='cpu',
                              alias='$tag_instance',
                              filters=[wildcard_filter])
    graph = G.Graph(
        title="CPU Usage",
        dataSource="OpenTSDB data source",
        targets=[target],
        id=1,
        yAxes=[
            G.YAxis(format=G.SHORT_FORMAT, label="CPU seconds / second"),
            G.YAxis(format=G.SHORT_FORMAT),
        ],
    )
    out = StringIO()
    _gen.write_dashboard(graph, out)
    assert out.getvalue() != ''
yAxes=[ YAxis(format=BYTES_FORMAT), YAxis(format=SHORT_FORMAT), ], ) row = Row(panels=[panel]) db = Dashboard( title='Autogenerated MySQL Disk Consumption', rows=[row], time=Time('now-6M', 'now'), ) s = StringIO.StringIO() write_dashboard(db, s) dashboard_json = s.getvalue() print dashboard_json payload = { "dashboard": json.loads(dashboard_json), "overwrite": True # "folderId": 0, } r = requests.post(GRAFANA_API_URL, json=payload) if r.ok: print r.content else: print 'error: %s (%s)' % (r.content, r.status_code)
def dashboard_to_json_bytes(dashboard):
    """Serialize *dashboard* to JSON and return the result as bytes.

    Args:
        dashboard: A grafanalib dashboard object accepted by write_dashboard.

    Returns:
        The JSON-rendered dashboard as a bytes object.
    """
    # Renamed the local from `io` to `buffer`: the old name shadowed the
    # stdlib `io` module used elsewhere in this file.
    buffer = BytesIO()
    write_dashboard(dashboard, buffer)
    return buffer.getvalue()
def run(self):
    """Build the dashboard from the global `metrics` table and print it as a
    JSON payload for the Grafana API."""
    # Dashboard template variables; SDP-instance and customer templates are
    # optional, controlled by command-line options.
    templateList = []
    if self.options.use_sdp:
        templateList.append(
            G.Template(default="1",
                       dataSource="default",
                       name="sdpinst",
                       label="SDPInstance",
                       query="label_values(sdpinst)"))
    if self.options.customer:
        templateList.append(
            G.Template(default="1",
                       dataSource="default",
                       name="customer",
                       label="Customer",
                       query="label_values(customer)"))
    templateList.append(
        G.Template(default="",
                   dataSource="default",
                   name="serverid",
                   label="ServerID",
                   query="label_values(serverid)"))
    dashboard = G.Dashboard(title=self.options.title,
                            templating=G.Templating(list=templateList))
    for metric in metrics:
        # 'section' / 'row' entries only open a new row; they carry no graph.
        if 'section' in metric:
            dashboard.rows.append(
                G.Row(title=metric['section'], showTitle=True))
            continue
        if 'row' in metric:
            dashboard.rows.append(G.Row(title='', showTitle=False))
            continue
        if 'type' in metric and metric['type'] == 'gauge':
            # Gauge panels are not rendered yet — placeholder only.
            pass
            # text = G.Text(title=metric['title'],
            #               dataSource='default')
            # dashboard.rows[-1].panels.append(G.Text)
        else:
            # Y axis format defaults to 'short' unless overridden per metric.
            yAxis = G.single_y_axis(format="short")
            if 'yformat' in metric:
                yAxis = G.single_y_axis(format=metric['yformat'])
            graph = G.Graph(title=metric['title'],
                            dataSource='default',
                            maxDataPoints=1000,
                            legend=G.Legend(show=True,
                                            alignAsTable=True,
                                            min=True,
                                            max=True,
                                            avg=True,
                                            current=True,
                                            total=True,
                                            sort='max',
                                            sortDesc=True),
                            yAxes=yAxis)
            # Targets get consecutive refIds: 'A', 'B', 'C', ...
            refId = 'A'
            for targ in metric['target']:
                texp = targ['expr']
                legend = "instance {{instance}}, serverid {{serverid}}"
                if 'legend' in targ:
                    legend += ' %s' % targ['legend']
                # Remove SDP
                # (strip the sdpinst label selector when SDP is disabled).
                if not self.options.use_sdp:
                    texp = texp.replace('sdpinst="$sdpinst",', '')
                # Inject the customer label into every selector when a
                # customer filter is active.
                if self.options.customer:
                    texp = texp.replace('{', '{customer="$customer",')
                graph.targets.append(
                    G.Target(expr=texp,
                             legendFormat=legend,
                             refId=refId))
                refId = chr(ord(refId) + 1)
            dashboard.rows[-1].panels.append(graph)
    # Auto-number panels - returns new dashboard
    dashboard = dashboard.auto_panel_ids()
    s = io.StringIO()
    write_dashboard(dashboard, s)
    print("""{
  "dashboard": %s
}
""" % s.getvalue())