def test_kpss():
    """Verify kpss(): default mode, explicit mode, and mode validation."""
    # Omitting mode must fall back to 'level'.
    defaulted = (Data("cpu.utilization", filter=Filter('app', 'test-app'))
                 .kpss(over='1h')
                 .publish(label='a'))
    first_call = defaulted.call_stack[0]
    assert first_call.name == 'kpss'
    assert first_call.args == [KWArg("over", '1h'), KWArg("mode", 'level')]

    # An explicit 'trend' mode is passed through unchanged.
    trended = (Data("cpu.utilization", filter=Filter('app', 'test-app'))
               .kpss(over='2m', mode='trend')
               .publish(label='a'))
    assert trended.call_stack[0].args == \
        [KWArg("over", '2m'), KWArg("mode", 'trend')]

    # Anything outside level|trend must raise a ValueError.
    try:
        Data("cpu.utilization", filter=Filter('app', 'test-app'))\
            .kpss(over='2m', mode='tr3nd')
        assert False
    except ValueError as ve:
        assert str(ve) == 'kpss mode must be level|trend'
def test_integrate():
    """integrate() should land on the call stack with no arguments."""
    stream = (Data("cpu.utilization", filter=Filter('app', 'test-app'))
              .integrate()
              .publish(label='a'))
    top_call = stream.call_stack[0]
    assert top_call.name == 'integrate'
    assert top_call.args == []
def test_fill():
    """fill() should record both its value and duration keyword args."""
    stream = (Data("cpu.utilization", filter=Filter('app', 'test-app'))
              .fill(value=42, duration="1m")
              .publish(label='a'))
    top_call = stream.call_stack[0]
    assert top_call.name == 'fill'
    assert top_call.args == [KWArg("value", 42), KWArg("duration", '1m')]
def test_count_percentage_by_methods():
    """top() and bottom() share a signature; both must capture it intact."""
    # TODO consider making this test use dynamic fn calls to test all stream
    # methods with the same signature.
    stream = (Data('cpu.utilization', filter=Filter('app', 'test-app'))
              .top(count=3, percentage=22.3, by=["env", "datacenter"])
              .bottom(count=4, percentage=22.4, by=["env", "datacenter"])
              .publish(label='A'))
    # Call stack order mirrors the chaining order: top first, bottom second.
    assert stream.call_stack[0].args == [
        KWArg("count", 3),
        KWArg("percentage", 22.3),
        KWArg("by", ["env", "datacenter"]),
    ]
    assert stream.call_stack[1].args == [
        KWArg("count", 4),
        KWArg("percentage", 22.4),
        KWArg("by", ["env", "datacenter"]),
    ]
def __program__(self, app, env):
    """Build the example program: mean of cpu.user for my-app in prod.

    Note: `app` and `env` are accepted but not used here; the filter is
    hard-coded to my-app/prod, matching the original behavior.
    """
    prod_app = And(Filter('app', 'my-app'), Filter('env', 'prod'))
    stream = Data('cpu.user', filter=prod_app).mean().publish('A')
    return Program(stream)
#!/usr/bin/env python """Examples for the `signal_analog.charts` module.""" from signal_analog.charts \ import TimeSeriesChart, PlotType, PublishLabelOptions, PaletteColor from signal_analog.flow import Data, Filter, Program from signal_analog.combinators import And """ Example 1: single-use chart This is useful when you just want to create a chart and aren't worried about re-usability. """ # Look at the mean of the cpu.user metric for my-app in the prod environment app_filter = And(Filter('app', 'my-app'), Filter('env', 'prod')) program = Program(Data('cpu.user', filter=app_filter).mean().publish('A')) chart = TimeSeriesChart()\ .with_name('CPU Used %')\ .with_description('% CPU used by user')\ .stack_chart(True)\ .with_default_plot_type(PlotType.area_chart)\ .with_program(program) """ Example 2: make a re-usable chart (or template) This is useful when you want your chart to be broadly applicable/used by others """
def test_find_label_published():
    """find_label should return the statement published under that label."""
    stream = Data('cpu.utilization', filter=Filter('app', 'test-app'))\
        .publish(label='A')
    wrapper = Program(stream)
    assert wrapper.find_label('A') == stream
def test_find_label_unpublished():
    """find_label must return None when the statement was never published."""
    stream = Data('cpu.utilization', filter=Filter('app', 'test-app'))
    wrapper = Program(stream)
    assert wrapper.find_label('A') is None
def test_program_init_valid_statements():
    """Statements handed to the Program constructor are stored in order."""
    stream = Data('cpu.utilization', filter=Filter('app', 'test-app'))
    wrapper = Program(stream)
    assert wrapper.statements[0] == stream
def create_kinesis_charts(stream_name, description):
    """ Create Kinesis charts

    Left chart shows incoming records and outgoing records.
    Right chart shows errors, throttles, and iterator age.

    :param stream_name: Kinesis stream name used in the StreamName filter
                        and chart titles.
    :param description: description applied to both charts.
    :returns: list of two TimeSeriesChart objects.
    """
    charts = []
    # Shared filter for every "sum"-stat metric on this stream.
    sum_filter = And(Filter("StreamName", stream_name),
                     Filter("namespace", "AWS/Kinesis"),
                     Filter("stat", "sum"))
    # Left chart: records written to the stream vs. records read back out,
    # summed per account/stream.
    charts.append(
        TimeSeriesChart() \
            .with_name("Kinesis Stream " + stream_name) \
            .with_description(description)
            .with_default_plot_type(PlotType.column_chart) \
            .with_chart_legend_options("sf_metric", show_legend=True)
            .with_publish_label_options(
                PublishLabelOptions(
                    label='IncomingRecords',
                    palette_index=PaletteColor.green
                ),
                PublishLabelOptions(
                    label='GetRecords.Records',
                    palette_index=PaletteColor.light_green
                )
            ).with_axes([AxisOption(label="Count")])
            .with_program(
                Program(
                    Plot(
                        assigned_name="A",
                        signal_name="IncomingRecords",
                        filter=sum_filter,
                        rollup=RollupType.sum,
                        fx=[Sum(by=["aws_account_id", "StreamName"])],
                        label="IncomingRecords"),
                    Plot(
                        assigned_name="B",
                        signal_name="GetRecords.Records",
                        filter=sum_filter,
                        rollup=RollupType.sum,
                        fx=[Sum(by=["aws_account_id", "StreamName"])],
                        label="GetRecords.Records")
                )
            )
    )
    # Right chart: read/write throughput-exceeded counts on axis 0, iterator
    # age (milliseconds) as an area plot on axis 1 so units don't clash.
    charts.append(
        TimeSeriesChart() \
            .with_name("Kinesis Stream " + stream_name) \
            .with_description(description)
            .with_default_plot_type(PlotType.column_chart) \
            .with_chart_legend_options("sf_metric", show_legend=True)
            .with_publish_label_options(
                PublishLabelOptions(
                    label='ReadThroughputExceeded',
                    palette_index=PaletteColor.rust,
                    y_axis=0
                ),
                PublishLabelOptions(
                    label='WriteThroughputExceeded',
                    palette_index=PaletteColor.tangerine,
                    y_axis=0
                ),
                PublishLabelOptions(
                    label='GetRecords.IteratorAge',
                    palette_index=PaletteColor.sunflower,
                    value_unit='Millisecond',
                    plot_type=PlotType.area_chart,
                    y_axis=1
                )).with_axes([AxisOption(label="Count"),
                              AxisOption(label="Age")])
            .with_program(
                Program(
                    Plot(
                        assigned_name="A",
                        signal_name="ReadProvisionedThroughputExceeded",
                        filter=sum_filter,
                        rollup=RollupType.sum,
                        fx=[Sum(by=["aws_account_id", "StreamName"])],
                        label="ReadThroughputExceeded"),
                    Plot(
                        assigned_name="B",
                        signal_name="WriteProvisionedThroughputExceeded",
                        filter=sum_filter,
                        rollup=RollupType.sum,
                        fx=[Sum(by=["aws_account_id", "StreamName"])],
                        label="WriteThroughputExceeded"),
                    Plot(
                        assigned_name="C",
                        signal_name="GetRecords.IteratorAgeMilliseconds",
                        # Iterator age publishes the "upper" stat, not "sum",
                        # so it gets its own filter instead of sum_filter.
                        filter=And(
                            Filter("StreamName", stream_name),
                            Filter("namespace", "AWS/Kinesis"),
                            Filter("stat", "upper")
                        ),
                        rollup=RollupType.max,  # max rollup is used here so you can still see spikes over longer windows
                        fx=[Sum(by=["aws_account_id", "StreamName"])],
                        label="GetRecords.IteratorAge")
                )
            )
    )
    return charts
#!/usr/bin/env python3 from signal_analog.flow import Data, Filter from signal_analog.charts import TimeSeriesChart from signal_analog.dashboards import Dashboard, DashboardGroup from signal_analog.combinators import And """ Creating a new Dashboard Group with Dashboards """ filters = And(Filter('host', 'learn-signalfx')) program = Data('cpu.utilization', filter=filters).publish() cpu_chart = TimeSeriesChart().with_name('CPU').with_program(program) program = Data('postgres_query_time', rollup='rate', filter=filters).publish() query_time = TimeSeriesChart().with_name('Query Time').with_program(program) program = Data('memory.utilization', filter=filters).publish() memory_chart = TimeSeriesChart().with_name('Memory').with_program(program) dashboard1 = Dashboard().with_name('Dashboard 1').with_charts( query_time, cpu_chart, memory_chart) dashboard_group = DashboardGroup() \ .with_name('Learn Signal Analog') \ .with_dashboards(dashboard1) if __name__ == '__main__': from signal_analog.cli import CliBuilder cli = CliBuilder().with_resources(dashboard_group)\
def create_dynamodb_with_stream_charts(table_name, description):
    """ Create charts for DynamoDB Table that has a Stream enabled.

    First left chart shows read/write capacity consumed, plus latency.
    First right chart shows Errors and Throttling on the table.
    Second left chart shows records returned on the Stream.
    Second right chart shows errors on the Stream.

    :param table_name: DynamoDB table name used in filters and chart titles.
    :param description: description applied to every chart.
    :returns: list of four TimeSeriesChart objects (table charts + stream
              charts).
    """
    charts = []
    # Start from the standard table charts, then add the Stream-specific ones.
    charts.extend(create_dynamodb_charts(table_name, description))
    # Stream throughput: records returned by GetRecords on the stream.
    charts.append(
        TimeSeriesChart() \
            .with_name("Dynamo Stream " + table_name) \
            .with_description(description)
            .with_default_plot_type(PlotType.column_chart) \
            .with_chart_legend_options("sf_metric", show_legend=True)
            .with_publish_label_options(
                PublishLabelOptions(
                    label='ReturnedRecordsCount',
                    palette_index=PaletteColor.green,
                )
            ).with_program(
                Program(
                    Plot(
                        assigned_name="A",
                        signal_name="ReturnedRecordsCount",
                        filter=And(
                            Filter("TableName", table_name),
                            Filter("stat", "sum")
                        ),
                        rollup=RollupType.sum,
                        fx=[Sum(by=["TableName", "aws_account_id"])],
                        label="ReturnedRecordsCount")
                )
            )
    )
    charts.append(
        # This chart is usually empty because these kinds of Stream errors
        # don't seem to happen much
        TimeSeriesChart() \
            .with_name("Dynamo Stream " + table_name) \
            .with_description(description)
            .with_default_plot_type(PlotType.column_chart) \
            .with_chart_legend_options("sf_metric", show_legend=True)
            .with_publish_label_options(
                PublishLabelOptions(
                    label='SystemErrors',
                    palette_index=PaletteColor.rose,
                    y_axis=1
                )
            ).with_program(
                # Fix: the Plot was previously passed to with_program() bare;
                # every other chart in this module wraps its plots in
                # Program(...), so do the same here for consistency.
                Program(
                    Plot(
                        assigned_name="A",
                        signal_name="SystemErrors",
                        filter=And(
                            Filter("TableName", table_name),
                            Filter("stat", "sum"),
                            Filter("Operation", "GetRecords")  # Streams only have 1 operation
                        ),
                        rollup=RollupType.sum,
                        fx=[Sum(by=["TableName", "aws_account_id"])],
                        label="SystemErrors")
                )
            )
    )
    return charts
def create_dynamodb_charts(table_name, description):
    """ Create charts for DynamoDB Table.

    Left chart shows read/write capacity consumed, plus latency.
    Right chart shows Errors and Throttling.

    :param table_name: DynamoDB table name used in filters and chart titles.
    :param description: description applied to both charts.
    :returns: list of two TimeSeriesChart objects.
    """
    charts = []
    # Left chart: consumed read/write capacity units (columns, axis 0) with
    # request latency overlaid as a line on axis 1.
    charts.append(
        TimeSeriesChart() \
            .with_name("Dynamo Table " + table_name) \
            .with_description(description)
            .with_default_plot_type(PlotType.column_chart) \
            .with_chart_legend_options("sf_metric", show_legend=True)
            .with_publish_label_options(
                PublishLabelOptions(
                    label='ConsumedReadCapacity',
                    palette_index=PaletteColor.green
                ),
                PublishLabelOptions(
                    label='ConsumedWriteCapacity',
                    palette_index=PaletteColor.light_green
                ),
                PublishLabelOptions(
                    label='Latency',
                    palette_index=PaletteColor.gray,
                    plot_type=PlotType.line_chart,
                    value_unit='Millisecond',
                    y_axis=1
                )
            ).with_axes([AxisOption(label="Units", min=0),
                         AxisOption(label="Latency", min=0)])
            .with_program(
                Program(
                    Plot(
                        assigned_name="A",
                        signal_name="ConsumedReadCapacityUnits",
                        filter=And(
                            Filter("TableName", table_name),
                            Filter("stat", "sum")
                        ),
                        rollup=RollupType.sum,
                        fx=[Sum(by=["TableName", "aws_account_id"])],
                        label="ConsumedReadCapacity"
                    ),
                    Plot(
                        assigned_name="B",
                        signal_name="ConsumedWriteCapacityUnits",
                        filter=And(
                            Filter("TableName", table_name),
                            Filter("stat", "sum")
                        ),
                        rollup=RollupType.sum,
                        fx=[Sum(by=["TableName", "aws_account_id"])],
                        label="ConsumedWriteCapacity"
                    ),
                    Plot(
                        assigned_name="C",
                        # Latency uses the "mean" stat; rollup is max so that
                        # short spikes stay visible over long time windows.
                        signal_name="SuccessfulRequestLatency",
                        filter=And(
                            Filter("TableName", table_name),
                            Filter("stat", "mean")
                        ),
                        rollup=RollupType.max,
                        fx=[Mean(by=["TableName", "aws_account_id"])],
                        label="Latency"
                    )
                )
            )
    )
    # Right chart: throttling counters on axis 0 and system errors on axis 1.
    charts.append(
        TimeSeriesChart() \
            .with_name("Dynamo Table " + table_name) \
            .with_description(description)
            .with_default_plot_type(PlotType.column_chart) \
            .with_chart_legend_options("sf_metric", show_legend=True)
            .with_publish_label_options(
                PublishLabelOptions(
                    label='ThrottledRequests',
                    palette_index=PaletteColor.rust
                ),
                PublishLabelOptions(
                    label='ReadThrottle',
                    palette_index=PaletteColor.tangerine
                ),
                PublishLabelOptions(
                    label='WriteThrottle',
                    palette_index=PaletteColor.sunflower
                ),
                PublishLabelOptions(
                    label='SystemErrors',
                    palette_index=PaletteColor.rose,
                    y_axis=1
                )
            ).with_program(
                Program(
                    Plot(
                        assigned_name="A",
                        signal_name="ThrottledRequests",
                        filter=And(
                            Filter("TableName", table_name),
                            Filter("stat", "sum")
                        ),
                        rollup=RollupType.sum,
                        fx=[Sum(by=["TableName", "aws_account_id"])],
                        label="ThrottledRequests"
                    ),
                    Plot(
                        assigned_name="B",
                        signal_name="ReadThrottleEvents",
                        filter=And(
                            Filter("TableName", table_name),
                            Filter("stat", "sum")
                        ),
                        rollup=RollupType.sum,
                        fx=[Sum(by=["TableName", "aws_account_id"])],
                        label="ReadThrottle"
                    ),
                    Plot(
                        assigned_name="C",
                        signal_name="WriteThrottleEvents",
                        filter=And(
                            Filter("TableName", table_name),
                            Filter("stat", "sum")
                        ),
                        rollup=RollupType.sum,
                        fx=[Sum(by=["TableName", "aws_account_id"])],
                        label="WriteThrottle"
                    ),
                    Plot(
                        assigned_name="D",
                        signal_name="SystemErrors",
                        # Exclude Stream errors: those are charted separately
                        # by create_dynamodb_with_stream_charts.
                        filter=And(
                            Filter("TableName", table_name),
                            Filter("stat", "sum"),
                            Not(Filter("Operation", "GetRecords"))  # GetRecords is a Dynamo Stream operation
                        ),
                        rollup=RollupType.sum,
                        fx=[Sum(by=["TableName", "aws_account_id"])],
                        label="SystemErrors")
                )
            )
    )
    return charts
def create_lambda_charts(function_name, description):
    """ Create Lambda charts

    Left chart shows activity and latency.
    Right chart shows errors, throttles, and iterator age.

    :param function_name: Lambda function name used in filters and titles.
    :param description: description applied to both charts.
    :returns: list of two TimeSeriesChart objects.
    """
    charts = []
    # Left chart: invocation counts (columns, axis 0) with duration overlaid
    # as a line in milliseconds on axis 1.
    charts.append(
        TimeSeriesChart() \
            .with_name("Lambda " + function_name + " Invocations") \
            .with_description(description)
            .with_default_plot_type(PlotType.column_chart) \
            .with_chart_legend_options("sf_metric", show_legend=True)
            .with_publish_label_options(
                PublishLabelOptions(
                    label='Invocations',
                    palette_index=PaletteColor.green
                ),
                PublishLabelOptions(
                    label='Duration',
                    palette_index=PaletteColor.gray,
                    y_axis=1,
                    plot_type=PlotType.line_chart,
                    value_unit='Millisecond'
                )
            )
            .with_axes([AxisOption(label="Count", min=0),
                        AxisOption(label="Latency", min=0)])
            .with_program(
                Program(
                    Plot(
                        assigned_name="A",
                        signal_name="Invocations",
                        filter=And(
                            Filter("FunctionName", function_name),
                            Filter("namespace", "AWS/Lambda"),
                            Filter("stat", "sum")
                        ),
                        rollup=RollupType.sum,
                        fx=[Sum(by=["aws_account_id", "FunctionName"])],
                        label="Invocations"),
                    Plot(
                        assigned_name="B",
                        signal_name="Duration",
                        filter=And(
                            Filter("FunctionName", function_name),
                            Filter("namespace", "AWS/Lambda"),
                            Filter("stat", "mean")
                        ),
                        rollup=RollupType.max,  # max rollup is used here so you can still see spikes over longer windows
                        fx=[Sum(by=["aws_account_id", "FunctionName"])],
                        label="Duration")
                )
            )
    )
    # Right chart: errors and throttles (columns, axis 0) plus iterator age
    # as an area plot on axis 1, capped at 36 hours expressed in ms.
    charts.append(
        TimeSeriesChart() \
            .with_name("Lambda " + function_name) \
            .with_description(description)
            .with_default_plot_type(PlotType.column_chart) \
            .with_chart_legend_options("sf_metric", show_legend=True)
            .with_publish_label_options(
                PublishLabelOptions(
                    label='Errors',
                    palette_index=PaletteColor.rust
                ),
                PublishLabelOptions(
                    label='Throttles',
                    palette_index=PaletteColor.sunflower
                ),
                PublishLabelOptions(
                    label='IteratorAge',
                    value_unit='Millisecond',
                    plot_type=PlotType.area_chart,
                    palette_index=PaletteColor.slate_blue,
                    y_axis=1
                )
            ).with_axes([AxisOption(label="Count", min=0),
                         AxisOption(label="Age", min=0,
                                    max=(1000 * 60 * 60 * 36))])
            .with_program(
                Program(
                    Plot(
                        assigned_name="B",
                        signal_name="Errors",
                        filter=And(
                            Filter("FunctionName", function_name),
                            Filter("namespace", "AWS/Lambda"),
                            Filter("stat", "sum")
                        ),
                        rollup=RollupType.sum,
                        fx=[Sum(by=["aws_account_id", "FunctionName"])],
                        label="Errors"),
                    Plot(
                        assigned_name="C",
                        signal_name="Throttles",
                        filter=And(
                            Filter("FunctionName", function_name),
                            Filter("namespace", "AWS/Lambda"),
                            Filter("stat", "sum")
                        ),
                        rollup=RollupType.sum,
                        fx=[Sum(by=["aws_account_id", "FunctionName"])],
                        label="Throttles"),
                    Plot(
                        assigned_name="D",
                        signal_name="IteratorAge",
                        filter=And(
                            Filter("FunctionName", function_name),
                            Filter("Resource", function_name),
                            Filter("namespace", "AWS/Lambda"),
                            Filter('stat', 'upper')
                        ),
                        rollup=RollupType.max,  # max rollup is used here so you can still see spikes over longer windows
                        # Max here is just to get rid of bad extra metric in Sfx
                        fx=[Max(by=["aws_account_id", "FunctionName"])],
                        label="IteratorAge")
                )
            )
    )
    return charts
def create_sqs_charts(queue_name, description):
    """ Create SQS charts

    Left chart shows messages sent/deleted and number visible.
    Right chart shows deadletter queue and age of oldest message.

    :param queue_name: SQS queue name used in filters and chart titles.
    :param description: description applied to both charts.
    :returns: list of two TimeSeriesChart objects.
    """
    charts = []
    # NOTE(review): this local shadows the builtin `filter`; harmless here
    # but worth renaming eventually. Shared "sum"-stat filter for the queue.
    filter = And(Filter("QueueName", queue_name),
                 Filter("namespace", "AWS/SQS"),
                 Filter("stat", "sum"))
    # Left chart: messages sent/deleted (columns) with the visible backlog
    # drawn as a line.
    charts.append(
        TimeSeriesChart() \
            .with_name("SQS " + queue_name) \
            .with_description(description)
            .with_default_plot_type(PlotType.column_chart) \
            .with_chart_legend_options("sf_metric", show_legend=True)
            .with_publish_label_options(
                PublishLabelOptions(
                    label='NumberOfMessagesSent',
                    palette_index=PaletteColor.green
                ),
                PublishLabelOptions(
                    label='NumberOfMessagesDeleted',
                    palette_index=PaletteColor.light_green
                ),
                PublishLabelOptions(
                    label='ApproximateNumberOfMessagesVisible',
                    palette_index=PaletteColor.sky_blue,
                    plot_type=PlotType.line_chart
                )
            ).with_axes([AxisOption(label="Count", min=0)])
            .with_program(
                Program(
                    Plot(
                        assigned_name="A",
                        signal_name="NumberOfMessagesSent",
                        filter=filter,
                        rollup=RollupType.sum,
                        fx=[Sum(by=["aws_account_id", "QueueName"])],
                        label="NumberOfMessagesSent"),
                    Plot(
                        assigned_name="B",
                        signal_name="NumberOfMessagesDeleted",
                        filter=filter,
                        rollup=RollupType.sum,
                        fx=[Sum(by=["aws_account_id", "QueueName"])],
                        label="NumberOfMessagesDeleted"),
                    Plot(
                        assigned_name="C",
                        signal_name="ApproximateNumberOfMessagesVisible",
                        filter=filter,
                        rollup=RollupType.max,
                        fx=[Max(by=["aws_account_id", "QueueName"])],
                        label="ApproximateNumberOfMessagesVisible")
                )
            )
    )
    # Right chart: dead-letter backlog on axis 0 and age of the oldest
    # message (seconds, area plot) on axis 1.
    charts.append(
        TimeSeriesChart() \
            .with_name("SQS " + queue_name) \
            .with_description(description)
            .with_default_plot_type(PlotType.column_chart) \
            .with_chart_legend_options("sf_metric", show_legend=True)
            .with_publish_label_options(
                PublishLabelOptions(
                    label='DeadLetterMessages',
                    palette_index=PaletteColor.mulberry,
                    y_axis=0
                ),
                PublishLabelOptions(
                    label='ApproximateAgeOfOldestMessage',
                    palette_index=PaletteColor.sunflower,
                    value_unit='Second',
                    plot_type=PlotType.area_chart,
                    y_axis=1
                )
            ).with_axes([AxisOption(label="Count", min=0),
                         AxisOption(label="Age", min=0)])
            .with_program(
                Program(
                    Plot(
                        assigned_name="A",
                        signal_name="ApproximateNumberOfMessagesVisible",
                        filter=And(
                            # assumes naming convention for DL queues
                            Filter("QueueName", queue_name + "-deadletter",
                                   queue_name + "-dlq"),
                            Filter("namespace", "AWS/SQS"),
                            Filter("stat", "upper")
                        ),
                        rollup=RollupType.max,
                        fx=[Sum(by=["aws_account_id", "QueueName"])],
                        label="DeadLetterMessages"),
                    Plot(
                        assigned_name="B",
                        signal_name="ApproximateAgeOfOldestMessage",
                        filter=And(
                            Filter("QueueName", queue_name),
                            Filter("namespace", "AWS/SQS"),
                            Filter("stat", "upper")
                        ),
                        rollup=RollupType.max,  # max rollup is used here so you can still see spikes over longer windows
                        fx=[Max(by=["aws_account_id", "QueueName"])],
                        label="ApproximateAgeOfOldestMessage")
                )
            )
    )
    return charts
#!/usr/bin/env python """Examples for the `signal_analog.filters` module.""" from signal_analog.flow import Data, Filter from signal_analog.charts import TimeSeriesChart from signal_analog.dashboards import Dashboard from signal_analog.combinators import And from signal_analog.filters import DashboardFilters, FilterVariable, FilterSource, FilterTime """ Example 1: Creating a new Dashboard with Filter Variable This creates a new dashboard for the app specified and with the charts provided and with the Dashboard Filter provided """ filters = And(Filter('app', 'my-app'), Filter('env', 'test')) program = Data('cpu.utilization', filter=filters).publish() chart = TimeSeriesChart().with_name('Chart_Name').with_program(program) app_var = FilterVariable().with_alias('application name') \ .with_property('app') \ .with_is_required(True) \ .with_value('my-app') app_filter = DashboardFilters() \ .with_variables(app_var) dashboard_with_single_filter_variable = Dashboard()\ .with_name('Dashboard Name')\ .with_charts(chart)\ .with_filters(app_filter) """ Example 2: Creating a new Dashboard with multiple filters
#!/usr/bin/env python """Examples of how to use the `signal_analog.flow` module. Some basic understanding of SignalFx is assumed. """ from signal_analog.flow import Data, Filter, Program # A program is a convenient wrapper around SignalFlow statements with a few # utilities like `find_label` that returns a SignalFlow statement based on it's # label. program = Program() # A timeseries representing the 'cpu.utilization' metric that is filtered # down to just the 'shoeadmin' application. Also analyze the mean over the # previous minute and compare it to the data from last week. data = Data('cpu.utilization', filter=Filter('app', 'shoeadmin'))\ .mean(over='1m')\ .timeshift('1w')\ .publish('A') program.add_statements(data) print('{0}\n\t{1}'.format(program, str(program) == str(program.find_label('A'))))
#!/usr/bin/env python """Examples for the `signal_analog.charts` module.""" from signal_analog.charts \ import TimeSeriesChart, PlotType, PublishLabelOptions, PaletteColor, AxisOption from signal_analog.flow import Data, Filter, Program, Plot, RollupType, Sum from signal_analog.combinators import And """ Example 1: single-use chart This is useful when you just want to create a chart and aren't worried about re-usability. """ # Look at the mean of the cpu.user metric for my-app in the prod environment app_filter = And(Filter('app', 'my-app'), Filter('env', 'prod')) program = Program(Data('cpu.user', filter=app_filter).mean().publish('A')) chart = TimeSeriesChart()\ .with_name('CPU Used %')\ .with_description('% CPU used by user')\ .stack_chart(True)\ .with_default_plot_type(PlotType.area_chart)\ .with_program(program) """ Example 2: make a re-usable chart (or template) This is useful when you want your chart to be broadly applicable/used by others """