def post_signalfx_event(signalfx_event):
    '''
    Template, parameterized by realm:
    sfx = signalfx.SignalFx(ingest_endpoint='https://ingest.{REALM}.signalfx.com')
    with sfx.ingest('ORG_TOKEN') as ingest:
        ingest.send_event(event_type=signalfx_event)
    '''
    # SignalFx trial token, valid for only 10 days
    ORG_TOKEN = '2uVk73DtBX4ke2gu4ir23w'
    # The endpoint is the bare ingest host; the client appends the API path.
    sfx = signalfx.SignalFx(ingest_endpoint='https://ingest.us1.signalfx.com')
    # Use the configured client here; creating a fresh SignalFx() would
    # silently discard the custom ingest endpoint.
    with sfx.ingest(ORG_TOKEN) as ingest:
        ingest.send_event(event_type=signalfx_event)
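# The commented-out template above parameterizes the ingest endpoint by
# realm. A minimal sketch of that pattern, assuming the realm and token come
# from environment variables (the variable names here are illustrative):
import os
import signalfx

realm = os.environ.get('SFX_REALM', 'us1')   # hypothetical variable name
token = os.environ['SFX_TOKEN']              # hypothetical variable name

sfx = signalfx.SignalFx(
    ingest_endpoint='https://ingest.{}.signalfx.com'.format(realm))
with sfx.ingest(token) as ingest:
    ingest.send_event(event_type='deployment')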
Example #2
    def __init__(self, endpoint, frequency=5, thread_count=5):
        """Initialize a new Monitor Manager.

        Args:
            endpoint(`Endpoint`): An `Endpoint` object capable of calling
                the `request()` method.
            thread_count(int): The number of requester threads to spawn
            frequency(int): The frequency to make a new request

        """
        # don't use get() here because we want exception if missing key
        self.sfx_token = os.environ['SIGNALFX_TOKEN']  # bail if missing

        self.frequency = frequency
        self.thread_count = thread_count
        self.endpoint = endpoint
        self.queue_requests = queue.Queue()
        self.queue_results = queue.Queue()
        self.logger = logging.getLogger(__name__)
        self.hostname = socket.gethostname()
        self.sfx_client = signalfx.SignalFx()
        self.sfx_ingest = self.sfx_client.ingest(self.sfx_token)
        self.dimensions = None
        # self.sfx_ingest.add_dimensions(
        #     {AWS_ID_DIMENSION: get_aws_unique_id()})
        self.should_die = False
        self.request_thread = threading.Thread(target=self.add_request,
                                               args=())
        self.request_thread.daemon = True
        self.request_thread.start()
        for i in range(thread_count):
            t = threading.Thread(target=self.request, args=())
            t.start()

    def run_loop(self):
        with signalfx.SignalFx().signalflow(self.token) as flow:
            c = flow.execute(CURRENT_ENDPOINTS_BY_ENV_AND_OS_PROGRAM)
            tsid = None

            for msg in c.stream():
                if isinstance(msg, messages.DataMessage):
                    # This message comes in once every five minutes and provides the current active endpoints

                    for env in self.endpoint_details:
                        env.update(msg.data)

                    new_count = sum(e.total for e in self.endpoint_details)

                    if new_count > self.current_endpoints:
                        self.trend_direction = TrendDirection.UP
                    else:
                        self.trend_direction = TrendDirection.DOWN
                    self.current_endpoints = new_count

                elif isinstance(msg, messages.MetadataMessage):
                    # This message should be the first to arrive; we use it to get the tsid for our computed data
                    for env in self.endpoint_details:
                        env.save_tsid(msg)
                    tsid = msg.tsid
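# The constructor above fans requests out through queue.Queue objects and a
# pool of threads. A self-contained sketch of that producer/worker pattern
# (all names here are illustrative; the real workers call endpoint.request()):
import queue
import threading

work = queue.Queue()
results = queue.Queue()

def worker():
    while True:
        item = work.get()
        if item is None:        # sentinel tells the worker to exit
            break
        results.put(item * 2)   # stand-in for the real request
        work.task_done()

threads = [threading.Thread(target=worker, daemon=True) for _ in range(5)]
for t in threads:
    t.start()

for i in range(10):
    work.put(i)
work.join()                     # block until every item is processed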
def into_signalfx(sfx_key, cluster_health, node_stats):
    import signalfx
    sfx = signalfx.SignalFx()
    ingest = sfx.ingest(sfx_key)
    for node in node_stats:
        source_node = node['source_node']
        for s in node_stats_to_collect:
            flattened = flatten_json(node['node_stats'][s])
            for k, v in flattened.items():
                # bool is a subclass of int, so exclude it explicitly
                if isinstance(v, (int, float)) and not isinstance(v, bool):
                    ingest.send(
                        gauges=[{
                            "metric": 'elasticsearch.node.' + s + '.' + k,
                            "value": v,
                            "dimensions": {
                                'cluster_uuid': node.get('cluster_uuid'),
                                'cluster_name': node.get('cluster_name'),
                                'node_name': source_node.get('name'),
                                'node_host': source_node.get('host'),
                                'node_ip': source_node.get('ip'),
                                'node_uuid': source_node.get('uuid'),
                            }
                        }])
    ingest.stop()
    def __init__(self, config, log):
        self.config = config
        self.log = log
        self.host = ""

        import signalfx
        sfx = signalfx.SignalFx(ingest_endpoint=config.ingest_endpoint)
        self.sfx = sfx.ingest(config.api_token)
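# A sketch of the config object this constructor reads; the two attribute
# names are taken from the code above, the values are placeholders:
import types

config = types.SimpleNamespace(
    ingest_endpoint='https://ingest.us1.signalfx.com',
    api_token='MY-TOKEN',
)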
Example #6
def execute_sfx_program(api_token,
                        program,
                        start_time,
                        end_time,
                        dimensions=None,
                        resolution=60):
    """ Execute an arbitrary SignalFlow program

    :param api_token: a valid SFX API query token (you can get this from the SignalFX dashboard)
    :param program: a valid signalflow program to execute
    :param start_time: beginning of program execution range, as an Arrow object
    :param end_time: end of program execution range, as an Arrow object
    :param dimensions: list of strings to group the returned timeseries by
    :param resolution: smallest time interval (in seconds) to evaluate the program on
        note: SignalFX has a maximum resolution of 1 minute, and only for the most recent data;
              setting a resolution higher than this (or even 1 minute for older data) will be ignored
    :returns: a list of (timestamp, data_points) tuples, where data_points is a dict of timeseries_name -> value
    """
    with signalfx.SignalFx().signalflow(api_token) as sfx:
        curr_time = start_time
        datapoints = []
        while curr_time < end_time:
            # To prevent overloading SignalFX we grab a maximum of 5 days' worth of data at a time
            next_time = min(curr_time.shift(days=5), end_time)
            logger.info(f'Querying SignalFX from {curr_time} to {next_time}')
            raw_data = sfx.execute(
                program,

                # SignalFX operates on millisecond timescales
                start=curr_time.timestamp * 1000,
                stop=next_time.timestamp * 1000,
                resolution=resolution * 1000,
            )

            # We can only call _make_ts_label after all of the entries in the raw_data.stream() have been processed
            data_messages = [
                msg for msg in raw_data.stream()
                if isinstance(msg, DataMessage)
            ]
            new_datapoints = sorted([
                (Arrow.utcfromtimestamp(msg.logical_timestamp_ms / 1000), {
                    _make_ts_label(raw_data, key, dimensions): value
                    for key, value in msg.data.items()
                }) for msg in data_messages
            ])

            # SignalFX sometimes gives us a duplicate datapoint at the end of one chunk and the
            # start of the next.  This doesn't play nicely with the metrics client, so detect and remove it here
            if datapoints and new_datapoints and new_datapoints[0][0] == datapoints[-1][0]:
                new_datapoints = new_datapoints[1:]
            datapoints.extend(new_datapoints)

            curr_time = next_time
    return datapoints
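# A usage sketch for execute_sfx_program (hypothetical token and program;
# assumes arrow < 1.0, where .timestamp is a property, to match the
# timestamp arithmetic above):
import arrow

points = execute_sfx_program(
    api_token='MY-API-TOKEN',
    program="data('cpu.utilization').mean().publish()",
    start_time=arrow.utcnow().shift(days=-1),
    end_time=arrow.utcnow(),
    resolution=60,
)
for ts, values in points:
    print(ts, values)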
Example #7
def postRequestSignalFX(eventTime, eventName, eventStatus):
    sfToken = str(SIGNALFX_TOKEN).split(":")[1][:-1].strip("\"")
    # The context manager flushes and stops the client on exit; no explicit
    # sfx.stop() or atexit hook is needed.
    with signalfx.SignalFx().ingest(sfToken) as sfx:
        sfx.send_event(event_type='LiveOps_event',
                       dimensions={
                           'name': eventName,
                           'status': eventStatus
                       },
                       timestamp=eventTime)
Example #8
def postRequestSignalFX(eventTime, elasticGroups, status):
    sfToken = str(SIGNALFX_TOKEN).split(":")[1][:-1].strip("\"")
    # The context manager stops the client on exit, so no explicit stop()
    # call is needed in a finally block.
    with signalfx.SignalFx().ingest(sfToken) as sfx:
        try:
            sfx.send_event(event_type='Deployment of:',
                           dimensions={
                               'name': elasticGroups,
                               'status': status
                           },
                           timestamp=eventTime)
        except Exception as postE:
            print(postE)
Example #9
        def wrapper_send_metrics(*args, **kwargs):
            access_token = utils.get_access_token()
            global ingest

            ingest_endpoint = utils.get_metrics_url()
            ingest_timeout = float(os.environ.get("SIGNALFX_SEND_TIMEOUT",
                                                  0.3))

            sfx = signalfx.SignalFx(ingest_endpoint=ingest_endpoint)

            ingest = sfx.ingest(access_token, timeout=(1, ingest_timeout))
            context = kwargs.get("context", None)

            global default_dimensions
            default_dimensions = utils.get_default_dimensions(context)
            default_dimensions["metric_source"] = "azure_function_wrapper"

            if extra_dimensions is not None:
                default_dimensions = dict(default_dimensions,
                                          **extra_dimensions)

            start_counters = [{
                "metric": "azure.function.invocations",
                "value": 1
            }]
            send_metric(counters=start_counters)
            end_counters = []
            time_start = datetime.datetime.now()
            try:
                result = func(*args, **kwargs)
                return result
            except Exception:
                end_counters.append({
                    "metric": "azure.function.errors",
                    "value": 1
                })
                raise
            finally:
                time_taken = datetime.datetime.now() - time_start
                send_metric(
                    counters=end_counters,
                    gauges=[{
                        "metric": "azure.function.duration",
                        "value": time_taken.total_seconds() * 1000
                    }],
                )

                # flush everything
                ingest.stop()
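# The wrapper's core shape is count-on-entry, time the call, and always
# flush in a finally block. A stripped-down, self-contained sketch of that
# decorator pattern (no SignalFx calls; names illustrative):
import datetime
import functools

def timed(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start = datetime.datetime.now()
        try:
            return func(*args, **kwargs)
        finally:
            elapsed = datetime.datetime.now() - start
            print('{} took {:.1f} ms'.format(
                func.__name__, elapsed.total_seconds() * 1000))
    return wrapper

@timed
def work():
    return sum(range(1000))

work()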
Example #10
def run(token, filter_str):

    flow = signalfx.SignalFx().signalflow(token)
    try:
        computation = flow.execute(PROGRAM % filter_str)
        went_anomalous = False
        for msg in computation.stream():
            if isinstance(msg, signalfx.signalflow.messages.EventMessage):
                state = msg.properties.get('is')
                if state == 'anomalous':
                    went_anomalous = True
                if state == 'ok' and went_anomalous:
                    print('Anomaly cleared')
                    break
    finally:
        flow.close()
def collect_signalfx_metrics(config, time_frame):
    with signalfx.SignalFx().signalflow(config.signalfx_api_key) as flow:
        computation = flow.execute(
            config.signalfx_signalflow_program,
            start=time_frame.from_timestamp,
            stop=time_frame.to_timestamp,
            max_delay=1000,
        )
        for msg in computation.stream():
            if not isinstance(msg, signalfx.signalflow.messages.DataMessage):
                continue
            if msg.data and msg.logical_timestamp_ms > time_frame.from_timestamp:
                yield DataPoint(
                    msg.logical_timestamp_ms / 1000,
                    list(msg.data.values())[0],
                )
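# A consumption sketch for collect_signalfx_metrics, assuming config and
# time_frame objects carrying the attributes the generator reads (all
# values are placeholders):
import types

config = types.SimpleNamespace(
    signalfx_api_key='MY-TOKEN',
    signalfx_signalflow_program="data('cpu.utilization').mean().publish()",
)
time_frame = types.SimpleNamespace(
    from_timestamp=1600000000000,  # milliseconds
    to_timestamp=1600003600000,
)

for point in collect_signalfx_metrics(config, time_frame):
    print(point)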
def main():
    parser = argparse.ArgumentParser()
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument('--version',
                       action='store_true',
                       help='show version information')
    group.add_argument('-t', '--token', help='authentication token')
    parser.add_argument('--scope', help='An optional base scope')
    parser.add_argument('-a', '--api-endpoint', help='SignalFx API endpoint')
    parser.add_argument('-n',
                        '--dry-run',
                        action='store_true',
                        help='dry-run mode, do not update detectors')
    parser.add_argument('-D',
                        '--debug',
                        action='store_const',
                        dest='loglevel',
                        const=logging.DEBUG,
                        default=logging.WARNING,
                        help='enable debug logging')
    parser.add_argument('-v',
                        '--verbose',
                        action='store_const',
                        dest='loglevel',
                        const=logging.INFO,
                        help='enable verbose logging')
    parser.add_argument('directory',
                        nargs='?',
                        default='.',
                        help='Source directory')

    options = parser.parse_args()
    if options.version:
        print('{} v{}'.format(version.name, version.version))
        return

    logging.basicConfig(stream=sys.stderr,
                        level=options.loglevel,
                        format='%(asctime)s | %(levelname)8s | %(message)s')
    logging.getLogger('requests').setLevel(logging.WARNING)

    sfx = signalfx.SignalFx(api_endpoint=options.api_endpoint)
    client = syncer.Syncer(sfx.rest(options.token, timeout=5), options.scope,
                           options.dry_run)
    client.sync(options.directory)
    def _create_signalfx_ingest(self):
        """
        Creates and returns the SignalFX ingest client.
        :return: Ingest Client

        """
        ingest = None
        try:
            client = signalfx.SignalFx()
            ingest = client.ingest(self._ingest_token,
                                   endpoint=self._ingest_endpoint,
                                   timeout=self._ingest_timeout)
        except Exception as e:
            self._logger.error(
                "An error occurred when creating the ingest client: {0}".format(
                    e))

        return ingest
Example #14
    def __init__(
            self, token, ingest_endpoint=signalfx.DEFAULT_INGEST_ENDPOINT,
            registry=None, reporting_interval=1, default_dimensions=None):
        if default_dimensions is not None and not isinstance(
                default_dimensions, dict):
            raise TypeError('The default_dimensions argument must be a '
                            'dict of string keys to string values.')

        super(SignalFxReporter, self).__init__(
                registry=registry,
                reporting_interval=reporting_interval)

        self.default_dimensions = default_dimensions
        if default_dimensions is None:
            self.default_dimensions = {}

        self._sfx = (signalfx.SignalFx(ingest_endpoint=ingest_endpoint)
                     .ingest(token))
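# A usage sketch for this reporter; the import paths are assumptions based
# on the signalfx-python and pyformance package layouts:
from pyformance import MetricsRegistry
from signalfx.pyformance import SignalFxReporter  # assumed import path

registry = MetricsRegistry()
reporter = SignalFxReporter('MY-TOKEN', registry=registry,
                            default_dimensions={'env': 'dev'})
reporter.start()
registry.counter('requests').inc()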
Example #15
def publish_tsdata(publish_dict):
    """
    Publish the new time series data based on the old time series data
     and meta data.

    :param publish_dict: Publish information dictionary
    """

    # Load the meta data
    with open(publish_dict['metadata_path']) as metadata_file:
        metadata = json.load(metadata_file)
    # print(metadata)

    # Launch a client to send data to SignalFx
    client = signalfx.SignalFx(publish_dict['api_token'],
                               ingest_endpoint=publish_dict['ingest_endpoint'])

    # Get specific time series file
    tsdata_file = get_time_series_file_path(time.time(),
                                            publish_dict['interval'],
                                            publish_dict['time_range'],
                                            publish_dict['ts_directory'],
                                            'json')

    while True:
        if os.path.exists(tsdata_file):
            # Publish time series data of one file
            publish_one_file_data(client, metadata, tsdata_file, publish_dict)
        else:
            # If the file does not exist, there is no data for this time
            # slot, so sleep for one time interval.
            sleep(TIME_INFOR[publish_dict['time_range']]['second_range'])

        # Get the next time series file
        tsdata_file = get_next_time_series_file_path(tsdata_file,
                                                     publish_dict['interval'],
                                                     publish_dict['time_range']
                                                     )
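# A sketch of the publish_dict this function expects; the keys are taken
# from the code above, the values are placeholders:
publish_dict = {
    'api_token': 'MY-TOKEN',
    'ingest_endpoint': 'https://ingest.us1.signalfx.com',
    'metadata_path': '/path/to/metadata.json',
    'ts_directory': '/path/to/tsdata',
    'interval': 60,          # passed to the time series file helpers
    'time_range': 'day',     # must be a key of TIME_INFOR
}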
Example #16
def healthz():
    ts = int(time.time() * 1000)  # SignalFx timestamps are in milliseconds
    with signalfx.SignalFx().ingest(token) as sfx:
        sfx.send(
            counters=[{
                'metric': 'k8sws.flask.path.healthz.calls',
                'value': 1,
                'timestamp': ts,
                'dimensions': {
                    'app': 'k8ws-python-flask'
                },
            }],
            cumulative_counters=[{
                'metric': 'k8sws.flask.path.healthz.calls_cumulative',
                'value': 1,
                'timestamp': ts,
                'dimensions': {
                    'app': 'k8ws-python-flask'
                },
            }])
    return 'OK'
Example #17
    def submit(self, metrics):
        sfx = None
        if not self.dry_run:
            sfx = signalfx.SignalFx().ingest(self.signalfx_token)

        try:
            for metric in metrics:
                hostname = self.get_metric_name(metric)
                metric_value = metric.value
                metric_units = metric.units

                sfx_data = [{
                    'metric': metric_units,
                    'value': metric_value,
                    'dimensions': {
                        'host': hostname
                    }
                }]

                if not self.dry_run:
                    try:
                        if self.signalfx_metric_type == 'gauge':
                            sfx.send(gauges=sfx_data)
                        if self.signalfx_metric_type == 'counter':
                            sfx.send(counters=sfx_data)
                    except Exception as e:
                        raise LogsterParsingException(
                            "Unable to send metric {}: {}".format(sfx_data, e))
                else:
                    print("Hostname: {}, Metric Name: {}, Metric Value: {}".format(
                        hostname, metric_units, metric_value))
        finally:
            # Stop the client once, after all metrics are sent; calling
            # stop() inside the loop would break subsequent sends.
            if sfx is not None:
                sfx.stop()
import signalfx

sfx = signalfx.SignalFx()

program = "data('cpu.utilization').mean().publish()"
with sfx.signalflow('SOME-TOKEN') as flow:
    print('Executing {0} ...'.format(program))
    computation = flow.execute(program)
    for msg in computation.stream():
        if isinstance(msg, signalfx.signalflow.messages.DataMessage):
            print('{0}: {1}'.format(msg.logical_timestamp_ms, msg.data))
        if isinstance(msg, signalfx.signalflow.messages.EventMessage):
            print('{0}: {1}'.format(msg.timestamp_ms, msg.properties))
Example #19
def get_sfx_ingest(ingest_token, ingest_endpoint):
    sfx = signalfx.SignalFx(ingest_endpoint=ingest_endpoint)
    ingest = sfx.ingest(ingest_token)
    return ingest
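# A usage sketch for get_sfx_ingest (placeholder token and endpoint):
ingest = get_sfx_ingest('MY-TOKEN', 'https://ingest.us1.signalfx.com')
ingest.send(gauges=[{'metric': 'demo.gauge', 'value': 1}])
ingest.stop()  # flush queued datapoints before exiting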
def run_as_backing_job(params: inputs.Inputs, context: LambdaContext,
                       cache: Cache):
    ''' Run in backing-job mode: start the signalflow program and put the received data/metadata into the cache '''
    try:
        sfx = signalfx.SignalFx(api_endpoint=params.api_endpoint,
                                ingest_endpoint=params.api_endpoint,
                                stream_endpoint=params.api_endpoint)

        data_queue = Queue(maxsize=100000)
        metadata_queue = Queue(maxsize=100000)

        with sfx.signalflow(params.api_token) as flow:
            print(
                'run_as_backing_job: executing backing job. duration={}-{} program={}'
                .format(time.ctime(params.job_start_ms() / 1000),
                        time.ctime(params.job_end_ms() / 1000),
                        params.program))
            computation = flow.execute(params.program,
                                       start=params.job_start_ms(),
                                       stop=params.job_end_ms(),
                                       resolution=params.resolution_hint_ms,
                                       max_delay=None,
                                       persistent=False,
                                       immediate=False,
                                       disable_all_metric_publishes=None)

            print("run_as_backing_job: waiting for messages ...")
            job_resolution = 0
            for msg in computation.stream():
                if isinstance(msg, signalfx.signalflow.messages.DataMessage):
                    data_queue.put(msg)
                elif isinstance(msg,
                                signalfx.signalflow.messages.ControlMessage):
                    pass
                elif isinstance(msg,
                                signalfx.signalflow.messages.MetadataMessage):
                    if job_resolution < 1:
                        job_resolution = msg.properties["sf_resolutionMs"]
                        print("run_as_backing_job: job resolution_ms = ",
                              job_resolution)
                        keepalive_state = KeepaliveState(
                            params, context.aws_request_id, job_resolution)
                        # start keepalive thread now that we know job resolution
                        keepalive_thread = threading.Thread(
                            target=keepalive_thread_fn,
                            args=(params, context, data_queue, metadata_queue,
                                  keepalive_state, cache))
                        keepalive_thread.start()
                    metadata_queue.put(msg)
                else:
                    pass
                    # print('run_as_backing_job: dequeued message {0}: {1}'.format(msg, msg.__dict__))
            print(
                "run_as_backing_job: received last message from job. exiting ..."
            )
            # give time for enqueued messages to be processed by other threads and memcache states to be updated before exiting
            time.sleep(defaults.BACKING_JOB_SHUTDOWN_MS / 1000)
    except Exception as e:
        print("run_as_backing_job: exception", e, traceback.format_exc())
    finally:
        print("run_as_backing_job: ended")
Example #21
import argparse
import logging
import sys
import time

import signalfx

#Variables
timestamp = time.time() * 1000

#Parse access token
parser = argparse.ArgumentParser(description='SignalFx metrics reporting demo')
parser.add_argument('token', help='Your SignalFx API access token')
options = parser.parse_args()
#debugging - comment out to silence
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)

#Create SignalFx object
client = signalfx.SignalFx(ingest_endpoint='https://ingest.us1.signalfx.com')
ingest = client.ingest(options.token)

#send datapoint

ingest.send(gauges=[{
    'metric': 'pytest.datapoint1',
    'value': 777,
    'timestamp': timestamp,
    'dimensions': {
        'host': 'server1',
        'environment': 'development',
        'team': 'L1'
    }
}])
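# The ingest client batches datapoints on a background thread, so a short
# script like this should flush before exiting:
ingest.stop()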
#!/usr/bin/env python

# Copyright (C) 2016 SignalFx, Inc. All rights reserved.

import logging
import os
import sys
import time

sys.path.insert(0,
                os.path.join(os.path.dirname(os.path.abspath(__file__)), '..'))
import signalfx

MY_TOKEN = os.environ['SIGNALFX_API_TOKEN']
sfx = signalfx.SignalFx().ingest(MY_TOKEN)
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)

# Basic Usage
sfx.send(gauges=[{
    'metric': 'myfunc.time',
    'value': 532,
    'timestamp': time.time() * 1000
}],
         counters=[{
             'metric': 'myfunc.calls',
             'value': 42,
             'timestamp': time.time() * 1000
         }],
         cumulative_counters=[{
             'metric': 'myfunc.calls_cumulative',
             'value': 10,
             'timestamp': time.time() * 1000
         }])
Example #23
        output = handle_asset(args['key'], args['api_url'], tf_type, tf_name,
                              chart['chartId'])
    if output is not None:
            out += filter_hcl(output.decode('utf-8'))
            out += "\n"

    output = handle_asset(args['key'], args['api_url'], "signalfx_dashboard",
                          name, id)
    if output is not None:
        out += replace_chart_ids(filter_hcl(output.decode('utf-8')), chart_ids)

    return out


with signalfx.SignalFx(api_endpoint=args['api_url']).rest(
        args['key']) as sfx:
    if args['group']:
        group = sfx.get_dashboard_group(args['group'])
        for i, dash in enumerate(group['dashboards']):
            print(f"Exporting dashboard {dash}")
            dash_name = args['name'] + f"_dash_{i}"
            dash_out = handle_dashboard(sfx, dash, dash_name, args)
            # Replace the dashboard group id
            dash_out = dash_out.replace(
                f"\"{args['group']}\"",
                f"signalfx_dashboard_group.{args['name']}.id")
            write_output(args['output'], dash_name + ".tf", dash_out)

        output = handle_asset(args['key'], args['api_url'],
                              "signalfx_dashboard_group", args['name'],
                              args['group'])
    stop_time2_dt.strftime(
        '%Y-%m-%d %H:%M:%S'
    )  #datetime.strftime('%Y-%m-%d %H:%M:%S', stop_time2_dt)
)

# stop_time2 = time.gmtime(float(get_timestamp(stop_time2_dt)))
# start_time2 = time.gmtime(float(get_timestamp(start_time2_dt)))

# timestamp
stop_time2 = int(get_timestamp(stop_time2_dt) * 1000)
start_time2 = int(get_timestamp(start_time2_dt) * 1000)

print('stop_time2_dt', stop_time2_dt, 'start_time2_dt', start_time2_dt)

import json
import signalfx
flow = signalfx.SignalFx().signalflow('y0z1mWizm2m-ae4Erm15ag')

program = "failed_check_statuses = data('asg.healthy.infinite-loop.failed-check-status.TEST', extrapolation='zero')" \
                 ".not_between(200, 399, True, True)" \
                 ".publish(label='failed_check_statuses')"

c = flow.execute(program, start=start_time2, stop=stop_time2)

for msg in c.stream():
    if isinstance(msg, signalfx.signalflow.messages.DataMessage):
        data = msg.data
        print('@data {0}: {1}'.format(msg.logical_timestamp_ms,
                                      str(json.dumps(data))))

        print ' {0:<19} | {1:<12} | {2:<34} | {3:<17} | {4:<50} | {5:<16} | {6}'.format(
            'timestamp', 'key', 'AWSUniqueId', 'service', 'host', 'host_ip',
Example #25
import argparse
import os
import logging
import sys
import time

sys.path.insert(0,
                os.path.join(os.path.dirname(os.path.abspath(__file__)), '..'))
import signalfx  # noqa

if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description='SignalFx metrics reporting demo')
    parser.add_argument('token', help='Your SignalFx API access token')
    options = parser.parse_args()
    client = signalfx.SignalFx()
    ingest = client.ingest(options.token)
    logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)

    try:
        i = 0
        while True:
            ingest.send(gauges=[{
                'metric': 'test.cpu',
                'value': i % 10
            }],
                        counters=[{
                            'metric': 'cpu_cnt',
                            'value': i % 2
                        }])
            i += 1
Example #26
import functools
import signalfx
import os
import datetime
import six

from . import utils
from .version import name, version

ingest_endpoint = utils.get_metrics_url()
ingest_timeout = float(os.environ.get('SIGNALFX_SEND_TIMEOUT', 0.3))

sfx = signalfx.SignalFx(ingest_endpoint=ingest_endpoint)

is_cold_start = True

default_dimensions = {}

ingest = None


def map_datapoint(data_point):
    return {
        'metric': data_point['metric'],
        'value': data_point['value'],
        'dimensions': dict(data_point['dimensions'], **default_dimensions)
        if 'dimensions' in data_point else default_dimensions
    }
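# A quick illustration of map_datapoint's dimension merging (assumes
# default_dimensions was populated as above; values are placeholders):
default_dimensions = {'metric_source': 'azure_function_wrapper'}
print(map_datapoint({
    'metric': 'azure.function.invocations',
    'value': 1,
    'dimensions': {'function': 'demo'},
}))
# dimensions -> {'function': 'demo', 'metric_source': 'azure_function_wrapper'}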
if 'SF_TOKEN' not in os.environ:
    print('SF_TOKEN env variable not found')
    sys.exit(0)

filepath = '/arlogs/userlist'

realm = 'us0'

if 'REALM' in os.environ:
    realm = os.environ['REALM']

endpoint = 'https://ingest.' + realm + '.signalfx.com'

token = os.environ['SF_TOKEN']

sfx = signalfx.SignalFx(ingest_endpoint=endpoint).ingest(token)


@app.route('/healthz')
def health():
    sfx.send(counters=[{'metric': 'autorem-listener.heartbeat', 'value': 1}])
    return "OK"


@app.route('/health', methods=['POST'])
def healthCheck():
    '''Sends dummy event'''

    sfx.send_event(event_type='Health Check', properties={'status': 'OK'})

    return "OK"
Example #28
def main(sfx_auth_key, api_endpoint, stream_endpoint, format, verbose,
         detector_id):

    if verbose:
        lvl = logging.DEBUG
    else:
        lvl = logging.INFO

    if format == "json":
        lvl = logging.CRITICAL

    logging.basicConfig(level=lvl, datefmt="%H:%M:%S")

    with signalfx.SignalFx(
            api_endpoint=api_endpoint,
            stream_endpoint=stream_endpoint).rest(sfx_auth_key) as sfx:

        try:
            detector = sfx.get_detector(detector_id)
            events = sfx.get_detector_events(detector_id)
            incidents = sfx.get_detector_incidents(detector_id)
        except requests.exceptions.HTTPError as e:
            sys.exit("Unable to find detector id {0} ({1})".format(
                detector_id, e))

        with signalfx.SignalFx(api_endpoint=api_endpoint,
                               stream_endpoint=stream_endpoint).signalflow(
                                   sfx_auth_key) as flow:

            computation = flow.preflight(
                detector["programText"],
                int(round(time.time() * 1000)) - (3600 * 1000),
                int(round(time.time() * 1000)),
            )
            for _ in computation.stream():
                pass

            logging.info("Registered %d rule(s)" %
                         (len(Check.__subclasses__()) +
                          len(RuleCheck.__subclasses__())))

            warnings = defaultdict(list)

            for check in Check.__subclasses__():
                logging.debug("Processing %s" % check.ecode)
                ch = check()
                if ch.process(detector, events, incidents, computation):
                    warnings[detector_id].append({
                        "error_code": ch.ecode,
                        "description": ch.desc,
                        "help": ch.help,
                    })

            rule_warnings = defaultdict(lambda: defaultdict(list))

            for check in RuleCheck.__subclasses__():
                logging.debug("Processing alert check %s" % check.ecode)

                ch = check()
                result = ch.process(detector, events, incidents, computation)
                if result:
                    for rule_id, ecode in result.items():
                        rule_warnings[detector_id][rule_id].append({
                            "error_code": ch.ecode,
                            "description": ch.desc,
                            "help": ch.help,
                        })

            if len(warnings) > 0:
                logging.error("Detector checks:")
                for det in warnings:
                    for warn in warnings[det]:
                        logging.info("\t{0}: {1}\n\t{2}\n\t{3}\n".format(
                            det, warn['error_code'], warn['description'],
                            warn['help']))
            if len(rule_warnings) > 0:
                logging.error("Alert rule checks:")
                for det in rule_warnings:
                    for rule in rule_warnings[det]:
                        for warn in rule_warnings[det][rule]:
                            logging.info(
                                "\t{0}: Rule: {1} {2}\n\t{3}\n\t{4}\n".format(
                                    det, rule, warn['error_code'],
                                    warn['description'], warn['help']))

            if format == "json":
                print(
                    json.dumps(
                        {
                            "warnings": warnings,
                            "rule_warnings": rule_warnings
                        },
                        sort_keys=True,
                        indent=4,
                    ))

            computation.close()
Example #29
args = parser.parse_args()

if not args.token: args.token = os.environ['SIGNALFX_API_TOKEN']
if not args.metric_name: args.metric_name = 'fake..metric_to_"test'
if not args.tag_name: args.tag_name = '"fake_tag_name"~'
if not args.key: args.key = 'fake_key"%'
if not args.value: args.value = 'fake_value'

FAKE_METRIC_NAME, FAKE_TAG, FAKE_KEY, FAKE_VALUE = \
    args.metric_name, args.tag_name, args.key, args.value

TOKEN = args.token
gT, sT, dT, uT = 'get_tag', 'search_tags', 'delete_tag', 'update_tag'
sD, uD, gD = 'search_dimensions', 'update_dimension', 'get_dimension'

sfx = signalfx.SignalFx().rest(TOKEN)
CLIENT_NAME = 'sfx'

def test_func(client_name, func_name, sleep_time=5, msg='is being tested!',
              func_args=None, **kwargs):
    return_value = None
    if func_name in dir(eval(client_name)):
        try:
            func = eval(client_name + '.' + func_name)
            # Guard against the default None so *args unpacking works.
            return_value = func(*(func_args or ()), **kwargs)
            print(func_name, msg, '\n', return_value, '\n')
            time.sleep(sleep_time)
        except requests.exceptions.HTTPError as e:
            print(func_name, e)
    else:
        print(func_name, 'not available')
Example #30
def main():
    parser = argparse.ArgumentParser(description=(
        'SignalFlow Analytics interactive command-line client (v{})'.format(
            version)))
    parser.add_argument('-t', '--token', metavar='TOKEN', help='session token')
    parser.add_argument('-x',
                        '--execute',
                        action='store_true',
                        help='force non-interactive mode')
    parser.add_argument('--api-endpoint',
                        metavar='URL',
                        default='https://api.signalfx.com',
                        help='override API endpoint URL')
    parser.add_argument('--stream-endpoint',
                        metavar='URL',
                        default='https://stream.signalfx.com',
                        help='override stream endpoint URL')
    parser.add_argument('-a',
                        '--start',
                        metavar='START',
                        default='-1m',
                        help='start timestamp or delta (default: -1m)')
    parser.add_argument('-o',
                        '--stop',
                        metavar='STOP',
                        default=None,
                        help='stop timestamp or delta (default: infinity)')
    parser.add_argument('-r',
                        '--resolution',
                        metavar='RESOLUTION',
                        default=None,
                        help='compute resolution (default: auto)')
    parser.add_argument('-d',
                        '--max-delay',
                        metavar='MAX-DELAY',
                        default=None,
                        help='maximum data wait (default: auto)')
    parser.add_argument('--output',
                        choices=['live', 'csv', 'graph'],
                        default='live',
                        help='default output format')
    parser.add_argument('program',
                        nargs='?',
                        type=argparse.FileType('r'),
                        default=sys.stdin,
                        help='file to read program from (default: stdin)')
    TimezoneAction.add_to_parser(parser)
    options = parser.parse_args()

    params = {
        'start': options.start,
        'stop': options.stop,
        'resolution': options.resolution,
        'max_delay': options.max_delay,
        'output': options.output,
    }

    # Ensure that we have a session token.
    token = find_session_token(options)
    if not token:
        sys.stderr.write('No authentication token found.\n')
        return 1

    flow = signalfx.SignalFx(
        api_endpoint=options.api_endpoint,
        stream_endpoint=options.stream_endpoint).signalflow(token)
    try:
        if sys.stdin.isatty() and not options.execute:
            prompt(flow, options.timezone, params)
        else:
            program = options.program.read()
            params = process_params(**params)
            if options.output == 'live':
                live.stream(flow, options.timezone, program, **params)
            else:
                data = csvflow.stream(flow, program, **params)
                if options.output == 'csv':
                    for line in data:
                        print(line)
                elif options.output == 'graph':
                    graph.render(data, options.timezone)
    finally:
        flow.close()

    return 0