def send_metrics(build_ran, create_app, route_http_failed, service_http_failed,
                 run_time):
    """ send data to MetricSender"""
    ms_time = time.time()
    ms = MetricSender()

    ms.add_metric({'openshift.master.app.create.route_http_failed': route_http_failed})
    ms.add_metric({'openshift.master.app.create.service_http_failed': service_http_failed})

    if build_ran == 1:
        ms.add_metric({'openshift.master.app.build.create': create_app})
        ms.add_metric({'openshift.master.app.build.create.time': run_time})
    else:
        ms.add_metric({'openshift.master.app.create': create_app})
        ms.add_metric({'openshift.master.app.create.time': run_time})

    logger.debug("Metrics being sent to zabbix:")
    logger.debug(ms.print_unique_metrics())

    ms.send_metrics()
    logger.info("Data sent to Zagg in %s seconds", str(time.time() - ms_time))
class EtcdStatusMetricSender(object):
    """ class to gather all metrics from etcd daemons """
    def __init__(self):
        self.api_host = None
        self.args = None
        self.parser = None
        self.config = None
        self.etcd_ping = 0
        self.default_config = '/etc/openshift_tools/etcd_metrics.yml'
        self.metric_sender = MetricSender()

    def parse_args(self):
        '''Parse the arguments for this script'''
        self.parser = argparse.ArgumentParser(
            description="Script that gathers metrics from etcd")
        self.parser.add_argument('-d',
                                 '--debug',
                                 default=False,
                                 action="store_true",
                                 help="debug mode")
        self.parser.add_argument('-v',
                                 '--verbose',
                                 default=False,
                                 action="store_true",
                                 help="Verbose?")
        self.parser.add_argument(
            '-t',
            '--test',
            default=False,
            action="store_true",
            help="Run the script but don't send to zabbix")
        self.parser.add_argument(
            '-c',
            '--configfile',
            default=self.default_config,
            help="Config file that contains metrics to be collected, "
                 "defaults to etcd_metrics.yml")

        self.args = self.parser.parse_args()

    def call_etcd_api(self, rest_path):
        '''Makes the API calls to rest endpoints in etcd'''
        try:
            response = requests.get(
                self.api_host + rest_path,
                cert=(self.config['etcd_info']['files']['ssl_client_cert'],
                      self.config['etcd_info']['files']['ssl_client_key']),
                verify=False)
            self.etcd_ping = 1
        except requests.exceptions.ConnectionError as ex:
            print('ERROR talking to etcd API: {0}'.format(ex))
        else:
            return response.content

    def json_metric(self, met):
        '''process json data from etcd'''
        return_data = {}
        api_response = self.call_etcd_api(met['path'])
        if api_response:
            content = json.loads(api_response)

            for item in met['values']:
                return_data[met['prefix'] + item['zab_key']] = content[item['src']]

        return return_data
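
    # Illustrative sketch of the `met` dict that json_metric() expects, with
    # placeholder values (the real entries come from etcd_metrics.yml):
    #
    #   {'type': 'json',
    #    'path': '/v2/stats/self',                  # REST path appended to api_host
    #    'prefix': 'openshift.master.etcd.',
    #    'values': [{'src': 'someEtcdStatsField',   # key in the JSON response
    #                'zab_key': 'some.zabbix.key'}]}  # appended to the prefix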

    def text_metric(self, met):
        '''process text value from etcd'''
        return_data = {}

        content = self.call_etcd_api(met['path'])
        if content:
            for metric in text_string_to_metric_families(content):
                # skipping histogram and summary types unless we find a good way to add them to zabbix (unlikely)
                if metric.type in ['histogram', 'summary']:
                    continue
                elif metric.type in ['counter', 'gauge'] and metric.name in met['values']:
                    zab_metric_name = met['prefix'] + metric.name.replace('_', '.')
                    if len(metric.samples) > 1 and met['values'][metric.name]:
                        # multiple samples: the configured value names the label
                        # whose value makes each zabbix key unique
                        sub_key = met['values'][metric.name]
                        for singlemetric in metric.samples:
                            return_data['{0}.{1}'.format(
                                zab_metric_name,
                                singlemetric[1][sub_key])] = singlemetric[2]
                    else:
                        return_data[zab_metric_name] = metric.samples[0][2]
                else:
                    if self.args.debug:
                        print('Got unknown type of metric from etcd, skipping it: ({0})'.format(
                            metric.type))

        return return_data
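
    # Illustrative sketch of the `met` dict that text_metric() expects, with
    # placeholder names. 'values' maps a prometheus metric name to the label
    # used to distinguish its samples (or to null when a single sample is
    # expected):
    #
    #   {'type': 'text',
    #    'path': '/metrics',
    #    'prefix': 'openshift.master.etcd.',
    #    'values': {'etcd_some_counter_total': None,
    #               'etcd_some_labeled_gauge': 'some_label_name'}}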

    def run(self):
        ''' Get data from etcd API
        '''
        self.parse_args()

        try:
            with open(self.args.configfile, 'r') as configfile:
                self.config = yaml.safe_load(configfile)
        except IOError as ex:
            print('There was a problem opening the config file: {0}'.format(ex))
            print('Exiting')
            sys.exit(1)

        # find out the etcd port
        try:
            master_config_path = self.config['etcd_info']['files']['openshift_master_config']
            with open(master_config_path, 'r') as f:
                om_config = yaml.safe_load(f)
        except IOError as ex:
            print('Problem opening openshift master config: {0}'.format(ex))
            sys.exit(2)
        else:
            self.api_host = om_config["etcdClientInfo"]["urls"][0]

        # let's get the metrics
        for metric in self.config['etcd_info']['metrics']:
            if metric['type'] == 'text':
                self.metric_sender.add_metric(self.text_metric(metric))
            elif metric['type'] == 'json':
                self.metric_sender.add_metric(self.json_metric(metric))

        self.send_zagg_data()

    def send_zagg_data(self):
        ''' Send the data to zagg, or display it on the console when the test option is used
        '''
        self.metric_sender.add_metric(
            {'openshift.master.etcd.ping': self.etcd_ping})

        if not self.args.test:
            self.metric_sender.send_metrics()
        else:
            self.metric_sender.print_unique_metrics()
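
# Illustrative sketch (placeholder paths and values) of the overall
# etcd_metrics.yml layout that run() expects: 'files' supplies the client
# cert/key and the openshift master config (used to discover the etcd URL),
# and 'metrics' lists json/text metric definitions like the ones shown above.
#
#   etcd_info:
#     files:
#       ssl_client_cert: /path/to/etcd-client.crt
#       ssl_client_key: /path/to/etcd-client.key
#       openshift_master_config: /path/to/master-config.yaml
#     metrics:
#     - type: json
#       path: /v2/stats/self
#       prefix: openshift.master.etcd.
#       values: [...]
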
class PrometheusMetricSender(object):
    """ class to gather all metrics from prometheus metrics endpoints """
    def __init__(self):
        self.args = None
        self.parser = None
        self.config = None
        self.metric_sender = MetricSender()

    def parse_args(self):
        '''Parse the arguments for this script'''
        self.parser = argparse.ArgumentParser(
            description="Script that gathers metrics from prometheus endpoints"
        )
        self.parser.add_argument('-d',
                                 '--debug',
                                 default=False,
                                 action="store_true",
                                 help="debug mode")
        self.parser.add_argument(
            '-t',
            '--test',
            default=False,
            action="store_true",
            help="Run the script but don't send to monitoring systems")
        self.parser.add_argument(
            '-c',
            '--configfile',
            default='/etc/openshift_tools/prometheus_metrics.yml',
            help="Config file that contains metrics to be collected, "
                 "defaults to prometheus_metrics.yml")

        self.args = self.parser.parse_args()

    @staticmethod
    def call_api(rest_path):
        ''' Makes REST call to given url'''
        try:
            response = requests.get(rest_path)
        except requests.exceptions.ConnectionError:
            logger.exception('Error talking to the rest endpoint given: %s',
                             rest_path)
        else:
            return response.content

    def read_metric(self, met):
        ''' read a prometheus endpoint and create data for monitoring systems'''
        return_data = {}
        content = self.call_api(met['url'])
        if content is not None:
            for metric in text_string_to_metric_families(content):
                # skipping histogram and summary types unless we find a good way to add them to zabbix (unlikely)
                if metric.type in ['histogram', 'summary']:
                    continue
                elif metric.type in ['counter', 'gauge']:
                    if metric.name in met['metrics']:
                        zmetric_name = '{}.{}'.format(
                            met['name'], metric.name.replace('_', '.'))
                        logger.debug('Sending: %s - %s', zmetric_name,
                                     metric.samples[0][2])
                        return_data[zmetric_name] = metric.samples[0][2]
                    else:
                        logger.debug(
                            'We are skipping metric, not requested: %s',
                            metric.name)
                else:
                    logger.error('Unknown metric type: %s - %s', metric.type,
                                 metric.name)

        return return_data

    @staticmethod
    def check_endpoint(endpoint_config):
        ''' Quick sanity check that an endpoint config has the required keys and that they are not empty.
            For example, a valid endpoint config should list a name, url, and metrics:
            - name: 'podchecker'
              url: 'http://podchecker.projectawesome.svc.cluster.local:1234/metrics'
              metrics:
              - 'podchecker_awesome_stats'
              - 'podchecker_memory_usage'

            If any of these options is missing or empty, PrometheusMetricSender skips the endpoint.
        '''
        for item in ('name', 'url', 'metrics'):
            if not endpoint_config.get(item):
                return False

        return True

    def run(self):
        ''' Get data from prometheus metrics endpoints
        '''
        self.parse_args()

        if self.args.debug:
            logger.setLevel(logging.DEBUG)

        try:
            with open(self.args.configfile, 'r') as configfile:
                self.config = yaml.safe_load(configfile)
        except IOError:
            logger.exception('There was a problem opening the config file')
            logger.error('Exiting because of above problem')
            sys.exit(1)

        for target in self.config['endpoints']:
            if self.check_endpoint(target):
                self.metric_sender.add_metric(self.read_metric(target))

        self.send_zagg_data()

    def send_zagg_data(self):
        ''' Send the data to monitoring, or display it on the console when the test option is used
        '''
        if not self.args.test:
            self.metric_sender.send_metrics()
        else:
            self.metric_sender.print_unique_metrics()
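
# Illustrative sketch of the prometheus_metrics.yml layout that run() iterates
# over; the endpoint shown is the placeholder from the check_endpoint()
# docstring, and only the metric names listed under 'metrics' are forwarded:
#
#   endpoints:
#   - name: 'podchecker'
#     url: 'http://podchecker.projectawesome.svc.cluster.local:1234/metrics'
#     metrics:
#     - 'podchecker_awesome_stats'
#     - 'podchecker_memory_usage'
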
class UrlCheck(object):
    """Checks for an url"""
    def __init__(self):
        self.args = None
        self.metrics = None  # metric sender
        self.config = None
        self.default_config = '/etc/openshift_tools/urlchecks.yml'
        self.parse_args()
        if self.args.verbose:
            logConsole.setLevel(logging.INFO)
        if self.args.debug:
            logConsole.setLevel(logging.DEBUG)

    def parse_args(self):
        """ parse the args from the cli """

        parser = argparse.ArgumentParser(
            description='Openshift url check sender')
        parser.add_argument('-v',
                            '--verbose',
                            action='store_true',
                            default=None,
                            help='Verbose?')
        parser.add_argument('--debug',
                            action='store_true',
                            default=None,
                            help='Debug?')
        parser.add_argument('--dry-run',
                            action='store_true',
                            default=False,
                            help='Collect stats, but no report to zabbix')
        parser.add_argument(
            '-c',
            '--configfile',
            default=self.default_config,
            help="Config file that contains urls to check, "
                 "defaults to urlchecks.yml")

        self.args = parser.parse_args()

    @staticmethod
    def check_url(url):
        ''' Connect to URL and check if you get http 200 back '''
        logger.debug('Running the check against %s', url)
        try:
            returncode = requests.get(url, verify=False).status_code
            logger.debug("return code %s", returncode)
            return returncode == 200
        except requests.exceptions.RequestException:
            logger.error("URL check failed for %s", url)
            return False
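
    # Illustrative call (placeholder URL): check_url() returns True only when
    # the GET request succeeds and comes back with HTTP 200.
    #
    #   UrlCheck.check_url('https://master.example.com/healthz')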

    def run(self):
        """Main function to run the check"""
        logger.info('Starting url checker...')

        try:
            with open(self.args.configfile, 'r') as configfile:
                self.config = yaml.safe_load(configfile)
                logger.debug('Loaded config file: %s', self.config)
        except IOError:
            logger.error('There was a problem opening the config file. Exiting.')
            sys.exit(1)

        return_data = {}
        self.metrics = MetricSender(verbose=self.args.verbose,
                                    debug=self.args.debug)

        for itemtocheck in self.config['urls_to_check']:
            if self.check_url(itemtocheck['url']):
                return_data[itemtocheck['zab_key']] = 1
            else:
                return_data[itemtocheck['zab_key']] = 0

        logger.debug('return_data before adding to sender: %s', return_data)
        self.metrics.add_metric(return_data)

        logger.info('metrics queued before sending to zabbix: %s',
                    self.metrics.active_senders[0].unique_metrics)
        if self.args.dry_run:
            self.metrics.print_unique_metrics()
        else:
            self.metrics.send_metrics()
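
# Illustrative sketch (placeholder values) of the urlchecks.yml layout that
# run() expects: each entry pairs the URL to probe with the zabbix key that
# receives 1 (reachable, HTTP 200) or 0 (anything else):
#
#   urls_to_check:
#   - url: 'https://master.example.com/healthz'
#     zab_key: 'example.url.check.master.healthz'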