Example #1
def main():
    """  Main function to run the check """

    args = parse_args()
    metric_sender = MetricSender(verbose=args.verbose, debug=args.debug)

    filesys_full_metric = ['filesys.full']
    filesys_inode_derived_metrics = {
        'filesys.inodes.pused':
        'filesys.usedfiles / (filesys.usedfiles + filesys.freefiles) * 100'
    }

    discovery_key_fs = 'disc.filesys'
    item_prototype_macro_fs = '#OSO_FILESYS'
    item_prototype_key_full = 'disc.filesys.full'
    item_prototype_key_inode = 'disc.filesys.inodes.pused'

    # Get the disk space
    filesys_full_metrics = pminfo.get_metrics(filesys_full_metric)

    filtered_filesys_metrics = filter_out_docker_filesystems(
        filesys_full_metrics, 'filesys.full.')

    if args.filter_pod_pv:
        filtered_filesys_metrics = filter_out_customer_pv_filesystems(
            filtered_filesys_metrics)

    if args.force_send_zeros:
        filtered_filesys_metrics = zero_mount_percentages(
            filtered_filesys_metrics)

    metric_sender.add_dynamic_metric(discovery_key_fs, item_prototype_macro_fs,
                                     filtered_filesys_metrics.keys())
    for filesys_name, filesys_full in filtered_filesys_metrics.iteritems():
        metric_sender.add_metric(
            {'%s[%s]' % (item_prototype_key_full, filesys_name): filesys_full})

    # Get filesystem inode metrics
    filesys_inode_metrics = pminfo.get_metrics(
        derived_metrics=filesys_inode_derived_metrics)

    filtered_filesys_inode_metrics = filter_out_docker_filesystems(
        filesys_inode_metrics, 'filesys.inodes.pused.')

    if args.filter_pod_pv:
        filtered_filesys_inode_metrics = filter_out_customer_pv_filesystems(
            filtered_filesys_inode_metrics)

    if args.force_send_zeros:
        filtered_filesys_inode_metrics = zero_mount_percentages(
            filtered_filesys_inode_metrics)

    for filesys_name, filesys_inodes in filtered_filesys_inode_metrics.iteritems():
        metric_sender.add_metric({
            '%s[%s]' % (item_prototype_key_inode, filesys_name):
            filesys_inodes
        })

    metric_sender.send_metrics()
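
The pattern in Example #1 recurs throughout this page: register the discovered instances with add_dynamic_metric, then send one value per instance under a 'key[instance]' item name. Below is a minimal, self-contained sketch of the key construction, using a hypothetical metrics dict in place of the pminfo data; the MetricSender calls themselves are the ones shown above.

# Hypothetical filtered metrics keyed by mount point (stands in for pminfo output).
filtered_filesys_metrics = {'/var': 61.2, '/boot': 18.5}

item_prototype_key_full = 'disc.filesys.full'

# Build the per-instance Zabbix item keys the same way Example #1 does.
for filesys_name, filesys_full in sorted(filtered_filesys_metrics.items()):
    zab_key = '%s[%s]' % (item_prototype_key_full, filesys_name)
    print('%s = %s' % (zab_key, filesys_full))
# disc.filesys.full[/boot] = 18.5
# disc.filesys.full[/var] = 61.2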
Example #2
    def run(self):
        """Main function to run the check"""
        logging.info('Starting url checker...')

        try:
            with open(self.args.configfile, 'r') as configfile:
                self.config = yaml.load(configfile)
                logging.debug('Loaded config file: %s', self.config)
        except IOError:
            logging.error(
                'There was a problem opening the config file. Exiting.')
            sys.exit(1)

        return_data = {}
        self.metrics = MetricSender(verbose=self.args.verbose,
                                    debug=self.args.debug)

        for itemtocheck in self.config['urls_to_check']:
            if self.check_url(itemtocheck['url']):
                return_data[itemtocheck['zab_key']] = 1
            else:
                return_data[itemtocheck['zab_key']] = 0

        logging.debug('return_data before adding to sender: %s', return_data)
        self.metrics.add_metric(return_data)

        logging.info('metrics before sending to zabbix: %s',
                     self.metrics.active_senders[0].unique_metrics)
        if self.args.dry_run:
            self.metrics.print_unique_metrics()
        else:
            self.metrics.send_metrics()
Example #3
    def run(self):
        """  Main function to run the check """

        self.parse_args()
        self.ora = OpenshiftRestApi()
        self.metric_sender = MetricSender(verbose=self.args.verbose,
                                          debug=self.args.debug)

        status = None
        try:
            self.get_service()
            if not self.args.service_count:
                status = self.check_service()

        except Exception as ex:
            print "Problem retreiving data: %s " % ex.message

        if status:
            self.metric_sender.add_metric({
                "openshift.webservice.{}.status".format(self.args.pod):
                status
            })

        self.metric_sender.add_metric(
            {'openshift.cluster.service.count': self.servicecount},
            synthetic=True)
        self.metric_sender.send_metrics()
Example #4
    def __init__(self):
        self.api_host = None
        self.args = None
        self.parser = None
        self.config = None
        self.etcd_ping = 0
        self.default_config = '/etc/openshift_tools/etcd_metrics.yml'
        self.metric_sender = MetricSender()
Example #5
    def __init__(self):
        ''' constructor '''
        self.args = None
        self.current_date = datetime.datetime.today()
        self.parse_args()
        self.msend = MetricSender(debug=self.args.debug)
        self.days_left_internal = None
        self.days_left_external = None
Example #6
def send_metrics(usage, capacity, used):
    """ send data to MetricSender"""
    logger.debug("send_metrics()")
    ms_time = time.time()
    ms = MetricSender()
    logger.info("Send data to MetricSender")
    ms.add_metric({'openshift.master.pv.percent.usage': usage})
    ms.add_metric({'openshift.master.pv.capacity.max': capacity})
    ms.add_metric({'openshift.master.pv.capacity.used': used})
    ms.send_metrics()
    logger.info("Data sent to Zagg in %s seconds", str(time.time() - ms_time))
Example #7
def main():
    '''get docker and openshift versions and send to metric sender
    '''

    args = parse_args()
    mts = MetricSender(verbose=args.verbose, debug=args.debug)

    # Check if the host rpm db is mounted; otherwise check against the container db
    rpm_db_path = "/host/var/lib/rpm"
    if not os.path.exists(rpm_db_path):
        rpm_db_path = "/var/lib/rpm"

    keys = {}

    # Accumulate failures
    failures = 0

    # Get docker version
    success, err = add_specific_rpm_version("docker", rpm_db_path, keys, mts)
    if not success:
        failures += 1
        print "Failed to get docker rpm version. " + err.output

    openshift_package_name = "origin"

    # Get openshift node version (attempt upstream)
    success, err = add_specific_rpm_version(
        "{}-node".format(openshift_package_name), rpm_db_path, keys, mts,
        "openshift.node.")
    if not success:
        # Get openshift version (attempt downstream)
        openshift_package_name = "atomic-openshift"
        success, err2 = add_specific_rpm_version(
            "{}-node".format(openshift_package_name), rpm_db_path, keys, mts,
            "openshift.node.")
        if not success:
            failures += 1
            print "Failed to get openshift rpm version:\n" + err.output + +err2.output

    # Get openshift master version (upstream or downstream) - only if node rpm found
    if success:
        success, err = add_specific_rpm_version(
            "{}-master".format(openshift_package_name), rpm_db_path, keys, mts,
            "openshift.master.")
        if not success:
            # Print notification but don't count this as failure
            print "Note: " + err.output

    print "Sending these metrics:"
    print json.dumps(keys, indent=4)
    mts.send_metrics()
    print "\nDone.\n"

    sys.exit(failures)
Example #8
    def run(self):
        """  Main function to run the check """

        self.parse_args()
        self.metric_sender = MetricSender(verbose=self.args.verbose,
                                          debug=self.args.debug)

        if self.check_dns_port_alive():
            self.do_dns_check()

        self.metric_sender.send_metrics()
Example #9
    def run(self):
        """  Main function to run the check """

        self.parse_args()
        self.metric_sender = MetricSender(verbose=self.args.verbose,
                                          debug=self.args.debug)

        status = self.parse_config()

        self.metric_sender.add_metric({"openshift.kubeconfig.status": status})

        self.metric_sender.send_metrics()
Example #10
def main():
    """  Main function to run the check """

    args = parse_args()
    metric_sender = MetricSender(verbose=args.verbose, debug=args.debug)

    discovery_key_disk = 'disc.disk'
    interval = 3
    pcp_disk_dev_metrics = ['disk.dev.total', 'disk.dev.avactive']
    item_prototype_macro_disk = '#OSO_DISK'
    item_prototype_key_tps = 'disc.disk.tps'
    item_prototype_key_putil = 'disc.disk.putil'

    disk_metrics = pminfo.get_sampled_data(pcp_disk_dev_metrics, interval, 2)

    pcp_metrics_divided = {}
    for metric in pcp_disk_dev_metrics:
        pcp_metrics_divided[metric] = {
            k: v
            for k, v in disk_metrics.items() if metric in k
        }

    # do TPS checks; use disk.dev.total
    filtered_disk_totals = clean_up_metric_dict(
        pcp_metrics_divided[pcp_disk_dev_metrics[0]],
        pcp_disk_dev_metrics[0] + '.')

    # Add dynamic items
    metric_sender.add_dynamic_metric(discovery_key_disk,
                                     item_prototype_macro_disk,
                                     filtered_disk_totals.keys())

    # calculate the TPS and add them to the MetricSender
    for disk, totals in filtered_disk_totals.iteritems():
        disk_tps = (totals[1] - totals[0]) / interval
        metric_sender.add_metric(
            {'%s[%s]' % (item_prototype_key_tps, disk): disk_tps})

    # do % Util checks; use disk.dev.avactive
    filtered_disk_totals = clean_up_metric_dict(
        pcp_metrics_divided[pcp_disk_dev_metrics[1]],
        pcp_disk_dev_metrics[1] + '.')

    # calculate the % Util and add them to the MetricSender
    for disk, totals in filtered_disk_totals.iteritems():
        total_active = float(totals[1] - totals[0]) / 1000.0
        putil = 100 * total_active / interval

        metric_sender.add_metric(
            {'%s[%s]' % (item_prototype_key_putil, disk): putil})

    metric_sender.send_metrics()
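
The % Util math above turns two samples of disk.dev.avactive (cumulative device-active time in milliseconds, as the /1000.0 conversion implies) into a busy percentage over the sampling interval. A worked sketch with made-up sample values, assuming the same two-sample, 3-second interval as the example:

interval = 3                     # seconds between the two samples
totals = [12000, 13500]          # hypothetical disk.dev.avactive readings, in ms

total_active = float(totals[1] - totals[0]) / 1000.0  # 1.5 seconds active
putil = 100 * total_active / interval                 # 50.0 -> disk was ~50% busy
print('%.1f%% util' % putil)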
Example #11
    def __init__(self):
        """ initialize EBSStuckVolumesCheck class """
        self.args = None
        self.vol_state_data = None

        self.parse_args()

        # Make sure we're using the profile they've requested.
        if self.args.aws_creds_profile:
            os.environ['AWS_PROFILE'] = self.args.aws_creds_profile

        self.eu = EbsUtil(self.args.region, verbose=self.args.verbose)
        self.mts = MetricSender(verbose=self.args.verbose)
Example #12
class OpenshiftKubeconfigChecker(object):
    """ Checks whether kubeconfig is valid yaml """
    def __init__(self):
        self.args = None
        self.metric_sender = None

    def run(self):
        """  Main function to run the check """

        self.parse_args()
        self.metric_sender = MetricSender(verbose=self.args.verbose,
                                          debug=self.args.debug)

        status = self.parse_config()

        self.metric_sender.add_metric({"openshift.kubeconfig.status": status})

        self.metric_sender.send_metrics()

    def parse_config(self):
        """ Load the kubeconfig """

        print "\nAttempt to load the kubeconfig\n"

        try:
            yaml.load(open(self.args.config))
            return 0

        except Exception as ex:
            print "Failed parsing config %s " % ex.message
            return 1

    def parse_args(self):
        """ parse the args from the cli """

        parser = argparse.ArgumentParser(
            description='Openshift kubeconfig checker')
        parser.add_argument('-c', '--config', \
            help='kubeconfig to parse (default /etc/origin/master/admin.kubeconfig)', \
            default='/etc/origin/master/admin.kubeconfig')
        parser.add_argument('-v',
                            '--verbose',
                            action='store_true',
                            default=None,
                            help='Verbose?')
        parser.add_argument('--debug',
                            action='store_true',
                            default=None,
                            help='Debug?')

        self.args = parser.parse_args()
Example #13
    def __init__(self, config_file=None):
        if not config_file:
            self.config_file = '/etc/openshift_tools/container_metrics.yml'
        else:
            self.config_file = config_file

        self.config = None

        self.parse_config()

        self.cli = AutoVersionClient(base_url='unix://var/run/docker.sock',
                                     timeout=120)
        self.docker_util = DockerUtil(self.cli)
        self.metric_sender = MetricSender(verbose=True)
Example #14
def send_metrics(curlresult, service_status):
    """ send data to MetricSender"""

    ms = MetricSender()
    ms.add_metric({'openshift.master.dnsmasq.curl.status': curlresult})
    ms.add_metric({'openshift.master.dnsmasq.service.status': service_status})
    ms.send_metrics()
Example #15
def send_metrics(problems):
    """ send data to MetricSender"""
    logger.debug("send_metrics(problems)")

    ms_time = time.time()
    ms = MetricSender()
    logger.info("Send data to MetricSender")

    ms.add_metric({'aws.ec2.instance.instance_status': problems['InstanceStatus']})
    ms.add_metric({'aws.ec2.instance.system_status': problems['SystemStatus']})
    ms.add_metric({'aws.ec2.instance.events': problems['Events']})

    ms.send_metrics()
    logger.info("Data sent to Zagg in %s seconds", str(time.time() - ms_time))
Example #16
    def run(self):
        """  Main function to run the check """

        self.parse_args()
        self.ora = OpenshiftRestApi()
        self.metric_sender = MetricSender(verbose=self.args.verbose,
                                          debug=self.args.debug)

        try:
            self.get_pods()

        except Exception as ex:
            print "Problem retreiving pod data: %s " % ex.message

        self.metric_sender.send_metrics()
Example #17
    def config_metric_sender(self):
        """ configure the metric_sender """

        metric_verbose = self.args.verbose
        metric_debug = self.args.debug
        host = self.args.host if self.args.host else self.config['host']['name']

        if isinstance(metric_verbose, str):
            metric_verbose = (metric_verbose == 'True')

        if isinstance(metric_debug, str):
            metric_debug = (metric_debug == 'True')

        self.metric_sender = MetricSender(host=host, verbose=metric_verbose, debug=metric_debug,
                                          config_file=self.args.config_file)
Example #18
    def run(self):
        """Main function to run the check"""

        self.ocutil = OCUtil(config_file=self.kubeconfig,
                             verbose=self.args.verbose)
        self.metrics = MetricSender(verbose=self.args.verbose,
                                    debug=self.args.debug)

        self.check_all_router_health()
        self.check_router_replica_count()

        if self.args.dry_run:
            self.metrics.print_unique_metrics_key_value()
        else:
            self.metrics.send_metrics()
Example #19
    def run(self):
        ''' Main function that runs the check '''
        self.parse_args()
        self.metric_sender = MetricSender(verbose=self.args.verbose,
                                          debug=self.args.debug)

        self.oc = OCUtil(namespace='openshift-infra',
                         config_file='/tmp/admin.kubeconfig',
                         verbose=self.args.verbose)

        pod_report = self.check_pods()
        self.get_hawkular_creds()
        metrics_report = self.check_node_metrics()
        # if metrics_report['success'] == 0, we need to run this check again
        if metrics_report['success'] == 0:
            # sleep for 5 seconds, then run the node check a second time
            logger.info(
                "First metrics check failed; retrying in 5 seconds"
            )
            time.sleep(commandDelay)
            logger.info("starting the second metrics check")
            metrics_report = self.check_node_metrics()
            # persist details if the second attempt also fails
            if metrics_report['success'] == 0:
                self.persist_details(metrics_report)
        self.report_to_zabbix(pod_report, metrics_report['success'])
Example #20
def send_metric_data(bucket_list, bucket_stats, args):
    '''send data to zabbix '''
    discovery_key = "disc.aws"
    discovery_macro = "#S3_BUCKET"
    prototype_s3_size = "disc.aws.size"
    prototype_s3_count = "disc.aws.objects"

    mts = MetricSender(verbose=args.debug)
    mts.add_dynamic_metric(discovery_key, discovery_macro, bucket_list)
    for bucket in bucket_stats.keys():
        zab_key = "{}[{}]".format(prototype_s3_size, bucket)
        mts.add_metric({zab_key: int(round(bucket_stats[bucket]["size"]))})

        zab_key = "{}[{}]".format(prototype_s3_count, bucket)
        mts.add_metric({zab_key: bucket_stats[bucket]["objects"]})
    mts.send_metrics()
Example #21
    def run(self):
        '''  Main function to run the check '''

        self.parse_args()
        self.metric_sender = MetricSender(verbose=self.args.verbose,
                                          debug=self.args.debug)

        master_cfg = []
        with open(self.args.master_config, 'r') as yml:
            master_cfg = yaml.load(yml)
        self.ora = OpenshiftRestApi(host=master_cfg['oauthConfig']['masterURL'],
                                    verify_ssl=True)

        self.cluster_capacity()

        if not self.args.dry_run:
            self.metric_sender.send_metrics()
Example #22
    def run(self):
        """  Main function to run the check """

        self.parse_args()
        self.ora = OpenshiftRestApi()
        self.metric_sender = MetricSender(verbose=self.args.verbose, debug=self.args.debug)

        try:
            self.get_service()
            status = self.check_service()

        except Exception as ex:
            print "Problem retreiving data: %s " % ex.message

        self.metric_sender.add_metric({
            "openshift.webservice.{}.status".format(self.args.pod) : status})

        self.metric_sender.send_metrics()
Example #23
    def run(self):
        """  Main function to run the check """

        self.parse_args()
        self.get_kubeconfig()
        ocutil = OCUtil(config_file=self.kubeconfig, verbose=self.args.verbose)
        self.metric_sender = MetricSender(verbose=self.args.verbose, debug=self.args.debug)

        try:
            oc_yaml = ocutil.get_service('docker-registry')
            self.get_registry_service(oc_yaml)
            oc_yaml = ocutil.get_endpoint('docker-registry')
            self.get_registry_endpoints(oc_yaml)
        except Exception as ex:
            print "Problem retreiving registry IPs: %s " % ex.message

        self.registry_service_check()
        self.registry_health_check()

        self.metric_sender.send_metrics()
Example #24
def main():
    """ main() """
    logger.debug("main()")

    args = parse_args()
    if args.debug:
        logger.setLevel(logging.DEBUG)
    if args.verbose:
        logger.setLevel(logging.INFO)

    if args.aws_creds_profile:
        os.environ['AWS_PROFILE'] = args.aws_creds_profile

    ms = MetricSender(verbose=args.verbose, debug=args.debug)
    # get regions
    regions = Base.get_supported_regions()
    logger.debug("Get all regions: %s", regions)

    count = 0
    for region in regions:
        logger.info("Get Elastic IP in region %s", region)
        eips = getEIPByRegion(region.name)
        logger.debug("elastic ips: %s", eips)
        for eip in eips:
            if eip.instance_id is None:
                count += 1
                logger.warn("EIP: %s is not associated to any instance", eip)

    ms_time = time.time()
    logger.info("Send data to MetricSender")

    ms.add_metric({'aws.ec2.eip.status': count})
    ms.send_metrics()
    logger.info("Data sent to Zagg in %s seconds", str(time.time() - ms_time))
Example #25
    def report_tags_to_zabbix(tags):
        """ Sends the commands exit code to zabbix. """
        mts = MetricSender(verbose=True)

        #######################################################
        # This reports the "config" tag from each instance
        #   If config == "true" (case-insensitive), report 0
        #   If config has any other value, report 1
        #   If no config tag is found, report 2
        #######################################################
        for tag in tags:
            if 'config' in tag.keys():
                if tag['config'].lower() == "true":
                    config_value = 0
                else:
                    config_value = 1
            else:
                config_value = 2

            mts.add_metric({CONFIG_LOOP_TAG_KEY: config_value},
                           host=tag['name'])
        ####################################
        # End of config tag checking
        ####################################

        # Actually send them
        mts.send_metrics()
Example #26
    def run(self):
        ''' main function '''

        while True:
            event_list = []
            while not self.queue.empty():
                event = self.queue.get()
                if self.args.debug:
                    print "Processing event: {}".format(str(event))
                event_list.append(event)

            # initialize event counts so that we send '0' events
            # in the case where no events were received
            event_counts = {}
            for zbx_key in self.zbx_keys:
                event_counts[zbx_key] = 0

            # add up each distinct event
            for event in event_list:
                event_counts[event] += 1

            if self.args.verbose or self.args.dry_run:
                print "Got events: " + str(event_counts)

            if not self.args.dry_run:
                metric_sender = MetricSender(verbose=self.args.verbose,
                                             debug=self.args.debug)
                for event, count in event_counts.iteritems():
                    metric_sender.add_metric({event: count})
                metric_sender.send_metrics()

            time.sleep(self.args.reporting_period)
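
The zero-initialization in this example matters: without it, a key that saw no events during a reporting period would simply be absent from the payload instead of being reported as 0. A standalone sketch of the counting step, with hypothetical keys and queue contents:

zbx_keys = ['mykey.event.a', 'mykey.event.b']
event_list = ['mykey.event.a', 'mykey.event.a']  # hypothetical drained queue contents

# seed every known key with 0, then add up each distinct event
event_counts = dict((key, 0) for key in zbx_keys)
for event in event_list:
    event_counts[event] += 1
print(event_counts)  # {'mykey.event.a': 2, 'mykey.event.b': 0}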
Example #27
def main():
    """  Main function to run the check """
    argz = parse_args()
    conn_count = 0

    for proc in psutil.process_iter():
        try:
            if proc.name() == argz.proc_to_check:
                if argz.debug:
                    print proc.connections()
                for conn in proc.connections():
                    if conn.status == argz.conn_status and conn.laddr[1] == argz.port:
                        conn_count += 1
        except psutil.NoSuchProcess:
            pass

    if argz.debug:
        print 'Process ({0}) on port {1} has {2} connections in {3} status'.format(argz.proc_to_check,
                                                                                   argz.port,
                                                                                   conn_count,
                                                                                   argz.conn_status
                                                                                  )

    ms = MetricSender(debug=argz.debug)
    ms.add_metric({'{0}'.format(argz.zabbix_key) : conn_count})
    ms.send_metrics()
Example #28
    def run(self):
        """ Main function that runs the check """
        self.parse_args()
        self.metric_sender = MetricSender(verbose=self.args.verbose,
                                          debug=self.args.debug)
        self.oc = OCUtil(namespace=self.get_logging_namespace(),
                         config_file='/tmp/admin.kubeconfig',
                         verbose=self.args.verbose)
        self.get_pods()

        oldest_buffer = self.check_fluentd_queues()

        self.send_metrics(oldest_buffer)
Example #29
    def config_metric_sender(self):
        """ configure the metric_sender """

        if self.args.host:
            host = self.args.host
        elif self.args.synthetic:
            host = self.config['synthetic_clusterwide']['host']['name']
        else:
            host = self.config['host']['name']

        metric_verbose = self.args.verbose
        metric_debug = self.args.debug
        if isinstance(metric_verbose, str):
            metric_verbose = (metric_verbose == 'True')

        if isinstance(metric_debug, str):
            metric_debug = (metric_debug == 'True')

        self.metric_sender = MetricSender(host=host,
                                          verbose=metric_verbose,
                                          debug=metric_debug,
                                          config_file=self.args.config_file)
Example #30
def main():
    """  Main function to run the check """

    args = parse_args()
    metric_sender = MetricSender(verbose=args.verbose, debug=args.debug)

    discovery_key_network = 'disc.network'
    pcp_network_dev_metrics = ['network.interface.in.bytes', 'network.interface.out.bytes']
    item_proto_macro_network = '#OSO_NET_INTERFACE'
    item_proto_key_in_bytes = 'disc.network.in.bytes'
    item_proto_key_out_bytes = 'disc.network.out.bytes'

    network_metrics = pminfo.get_metrics(pcp_network_dev_metrics)

    pcp_metrics_divided = {}
    for metric in pcp_network_dev_metrics:
        pcp_metrics_divided[metric] = {k: v for k, v in network_metrics.items() if metric in k}

    # do Network In; use network.interface.in.bytes
    filtered_network_totals = clean_up_metric_dict(pcp_metrics_divided[pcp_network_dev_metrics[0]],
                                                   pcp_network_dev_metrics[0] + '.')

    # Add dynamic items
    metric_sender.add_dynamic_metric(discovery_key_network, item_proto_macro_network, filtered_network_totals.keys())

    # Report Network IN bytes; add them to the MetricSender
    for interface, total in filtered_network_totals.iteritems():
        metric_sender.add_metric({'%s[%s]' % (item_proto_key_in_bytes, interface): total})

    # Report Network OUT Bytes;  use network.interface.out.bytes
    filtered_network_totals = clean_up_metric_dict(pcp_metrics_divided[pcp_network_dev_metrics[1]],
                                                   pcp_network_dev_metrics[1] + '.')

    # Report Network OUT bytes; add them to the MetricSender
    for interface, total in filtered_network_totals.iteritems():
        metric_sender.add_metric({'%s[%s]' % (item_proto_key_out_bytes, interface): total})

    metric_sender.send_metrics()