def run(self):
    ''' Main function that runs the check '''
    self.parse_args()
    self.metric_sender = MetricSender(verbose=self.args.verbose, debug=self.args.debug)
    self.oc = OCUtil(namespace='openshift-infra', config_file='/tmp/admin.kubeconfig', verbose=self.args.verbose)

    pod_report = self.check_pods()
    self.get_hawkular_creds()
    metrics_report = self.check_node_metrics()

    # if metrics_report['success'] == 0, we need to run this check again
    if metrics_report['success'] == 0:
        # commandDelay is a module-level delay (defined elsewhere in the script);
        # sleep, then run the node metrics check a second time
        logger.info("First metrics check failed; retrying in %s seconds", commandDelay)
        time.sleep(commandDelay)
        logger.info("starting the second metrics check")
        metrics_report = self.check_node_metrics()
        # persist the details if the second attempt also fails
        if metrics_report['success'] == 0:
            self.persist_details(metrics_report)

    self.report_to_zabbix(pod_report, metrics_report['success'])

def __init__(self, args=None):
    ''' Initialize the InfraNodePodStatus check '''
    self.args = args
    self.kubeconfig = '/tmp/admin.kubeconfig'
    self.oc = OCUtil(namespace=self.args.namespace, config_file=self.kubeconfig)

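# Hedged usage sketch for the constructor above. It assumes a `parse_args`
# helper that returns an argparse.Namespace with a `namespace` attribute
# (as the constructor requires) and a `run` entry point like the run()
# methods elsewhere in this collection; both names are assumptions here.
if __name__ == '__main__':
    INPS = InfraNodePodStatus(args=parse_args())
    INPS.run()
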
def run(self):
    ''' Main function that runs the check '''
    self.parse_args()
    self.zagg_sender = ZaggSender(verbose=self.args.verbose, debug=self.args.debug)
    self.oc = OCUtil(namespace='openshift-infra', config_file=self.kubeconfig, verbose=self.args.verbose)

    pod_report = self.check_pods()
    metrics_report = self.check_node_metrics()
    self.report_to_zabbix(pod_report, metrics_report)

def run(self): """ Main function that runs the check """ self.parse_args() self.metric_sender = MetricSender(verbose=self.args.verbose, debug=self.args.debug) self.oc = OCUtil(namespace=self.get_logging_namespace(), config_file='/tmp/admin.kubeconfig', verbose=self.args.verbose) self.get_pods() oldest_buffer = self.check_fluentd_queues() self.send_metrics(oldest_buffer)
def run(self): """Main function to run the check""" self.ocutil = OCUtil(config_file=self.kubeconfig, verbose=self.args.verbose) self.zgs = ZaggSender(verbose=self.args.verbose, debug=self.args.debug) self.check_all_router_health() self.check_router_replica_count() if self.args.dry_run: self.zgs.print_unique_metrics_key_value() else: self.zgs.send_metrics()
def __init__(self): """ initialize EBSStuckVolumesCheck class """ self.args = None self.vol_state_data = None self.parse_args() # Make sure we're using the profile they've requested. if self.args.aws_creds_profile: os.environ['AWS_PROFILE'] = self.args.aws_creds_profile self.eu = EbsUtil(self.args.region, verbose=self.args.verbose) self.ocutil = OCUtil(verbose=self.args.verbose) self.mts = MetricSender(verbose=self.args.verbose)
def main():
    ''' Gather and send details on all visible S3 buckets '''
    logger.info("start")

    discovery_key = "disc.aws"
    discovery_macro = "#S3_BUCKET"
    prototype_s3_size = "disc.aws.size"
    prototype_s3_count = "disc.aws.objects"

    args = parse_args()
    if args.verbose:
        logger.setLevel(logging.DEBUG)
        logger.debug("verbose flag set")

    ocutil = OCUtil()
    dc_yaml = ocutil.get_dc('docker-registry')
    registry_config_secret = get_registry_config_secret(dc_yaml)

    oc_yaml = ocutil.get_secrets(registry_config_secret)
    aws_access, aws_secret = get_aws_creds(oc_yaml)
    awsutil = AWSUtil(aws_access, aws_secret, args.debug)

    bucket_list = awsutil.get_bucket_list(args.debug)

    bucket_stats = {}
    for bucket in bucket_list:
        s3_size, s3_objects = awsutil.get_bucket_info(bucket, args.debug)
        bucket_stats[bucket] = {"size": s3_size, "objects": s3_objects}

    if args.debug:
        print "Bucket stats: " + str(bucket_stats)

    if args.test:
        print "Test-only. Received results: " + str(bucket_stats)
    else:
        zgs = ZaggSender(verbose=args.debug)
        zgs.add_zabbix_dynamic_item(discovery_key, discovery_macro, bucket_list)
        for bucket in bucket_stats.keys():
            zab_key = "{}[{}]".format(prototype_s3_size, bucket)
            zgs.add_zabbix_keys({zab_key: int(round(bucket_stats[bucket]["size"]))})
            zab_key = "{}[{}]".format(prototype_s3_count, bucket)
            zgs.add_zabbix_keys({zab_key: bucket_stats[bucket]["objects"]})
        zgs.send_metrics()

def run(self):
    ''' Main function that runs the check '''
    self.parse_args()
    self.metric_sender = MetricSender(verbose=self.args.verbose, debug=self.args.debug)
    self.oc = OCUtil(namespace='openshift-infra', config_file='/tmp/admin.kubeconfig', verbose=self.args.verbose)

    pod_report = self.check_pods()
    self.get_hawkular_creds()
    metrics_report = self.check_node_metrics()
    self.report_to_zabbix(pod_report, metrics_report)

def get_registry_config_secret(yaml_results):
    ''' Find the docker registry config secret '''
    ocutil = OCUtil()

    volumes = yaml_results['spec']['template']['spec']['volumes']
    for volume in volumes:
        if 'emptyDir' in volume:
            continue
        secret_dict = ocutil.get_secrets(volume['secret']['secretName'])
        if 'config.yml' in secret_dict['data']:
            return volume['secret']['secretName']

    print "Unable to find the docker registry config secret"
    print "Please run \"oc get dc docker-registry\" to investigate"
    sys.exit(1)

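# For reference, a minimal sketch (illustrative values only) of the dc YAML
# shape get_registry_config_secret() walks: it skips emptyDir volumes, reads
# each secret-backed volume, and returns the first secret whose data contains
# a 'config.yml' key.
#
#   spec:
#     template:
#       spec:
#         volumes:
#         - name: registry-storage
#           emptyDir: {}
#         - name: docker-config
#           secret:
#             secretName: registry-config   # <- returned if it holds config.yml
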
def get_logging_namespace(self):
    """ Determine which logging namespace is in use """
    # Assume the correct namespace is 'openshift-logging' and fall back to
    # 'logging' if that assumption ends up being wrong.
    oc_client = OCUtil(namespace='openshift-logging', config_file='/tmp/admin.kubeconfig', verbose=self.args.verbose)
    logger.info("Determining which namespace is in use...")
    try:
        oc_client.get_dc('logging-kibana')
        # If the previous call didn't throw an exception, logging is deployed
        # in this namespace.
        logger.info("Using namespace: openshift-logging")
        return 'openshift-logging'
    except subprocess.CalledProcessError:
        logger.info("Using namespace: logging")
        return 'logging'

def main():
    ''' main() '''
    args = parse_args()
    if args.verbose:
        logger.setLevel(logging.DEBUG)
    logger.info("Starting")

    # TODO: include this in library
    projects_info = OCUtil().get_projects()
    maxDelta = testProjects(
        projects_info['items'],
        current_time=datetime.datetime.now(),
    )

    send_zagg_data(maxDelta)
    logger.info('Oldest Terminating project: %s seconds', maxDelta)

def main():
    ''' main() '''
    args = parse_args()
    if args.verbose:
        logger.setLevel(logging.DEBUG)
    logger.info("Starting")

    # TODO: include this in library
    projects_info = OCUtil()._run_cmd("oc get projects -o yaml")
    time_keeps_max = testProjects(
        projects_info['items'],
        current_time=datetime.datetime.now(),
    )

    send_zagg_data(time_keeps_max)
    logger.info('Oldest Terminating project: %s seconds', time_keeps_max)

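# `testProjects` is referenced by both main() variants above but not shown.
# A hypothetical sketch of the logic they imply: return the age in seconds of
# the oldest project stuck in Terminating. The field paths follow standard
# Kubernetes object metadata; this is not the original implementation.
import datetime

def testProjects(projects, current_time=None):
    ''' Hypothetical sketch: oldest Terminating project age, in seconds '''
    oldest_seconds = 0
    for project in projects:
        if project.get('status', {}).get('phase') != 'Terminating':
            continue
        # deletionTimestamp marks when the delete began (standard k8s metadata).
        # Note: it is UTC, so a UTC current_time avoids timezone skew.
        deleted_at = datetime.datetime.strptime(
            project['metadata']['deletionTimestamp'], '%Y-%m-%dT%H:%M:%SZ')
        age = int((current_time - deleted_at).total_seconds())
        oldest_seconds = max(oldest_seconds, age)
    return oldest_seconds
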
def main():
    ''' Gather and send details on all visible GCS buckets '''
    discovery_key = "disc.gcp"
    discovery_macro = "#GCS_BUCKET"
    prototype_bucket_size = "disc.gcp.size"
    prototype_bucket_count = "disc.gcp.objects"

    args = parse_args()

    ocutil = OCUtil()
    dc_yaml = ocutil.get_dc('docker-registry')
    registry_config_secret = get_registry_config_secret(dc_yaml)
    oc_yaml = ocutil.get_secrets(registry_config_secret)
    bucket = get_gcp_info(oc_yaml)

    gsutil = GcloudUtil(verbose=args.debug)
    bucket_list = gsutil.get_bucket_list()

    bucket_stats = {}
    for bucket in bucket_list:
        size, objects = gsutil.get_bucket_info(bucket)
        bucket_stats[bucket] = {"size": size, "objects": objects}

    if args.debug:
        print "Bucket stats: " + str(bucket_stats)

    if args.test:
        print "Test-only. Received results: " + str(bucket_stats)
    else:
        zgs = ZaggSender(verbose=args.debug)
        zgs.add_zabbix_dynamic_item(discovery_key, discovery_macro, bucket_list)
        for bucket in bucket_stats.keys():
            zab_key = "{}[{}]".format(prototype_bucket_size, bucket)
            zgs.add_zabbix_keys({zab_key: int(round(bucket_stats[bucket]["size"]))})
            zab_key = "{}[{}]".format(prototype_bucket_count, bucket)
            zgs.add_zabbix_keys({zab_key: bucket_stats[bucket]["objects"]})
        zgs.send_metrics()

def run(self): """ Main function to run the check """ self.parse_args() self.get_kubeconfig() ocutil = OCUtil(config_file=self.kubeconfig, verbose=self.args.verbose) self.metric_sender = MetricSender(verbose=self.args.verbose, debug=self.args.debug) try: oc_yaml = ocutil.get_service('docker-registry') self.get_registry_service(oc_yaml) oc_yaml = ocutil.get_endpoint('docker-registry') self.get_registry_endpoints(oc_yaml) except Exception as ex: print "Problem retreiving registry IPs: %s " % ex.message self.registry_service_check() self.registry_health_check() self.metric_sender.send_metrics()
def main():
    ''' Gather and send details on all visible S3 buckets '''
    # Get the region from the monitoring config.
    with open('/container_setup/monitoring-config.yml', 'r') as f:
        # safe_load avoids executing arbitrary YAML tags
        doc = yaml.safe_load(f)
    bucket_region = doc['oso_region']

    args = parse_args()

    ocutil = OCUtil()
    dc_yaml = ocutil.get_dc('docker-registry')
    registry_config_secret = get_registry_config_secret(dc_yaml)
    oc_yaml = ocutil.get_secrets(registry_config_secret)
    aws_access, aws_secret = get_aws_creds(oc_yaml)
    awsutil = AWSUtil(aws_access, aws_secret, args.debug)

    bucket_list = awsutil.get_bucket_list(verbose=args.debug, BucketRegion=bucket_region)

    bucket_stats = {}
    for bucket in bucket_list:
        s3_size, s3_objects = awsutil.get_bucket_info(
            bucket, verbose=args.debug, BucketRegion=bucket_region)
        bucket_stats[bucket] = {"size": s3_size, "objects": s3_objects}

    if args.debug:
        print "Bucket stats: " + str(bucket_stats)

    if args.test:
        print "Test-only. Received results: " + str(bucket_stats)
    else:
        send_zagg_data(bucket_list, bucket_stats, args)

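# `send_zagg_data` is referenced above but not shown. A plausible sketch,
# assuming it mirrors the inline Zabbix reporting in the other bucket-metrics
# main() functions in this collection; the discovery keys are illustrative,
# and the import path matches the MetricSender import style used below.
from openshift_tools.monitoring.zagg_sender import ZaggSender

def send_zagg_data(bucket_list, bucket_stats, args):
    ''' Hypothetical sketch: report per-bucket size/object counts to Zabbix '''
    zgs = ZaggSender(verbose=args.debug)
    zgs.add_zabbix_dynamic_item("disc.aws", "#S3_BUCKET", bucket_list)
    for bucket in bucket_stats.keys():
        zgs.add_zabbix_keys(
            {"disc.aws.size[{}]".format(bucket): int(round(bucket_stats[bucket]["size"]))})
        zgs.add_zabbix_keys(
            {"disc.aws.objects[{}]".format(bucket): bucket_stats[bucket]["objects"]})
    zgs.send_metrics()
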
def __init__(self):
    ''' Initialize the InfraNodePodStatus check '''
    self.kubeconfig = '/tmp/admin.kubeconfig'
    self.oc = OCUtil(namespace='default', config_file=self.kubeconfig)

# Our jenkins server does not include these rpms.
# In the future we might move this to a container where these
# libs might exist.
#pylint: disable=import-error
from openshift_tools.monitoring.ocutil import OCUtil
from openshift_tools.monitoring.metric_sender import MetricSender

import logging
logging.basicConfig(
    format='%(asctime)s - %(relativeCreated)6d - %(levelname)-8s - %(message)s',
)
logger = logging.getLogger()
logger.setLevel(logging.INFO)

ocutil = OCUtil()

valid_build_states = ["cancelled", "complete", "new", "error", "failed"]


def runOCcmd(cmd, base_cmd='oc'):
    """ log commands through ocutil """
    logger.info(base_cmd + " " + cmd)
    return ocutil.run_user_cmd(
        cmd,
        base_cmd=base_cmd,
    )


def parse_args():
    """ parse the args from the cli """
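
# Hedged usage sketch for runOCcmd and valid_build_states above: fetch builds
# and count those whose phase falls outside the expected states. It assumes
# run_user_cmd returns the command's stdout as a string (hence the yaml parse);
# `count_invalid_builds` and the field paths are illustrative, not from the
# original script.
import yaml

def count_invalid_builds():
    ''' Hypothetical sketch: count builds in unexpected states '''
    builds = yaml.safe_load(runOCcmd("get builds -o yaml"))
    return len([
        build for build in builds.get('items', [])
        if build['status']['phase'].lower() not in valid_build_states
    ])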