def process_options(self):
    """Resolve --job and --list; when checking a single job, validate its name and note it in the status message."""
    super(CheckJenkinsJobExists, self).process_options()
    job = self.get_opt('job')
    self.job = job
    self.list_jobs = self.get_opt('list')
    if self.list_jobs:
        return
    validate_chars(job, 'job', r'A-Za-z0-9\s\._-')
    self.msg += "'{job}' ".format(job=job)
def process_options(self):
    """Pick up --node / --list-nodes; validate the datanode name when a specific node check is requested."""
    super(CheckHadoopDatanodeLastContact, self).process_options()
    datanode = self.get_opt('node')
    self.datanode = datanode
    self.list_nodes = self.get_opt('list_nodes')
    if not self.list_nodes:
        validate_chars(datanode, 'datanode', 'A-Za-z0-9:_-')
    self.validate_thresholds()
def process_options(self):
    """Process and validate command line options.

    Unless --list-apps was given, requires an --app regex and validates the
    optional --user / --queue / --min-containers filters, then appends the
    running-state query string (with --limit) to the request path.

    Fix: --limit was fetched from the parsed options a second time inside the
    validation branch, pointlessly overwriting the value already read above;
    the redundant re-fetch is removed.
    """
    super(CheckHadoopYarnAppRunning, self).process_options()
    self.app = self.get_opt('app')
    self.app_user = self.get_opt('user')
    self.queue = self.get_opt('queue')
    self.min_containers = self.get_opt('min_containers')
    self.limit = self.get_opt('limit')
    self.warn_on_dup_app = self.get_opt('warn_on_duplicate_app')
    self.list_apps = self.get_opt('list_apps')
    if not self.list_apps:
        if not self.app:
            self.usage('--app regex not defined')
        validate_regex(self.app, 'app')
        if self.app_user is not None:
            validate_chars(self.app_user, 'app user', r'\w')
        if self.queue is not None:
            validate_chars(self.queue, 'queue', r'\w-')
        if self.min_containers is not None:
            validate_int(self.min_containers, 'min containers', 0, None)
            self.min_containers = int(self.min_containers)
        # self.limit already read above - validate it and build the query path
        validate_int(self.limit, 'num results', 1, None)
        self.path += '?states=running&limit={0}'.format(self.limit)
    self.validate_thresholds(optional=True)
def process_options(self):
    """Read --view / --list-views; a given view name is validated, thresholds are optional."""
    super(CheckJenkinsJobCount, self).process_options()
    view = self.get_opt('view')
    self.view = view
    self.list_views = self.get_opt('list_views')
    if view:
        validate_chars(view, 'view', r'A-Za-z0-9\s\.,_-')
    self.validate_thresholds(optional=True)
def run(self):
    """Check whether a local git checkout is in sync with its remote.

    Fetches from the configured remote (unless --no-fetch), counts commits
    ahead of and behind the remote tracking branch, sets OK when both counts
    are zero and CRITICAL otherwise, and emits perfdata for both counts.
    """
    self.no_args()
    directory = self.get_opt('directory')
    validate_directory(directory)
    directory = os.path.abspath(directory)
    self.remote = self.get_opt('remote')
    validate_chars(self.remote, 'remote', r'A-Za-z0-9_\.-')
    try:
        repo = git.Repo(directory)
    except InvalidGitRepositoryError as _:
        raise CriticalError("directory '{}' does not contain a valid Git repository!".format(directory))
    try:
        if not self.get_opt('no_fetch'):
            log.info('fetching from remote repo: {}'.format(self.remote))
            repo.git.fetch(self.remote)
        branch = repo.active_branch
        log.info('active branch: %s', branch)
        # commits reachable from remote branch but not local => behind; the reverse => ahead
        commits_behind = repo.iter_commits('{branch}..{remote}/{branch}'.format(remote=self.remote, branch=branch))
        commits_ahead = repo.iter_commits('{remote}/{branch}..{branch}'.format(remote=self.remote, branch=branch))
        # iter_commits returns generators, so count by iteration
        num_commits_behind = sum(1 for c in commits_behind)
        num_commits_ahead = sum(1 for c in commits_ahead)
    # happens with detached HEAD checkout like Travis CI does
    except TypeError as _:
        raise CriticalError(_)
    except GitCommandError as _:
        raise CriticalError(', '.join(str(_.stderr).split('\n')))
    self.msg = "git checkout branch '{}' is ".format(branch)
    if num_commits_ahead + num_commits_behind == 0:
        self.ok()
        self.msg += 'up to date with'
    else:
        self.critical()
        self.msg += '{} commits behind, {} commits ahead of'.format(num_commits_behind, num_commits_ahead)
    self.msg += " remote '{}'".format(self.remote)
    # perfdata: thresholds pinned at 0;0 so any drift alerts in graphing tools too
    self.msg += ' | commits_behind={};0;0 commits_ahead={};0;0'.format(num_commits_behind, num_commits_ahead)
def run(self):
    """Check whether a local git checkout is in sync with its remote.

    Same flow as the sibling checkout-sync check but referencing the exception
    classes through the git module namespace (git.InvalidGitRepositoryError /
    git.GitCommandError). Fetches unless --no-fetch, counts commits ahead and
    behind the remote tracking branch, and reports OK only when both are zero.
    """
    self.no_args()
    directory = self.get_opt('directory')
    validate_directory(directory)
    directory = os.path.abspath(directory)
    self.remote = self.get_opt('remote')
    validate_chars(self.remote, 'remote', r'A-Za-z0-9_\.-')
    try:
        repo = git.Repo(directory)
    except git.InvalidGitRepositoryError as _:
        raise CriticalError("directory '{}' does not contain a valid Git repository!".format(directory))
    try:
        if not self.get_opt('no_fetch'):
            log.info('fetching from remote repo: {}'.format(self.remote))
            repo.git.fetch(self.remote)
        branch = repo.active_branch
        log.info('active branch: %s', branch)
        # rev ranges: remote-only commits => behind, local-only commits => ahead
        commits_behind = repo.iter_commits('{branch}..{remote}/{branch}'.format(remote=self.remote, branch=branch))
        commits_ahead = repo.iter_commits('{remote}/{branch}..{branch}'.format(remote=self.remote, branch=branch))
        num_commits_behind = sum(1 for c in commits_behind)
        num_commits_ahead = sum(1 for c in commits_ahead)
    # happens with detached HEAD checkout like Travis CI does
    except TypeError as _:
        raise CriticalError(_)
    except git.GitCommandError as _:
        raise CriticalError(', '.join(str(_.stderr).split('\n')))
    self.msg = "git checkout branch '{}' is ".format(branch)
    if num_commits_ahead + num_commits_behind == 0:
        self.ok()
        self.msg += 'up to date with'
    else:
        self.critical()
        self.msg += '{} commits behind, {} commits ahead of'.format(num_commits_behind, num_commits_ahead)
    self.msg += " remote '{}'".format(self.remote)
    self.msg += ' | commits_behind={};0;0 commits_ahead={};0;0'.format(num_commits_behind, num_commits_ahead)
def process_options(self):
    """Resolve --job / --list; for a specific job, validate its name and point the API path at that job."""
    super(CheckJenkinsJobColor, self).process_options()
    job = self.get_opt('job')
    self.job = job
    self.list_jobs = self.get_opt('list')
    if self.list_jobs:
        return
    validate_chars(job, 'job', r'A-Za-z0-9\s\._-')
    self.path = '/job/{job}/api/json'.format(job=job)
def process_args(self):
    """Validate CLI arguments: one or more regionserver hosts plus port/table/namespace options.

    Builds self.region_regex to match JMX metric names of the form
    Namespace_<ns>_table_<table>_region_<region>_metric_<metric>, capturing
    the table and region; when no --table is given a character-class regex
    matching any legal table name is substituted so all tables match.
    """
    self.host_list = self.args
    if not self.host_list:
        self.usage('no host arguments given, must give at least one regionserver host as an argument')
    for host in self.host_list:
        validate_host(host)
    self.port = self.get_opt('port')
    validate_port(self.port)
    self.table = self.get_opt('table')
    # legal HBase table name characters - doubles as the regex character class below
    table_chars = 'A-Za-z0-9:._-'
    if self.table:
        validate_chars(self.table, 'table', table_chars)
    else:
        # no table specified - match any table name
        self.table = '[{}]+'.format(table_chars)
    self.namespace = self.get_opt('namespace')
    validate_chars(self.namespace, 'hbase namespace', 'A-Za-z0-9:._-')
    # self.metric is assumed to be set by the subclass/__init__ - not visible here
    self.region_regex = re.compile('^Namespace_{namespace}_table_({table})_region_(.+)_metric_{metric}'\
                                   .format(namespace=self.namespace, table=self.table, metric=self.metric))
    self.top_n = self.get_opt('top')
    if self.top_n:
        validate_int(self.top_n, 'top N', 1)
        self.top_n = int(self.top_n)
def process_args(self):
    """Validate CLI arguments: regionserver hosts, port, table/namespace filters and request threshold.

    Positional args are regionserver hosts (at least one required). A missing
    --table is replaced by a regex character class matching any table name so
    every table is covered. --requests is a mandatory integer threshold.
    """
    self.host_list = self.args
    if not self.host_list:
        self.usage('no host arguments given, must give at least one regionserver host as an argument')
    for host in self.host_list:
        validate_host(host)
    self.port = self.get_opt('port')
    validate_port(self.port)
    self.table = self.get_opt('table')
    # legal HBase table name characters - doubles as the regex character class below
    table_chars = 'A-Za-z0-9:._-'
    if self.table:
        validate_chars(self.table, 'table', table_chars)
    else:
        # no table specified - match any table name
        self.table = '[{}]+'.format(table_chars)
    self.namespace = self.get_opt('namespace')
    validate_chars(self.namespace, 'hbase namespace', 'A-Za-z0-9:._-')
    self.top_n = self.get_opt('top')
    if self.top_n:
        validate_int(self.top_n, 'top N', 1)
        self.top_n = int(self.top_n)
    self.request_threshold = self.get_opt('requests')
    validate_int(self.request_threshold, 'request count threshold')
    self.request_threshold = int(self.request_threshold)
def process_options(self):
    """Build the exchange API path from --vhost/--exchange and sanity-check --type/--durable.

    The empty string is a valid exchange name (RabbitMQ's default nameless
    exchange), so it skips character validation but is still URL-encoded into
    the path. When --list-exchanges is given the path stays at the vhost level.
    """
    super(CheckRabbitMQExchange, self).process_options()
    self.vhost = self.get_opt('vhost')
    validate_chars(self.vhost, 'vhost', r'/\w\+-')
    self.path += '/' + urllib.quote_plus(self.vhost)
    self.exchange = self.get_opt('exchange')
    if not self.get_opt('list_exchanges'):
        # '' = default nameless exchange, cannot be char-validated but is still appended
        if self.exchange != '':
            validate_chars(self.exchange, 'exchange', r'/\w\.\+-')
        self.path += '/' + urllib.quote_plus(self.exchange)
    self.expected_type = self.get_opt('type')
    self.expected_durable = self.get_opt('durable')
    if self.expected_type and self.expected_type not in self.valid_exchange_types:
        self.usage("invalid --type '{0}' given, if specified must be one of: {1}"\
                   .format(self.expected_type, ', '.join(self.valid_exchange_types)))
    if self.expected_durable:
        self.expected_durable = self.expected_durable.lower()
        if self.expected_durable not in ('true', 'false'):
            self.usage("invalid --durable '{0}' given, if specified must be either 'true' or 'false'".\
                       format(self.expected_durable))
def process_args(self):
    """Validate CLI arguments: regionserver hosts, table/namespace filters, polling interval/count and metric selection.

    --count 0 means poll forever, so the plugin timeout is disabled in that
    case. The reads/writes/total flags add entries to self.show to select
    which metrics are displayed.
    """
    self.host_list = self.args
    if not self.host_list:
        self.usage('no host arguments given, must give at least one regionserver host as an argument')
    for host in self.host_list:
        validate_host(host)
    self.port = self.get_opt('port')
    validate_port(self.port)
    self.table = self.get_opt('table')
    # legal HBase table name characters - doubles as the regex character class below
    table_chars = 'A-Za-z0-9:._-'
    if self.table:
        validate_chars(self.table, 'table', table_chars)
    else:
        # no table specified - match any table name
        self.table = '[{}]+'.format(table_chars)
    self.namespace = self.get_opt('namespace')
    validate_chars(self.namespace, 'hbase namespace', 'A-Za-z0-9:._-')
    self.interval = self.get_opt('interval')
    self.count = self.get_opt('count')
    self.since_uptime = self.get_opt('average')
    validate_int(self.interval, 'interval')
    validate_int(self.count, 'count')
    self.interval = int(self.interval)
    self.count = int(self.count)
    if self.count == 0:
        # count of 0 => run indefinitely, must not be killed by the plugin timeout
        self.disable_timeout()
    if self.get_opt('reads'):
        self.show.add('read')
    if self.get_opt('writes'):
        self.show.add('write')
    if self.get_opt('total'):
        self.show.add('total')
def process_options(self):
    """Process and validate command line options.

    Unless --list-apps was given, requires an --app regex and validates the
    optional --user / --queue filters, then appends the results limit to the
    request path. States are deliberately not filtered in the query so no
    finished state can be missed; RUNNING/ACCEPTED are skipped later instead.

    Fix: --limit was fetched from the parsed options a second time inside the
    validation branch, pointlessly overwriting the value already read above;
    the redundant re-fetch is removed.
    """
    super(CheckHadoopYarnAppLastFinishedState, self).process_options()
    self.app = self.get_opt('app')
    self.app_user = self.get_opt('user')
    self.queue = self.get_opt('queue')
    self.limit = self.get_opt('limit')
    self.warn_on_dup_app = self.get_opt('warn_on_duplicate_app')
    self.list_apps = self.get_opt('list_apps')
    if not self.list_apps:
        if not self.app:
            self.usage('--app name is not defined')
        validate_regex(self.app, 'app')
        if self.app_user is not None:
            validate_chars(self.app_user, 'app user', r'\w')
        if self.queue is not None:
            validate_chars(self.queue, 'queue', r'\w-')
        # self.limit already read above - validate it and build the query path
        validate_int(self.limit, 'num results', 1, None)
        # Not limited to states here in case we miss one, instead will return all and
        # then explicitly skip only RUNNING/ACCEPTED states
        self.path += '?limit={0}'.format(self.limit)
    self.validate_thresholds(optional=True)
def process_options(self):
    """Resolve the Travis CI target from options or a lazy positional argument.

    A positional arg containing '/' (but not a URL) is treated as --repo,
    otherwise as --job-id. A pasted job URL is accepted for --job-id and the
    numeric id is extracted from it. Exactly one of --job-id / --repo must end
    up set. The Travis token is validated and installed as an Authorization
    header for subsequent API calls.
    """
    self.job_id = self.get_opt('job_id')
    self.travis_token = self.get_opt('travis_token')
    self.repo = self.get_opt('repo')
    #if travis_token is None:
    #    self.usage('--travis-token option or ' +
    #               '$TRAVIS_TOKEN environment variable required to authenticate to the API')
    if self.args:
        # assume arg is a repo in form of HariSekhon/nagios-plugins but do not use url which we are more likely to
        # have pasted a travis-ci url to a job, see a few lines further down
        if '/' in self.args[0] and '://' not in self.args[0]:
            if not self.repo:
                log.info('using argument as --repo')
                self.repo = self.args[0]
        elif not self.job_id:
            log.info('using argument as --job-id')
            self.job_id = self.args[0]
    if self.job_id:
        # convenience to be able to lazily paste a URL like the following and still have it extract the job_id
        # https://travis-ci.org/HariSekhon/nagios-plugins/jobs/283840596#L1079
        self.job_id = self.job_id.split('/')[-1].split('#')[0]
        validate_chars(self.job_id, 'job id', '0-9')
    elif self.repo:
        validate_chars(self.repo, 'repo', r'\/\w\.-')
    else:
        self.usage('--job-id / --repo not specified')
    validate_alnum(self.travis_token, 'travis token')
    self.headers['Authorization'] = 'token {0}'.format(self.travis_token)
def process_options(self):
    """Resolve Travis CI target plus --num/--completed/--failed options.

    Same positional-argument convenience as the sibling Travis check (repo-like
    arg => --repo, otherwise --job-id, pasted job URLs accepted). A bare repo
    name without a user part is prefixed from $TRAVIS_USER when available.
    """
    self.travis_token = self.get_opt('travis_token')
    self.repo = self.get_opt('repo')
    self.job_id = self.get_opt('job_id')
    if self.args:
        # repo-shaped argument (contains '/' but is not a URL) => --repo, else => --job-id
        if '/' in self.args[0] and '://' not in self.args[0]:
            if not self.repo:
                log.info('using argument as --repo')
                self.repo = self.args[0]
        elif not self.job_id:
            log.info('using argument as --job-id')
            self.job_id = self.args[0]
    if self.job_id:
        # convenience to be able to lazily paste a URL like the following and still have it extract the job_id
        # https://travis-ci.org/HariSekhon/nagios-plugins/jobs/283840596#L1079
        self.job_id = self.job_id.split('/')[-1].split('#')[0]
        validate_chars(self.job_id, 'job id', '0-9')
    elif self.repo:
        travis_user = os.getenv('TRAVIS_USER')
        # normalize bare repo name to '/repo', then prefix the user from the environment if set
        if '/' not in self.repo:
            self.repo = '/' + self.repo
        if self.repo[0] == '/' and travis_user:
            self.repo = travis_user + self.repo
        validate_chars(self.repo, 'repo', r'\/\w\.-')
    else:
        self.usage('--job-id / --repo not specified')
    validate_alnum(self.travis_token, 'travis token')
    self.headers['Authorization'] = 'token {0}'.format(self.travis_token)
    self.num = self.get_opt('num')
    validate_int(self.num, 'num', 1)
    self.num = int(self.num)
    self.completed = self.get_opt('completed')
    self.failed = self.get_opt('failed')
def process_docker_options(self):
    """Process Docker connection options, mirroring the docker CLI environment conventions.

    TLS is enabled either by --tls or by $DOCKER_TLS_VERIFY. Certificate /
    key files fall back to the standard file names under $DOCKER_CERT_PATH
    when not given explicitly, and a docker.tls.TLSConfig is built for the
    client connection.
    """
    # should look like unix:///var/run/docker.sock or tcp://127.0.0.1:1234
    self.base_url = self.get_opt('base_url')
    if self.base_url:
        validate_chars(self.base_url, 'base url', r'A-Za-z0-9\/\:\.')
    self.tls = self.get_opt('tls')
    # environment variable mirrors docker CLI behaviour
    if not self.tls and os.getenv('DOCKER_TLS_VERIFY'):
        self.tls = True
    log_option('tls', self.tls)
    if self.tls:
        ca_file = self.get_opt('tlscacert')
        cert_file = self.get_opt('tlscert')
        key_file = self.get_opt('tlskey')
        tls_verify = self.get_opt('tlsverify')
        docker_cert_path = os.getenv('DOCKER_CERT_PATH')
        if docker_cert_path:
            # fall back to the conventional file names inside $DOCKER_CERT_PATH
            if not ca_file:
                ca_file = os.path.join(docker_cert_path, 'ca.pem')
            if not cert_file:
                cert_file = os.path.join(docker_cert_path, 'cert.pem')
            if not key_file:
                key_file = os.path.join(docker_cert_path, 'key.pem')
            if not tls_verify and os.getenv('DOCKER_TLS_VERIFY'):
                tls_verify = True
        validate_file(ca_file, 'TLS CA cert file')
        validate_file(cert_file, 'TLS cert file')
        validate_file(key_file, 'TLS key file')
        log_option('TLS verify', tls_verify)
        self.tls_config = docker.tls.TLSConfig(ca_cert=ca_file,  # pylint: disable=redefined-variable-type
                                               verify=tls_verify,
                                               client_cert=(cert_file, key_file))
def process_options(self):
    """Resolve --job / --list, target the job's JSON API when given, and validate percentage thresholds."""
    super(CheckJenkinsJobHealthReport, self).process_options()
    job = self.get_opt('job')
    self.job = job
    self.list_jobs = self.get_opt('list')
    if not self.list_jobs:
        validate_chars(job, 'job', r'A-Za-z0-9\s\._-')
        self.path = '/job/{job}/api/json'.format(job=job)
    self.validate_thresholds(percent=True, simple='lower')
def process_options(self):
    """Validate the docker image name and optional expected id; takes no positional args, thresholds optional."""
    self.no_args()
    image = self.get_opt('docker_image')
    self.docker_image = image
    validate_chars(image, 'docker image', 'A-Za-z0-9/:-')
    expected = self.get_opt('id')
    self.expected_id = expected
    if expected is not None:
        validate_chars(expected, 'expected id', 'A-Za-z0-9:-')
    self.validate_thresholds(optional=True)
def process_options(self):
    """Split the --plugins CSV into a cleaned list and validate each plugin name; thresholds optional."""
    super(CheckLogstashPlugins, self).process_options()
    raw_plugins = self.get_opt('plugins')
    if raw_plugins:
        self.plugins = [item.strip() for item in raw_plugins.split(',')]
        for plugin in self.plugins:
            validate_chars(plugin, 'plugin', 'A-Za-z0-9_-')
    self.validate_thresholds(optional=True)
def process_options(self):
    """Resolve --node / --list; validate the node name and seed the message when checking a specific node."""
    super(CheckJenkinsNode, self).process_options()
    node = self.get_opt('node')
    self.node = node
    self.list_nodes = self.get_opt('list')
    if not self.list_nodes:
        validate_chars(node, 'node', r'A-Za-z0-9\._-')
        self.msg += '{0} is '.format(node)
    self.validate_thresholds(simple='lower')
def process_options(self):
    """Resolve --plugin / --list; validate the plugin name unless listing, and record the update-check flag."""
    super(CheckJenkinsPlugin, self).process_options()
    plugin = self.get_opt('plugin')
    self.plugin = plugin
    self.list_plugins = self.get_opt('list')
    if not self.list_plugins:
        validate_chars(plugin, 'plugin', r'A-Za-z0-9\s\.,_-')
    self.check_update = self.get_opt('check_update')
    log_option('check for updates', self.check_update)
def process_options(self):
    """Validate the docker image name (dots allowed) and optional expected id; thresholds optional."""
    super(CheckDockerImage, self).process_options()
    image = self.get_opt('docker_image')
    self.docker_image = image
    validate_chars(image, 'docker image', r'A-Za-z0-9/:\.-')
    expected = self.get_opt('id')
    self.expected_id = expected
    if expected is not None:
        validate_chars(expected, 'expected id', 'A-Za-z0-9:-')
    self.validate_thresholds(optional=True)
def process_options(self):
    """Resolve --key / --expected / --list; validate key name and expected-value regex unless listing config."""
    super(CheckApacheDrillConfig, self).process_options()
    self.config_key = self.get_opt('key')
    self.expected_value = self.get_opt('expected')
    self.list_config = self.get_opt('list')
    if self.list_config:
        return
    validate_chars(self.config_key, 'config key', r'A-Za-z0-9_\.-')
    validate_regex(self.expected_value, 'expected value regex')
def run(self):
    """Trigger, cancel or list Ambari service checks.

    Validates connection/auth options, rejects contradictory combinations
    (--cancel with a service selection, or --all with --services), resolves
    the target cluster (inferring it when Ambari manages exactly one), then
    either prints clusters/services (exiting UNKNOWN), cancels all pending
    service checks, or requests checks for the selected services.
    """
    validate_host(self.host)
    validate_port(self.port)
    validate_user(self.user)
    validate_password(self.password)
    all_services = self.get_opt('all')
    services_str = self.get_opt('services')
    cancel = self.get_opt('cancel')
    # --cancel cancels every pending check, so a service selection alongside it is contradictory
    if cancel and (all_services or services_str):
        self.usage('cannot specify --cancel and --services/--all simultaneously' +
                   ', --cancel will cancel all pending service checks')
    if all_services and services_str:
        self.usage('cannot specify --all and --services simultaneously, they are mutually exclusive')
    services_requested = []
    if services_str:
        # Ambari service names are upper case - normalize user input accordingly
        services_requested = [service.strip().upper() for service in services_str.split(',')]
    list_clusters = self.get_opt('list_clusters')
    list_services = self.get_opt('list_services')
    clusters = self.get_clusters()
    if list_clusters:
        if self.verbose > 0:
            print('Ambari Clusters:\n')
        print('\n'.join(clusters))
        sys.exit(3)
    # convenience: with a single managed cluster, --cluster can be inferred
    if not self.cluster and len(clusters) == 1:
        self.cluster = clusters[0]
        log.info('no --cluster specified, but only one cluster managed by Ambari' +
                 ', inferring --cluster=\'%s\'', self.cluster)
    validate_chars(self.cluster, 'cluster', r'\w\s\.-')
    self.services = self.get_services()
    if list_services:
        if self.verbose > 0:
            print('Ambari Services:\n')
        print('\n'.join(self.services))
        sys.exit(3)
    if not services_requested and not all_services and not cancel:
        self.usage('no --services specified, nor was --all requested')
    services_to_check = []
    if all_services:
        services_to_check = self.services
    else:
        # reject any requested service Ambari does not know about before submitting anything
        for service_requested in services_requested:
            if service_requested not in self.services:
                die('service \'{0}\' is not in the list of available services in Ambari!'.format(service_requested) +
                    ' Here is the list of services available:\n' + '\n'.join(self.services))
        services_to_check = services_requested
    if cancel:
        self.cancel_service_checks()
    else:
        self.request_service_checks(services_to_check)
def process_options(self):
    """Validate the docker service name and the optional integer last-updated warning window."""
    super(CheckDockerSwarmServiceStatus, self).process_options()
    self.service = self.get_opt('service')
    validate_chars(self.service, 'docker service', r'A-Za-z0-9/:\._-')
    updated = self.get_opt('warn_if_last_updated_within')
    if updated is not None:
        validate_int(updated, 'last updated threshold')
        updated = int(updated)
    self.updated = updated
    self.validate_thresholds(simple='lower', positive=True, optional=True)
def process_options(self):
    """Validate --vhost unless listing vhosts, and pick up the --no-tracing flag.

    The vhost is deliberately not appended to the request path: doing so
    would be more concise but yields a generic 404 'object not found' and
    requires the 'administrator' user tag.
    """
    super(CheckRabbitMQVhost, self).process_options()
    if not self.get_opt('list_vhosts'):
        self.vhost = self.get_opt('vhost')
        validate_chars(self.vhost, 'vhost', r'/\w\+-')
    self.no_tracing = self.get_opt('no_tracing')
def process_args(self):
    """Require --repo in 'user/repo' form (exactly two non-empty parts) and validate its characters."""
    self.no_args()
    repo = self.get_opt('repo')
    self.repo = repo
    if repo is None:
        self.usage('--repo not defined')
    parts = repo.split('/')
    if len(parts) != 2 or not all(parts):
        self.usage("invalid --repo format, must be in form of 'user/repo'")
    validate_chars(repo, 'repo', r'\/\w\.-')
def process_options(self):
    """Validate GoCD pipeline and stage names, then append the stage history endpoint to the path."""
    super(CheckGoCDStageStatus, self).process_options()
    pipeline = self.get_opt('pipeline')
    stage = self.get_opt('stage')
    validate_chars(pipeline, 'pipeline', 'A-Za-z0-9-')
    validate_chars(stage, 'stage', 'A-Za-z0-9-')
    self.pipeline = pipeline
    self.stage = stage
    self.path += '/{pipeline}/{stage}/history'.format(pipeline=pipeline, stage=stage)
def process_options(self):
    """Target /_all_dbs when listing, otherwise validate the CouchDB database name and target it; thresholds optional."""
    super(CheckCouchDBDatabaseStats, self).process_options()
    if self.get_opt('list'):
        self.path = '/_all_dbs'
    else:
        database = self.get_opt('database')
        # CouchDB database names: lowercase letters, digits and _ $ ( ) + - /
        validate_chars(database, 'database', r'a-z0-9_\$\(\)\+\-/')
        self.database = database
        self.path = '/{0}'.format(database)
    self.validate_thresholds(optional=True)
def process_args(self):
    """Require --repo in 'user/repo' form, validate its characters, then optional thresholds."""
    self.no_args()
    repo = self.get_opt('repo')
    self.repo = repo
    if repo is None:
        self.usage('--repo not defined')
    parts = repo.split('/')
    if len(parts) != 2 or not all(parts):
        self.usage("invalid --repo format, must be in form of 'user/repo'")
    validate_chars(repo, 'repo', r'\/\w\.-')
    self.validate_thresholds(optional=True)
def process_options(self):
    """Require --key (leading slashes stripped), validate it plus the optional --regex, and extend the KV path."""
    super(CheckConsulServiceLeaderElected, self).process_options()
    key = self.get_opt('key')
    self.regex = self.get_opt('regex')
    if not key:
        self.usage('--key not defined')
    key = key.lstrip('/')
    self.key = key
    validate_chars(key, 'key', r'\w\/-')
    if self.regex:
        validate_regex(self.regex, 'key')
    self.path += '{}'.format(key)
def process_options(self):
    """Target /_all_dbs when listing, otherwise validate and target the CouchDB database; thresholds only if the check defines any."""
    super(CheckCouchDBDatabaseStats, self).process_options()
    if self.get_opt('list'):
        self.path = '/_all_dbs'
    else:
        database = self.get_opt('database')
        # CouchDB database names: lowercase letters, digits and _ $ ( ) + - /
        validate_chars(database, 'database', r'a-z0-9_\$\(\)\+\-/')
        self.database = database
        self.path = '/{0}'.format(database)
    if self.has_thresholds:
        self.validate_thresholds(optional=True)
def process_options(self):
    """Validate the Ambari cluster name and substitute it into the API path, or switch to cluster listing."""
    super(CheckAmbariClusterHdfsRackResilience, self).process_options()
    self.no_args()
    cluster_name = self.get_opt('cluster')
    validate_chars(cluster_name, 'cluster', 'A-Za-z0-9-_')
    self.path = self.path.format(cluster=cluster_name)
    # RestNagiosPlugin auto sets 'Accept'='application/json' but this breaks Ambari, fixed in pylib now
    if self.get_opt('list'):
        # listing overrides the cluster-specific path entirely
        self.path = '/api/v1/clusters'
        self.list = True
def process_options(self):
    """Point the query at /api/json when listing jobs, otherwise at the validated job's JSON endpoint."""
    super(CheckJenkinsJob, self).process_options()
    job = self.get_opt('job')
    self.job = job
    self.list_jobs = self.get_opt('list')
    if self.list_jobs:
        self.path = '/api/json'
    else:
        validate_chars(job, 'job', r'A-Za-z0-9\s\._-')
        self.path = '/job/{job}/api/json'.format(job=job)
        self.msg += "'{job}' is ".format(job=job)
    self.validate_thresholds(integer=False, optional=True)
def process_options(self):
    """Resolve --job / --list, validate the optional integer --age, then optional non-integer thresholds."""
    super(CheckJenkinsJob, self).process_options()
    job = self.get_opt('job')
    self.job = job
    self.list_jobs = self.get_opt('list')
    if not self.list_jobs:
        validate_chars(job, 'job', r'A-Za-z0-9\s\._-')
        self.msg += "'{job}' ".format(job=job)
    age = self.get_opt('age')
    if age:
        validate_int(age, 'age')
        age = int(age)
    self.age = age
    self.validate_thresholds(integer=False, optional=True)
def process_args(self):
    """Read host/port/table options; the table name is only validated when not merely listing tables."""
    self.no_args()
    for opt in ('host', 'port', 'table'):
        setattr(self, opt, self.get_opt(opt))
    validate_host(self.host)
    validate_port(self.port)
    if not self.get_opt('list_tables'):
        validate_chars(self.table, 'hbase table', 'A-Za-z0-9:._-')
    self.short_region_name = self.get_opt('short_region_name')
    log_option('shorten region name', self.short_region_name)
def run(self):
    """Trigger, cancel or list Ambari service checks.

    Validates connection/auth, rejects contradictory option combinations,
    infers the cluster when Ambari manages exactly one, and then lists
    clusters/services (exiting UNKNOWN), cancels all pending service checks,
    or requests checks for the selected services.
    """
    validate_host(self.host)
    validate_port(self.port)
    validate_user(self.user)
    validate_password(self.password)
    all_services = self.get_opt('all')
    services_str = self.get_opt('services')
    cancel = self.get_opt('cancel')
    # --cancel cancels every pending check - selecting services alongside it is contradictory
    if cancel and (all_services or services_str):
        self.usage('cannot specify --cancel and --services/--all simultaneously' +
                   ', --cancel will cancel all pending service checks')
    if all_services and services_str:
        self.usage('cannot specify --all and --services simultaneously, they are mutually exclusive')
    services_requested = []
    if services_str:
        # Ambari service names are upper case - normalize user input accordingly
        services_requested = [service.strip().upper() for service in services_str.split(',')]
    list_clusters = self.get_opt('list_clusters')
    list_services = self.get_opt('list_services')
    clusters = self.get_clusters()
    if list_clusters:
        if self.verbose > 0:
            print('Ambari Clusters:\n')
        print('\n'.join(clusters))
        sys.exit(3)
    # convenience: with a single managed cluster, --cluster can be inferred
    if not self.cluster and len(clusters) == 1:
        self.cluster = clusters[0]
        log.info('no --cluster specified, but only one cluster managed by Ambari' +
                 ', inferring --cluster=\'%s\'', self.cluster)
    validate_chars(self.cluster, 'cluster', r'\w\s\.-')
    self.services = self.get_services()
    if list_services:
        if self.verbose > 0:
            print('Ambari Services:\n')
        print('\n'.join(self.services))
        sys.exit(3)
    if not services_requested and not all_services and not cancel:
        self.usage('no --services specified, nor was --all requested')
    services_to_check = []
    if all_services:
        services_to_check = self.services
    else:
        # fail fast if any requested service is unknown to Ambari
        for service_requested in services_requested:
            if service_requested not in self.services:
                die('service \'{0}\' is not in the list of available services in Ambari!'.format(service_requested) +
                    ' Here is the list of services available:\n' + '\n'.join(self.services))
        services_to_check = services_requested
    if cancel:
        self.cancel_service_checks()
    else:
        self.request_service_checks(services_to_check)
def process_options(self):
    """Target /_all_dbs when listing; otherwise validate the database, target it, and default to a HEAD existence probe.

    Unless --get is given, the request is switched to HEAD with JSON parsing
    disabled and a custom response-code handler installed, since a HEAD on the
    database path answers existence without transferring the body.
    """
    super(CheckCouchDBDatabaseExists, self).process_options()
    if self.get_opt('list'):
        self.path = '/_all_dbs'
        return
    database = self.get_opt('database')
    # CouchDB database names: lowercase letters, digits and _ $ ( ) + - /
    validate_chars(database, 'database', r'a-z0-9_\$\(\)\+\-/')
    self.database = database
    self.path = '/{0}'.format(database)
    if not self.get_opt('get'):
        self.request_method = 'head'
        self.json = False
        self.request.check_response_code = self.check_response_code
    self.msg += "'{0}' ".format(database)
def process_options(self):
    """Resolve the Atlas entity search criteria plus optional type/tag/trait filters.

    Requires exactly one of --entity-id / --entity-name unless listing.
    --entity-id is preferred as it is more efficient and is appended to the
    (v1) request path.
    """
    super(CheckAtlasEntity, self).process_options()
    self.entity_name = self.get_opt('entity_name')
    self.entity_id = self.get_opt('entity_id')
    self.list_entities = self.get_opt('list')
    if not self.list_entities:
        if not self.entity_name and not self.entity_id:
            self.usage('must supply an --entity-id/--entity-name to find or --list-entities')
        if self.entity_name and self.entity_id:
            self.usage('cannot specify both --entity-id and --entity-name as the search criteria ' +
                       'at the same time, prefer --entity-id it\'s more efficient')
        if self.entity_name:
            # this can contain pretty much anything including /haritest
            #validate_chars(self.entity_name, 'entity name', r'A-Za-z0-9\.\,_-')
            log_option('entity_name', self.entity_name)
        if self.entity_id:
            validate_chars(self.entity_id, 'entity id', r'A-Za-z0-9-')
            # v1
            self.path += '/{0}'.format(self.entity_id)
            # v2
            #self.path += '/guids?guid={0}'.format(self.entity_id)
        self._type = self.get_opt('type')
        self.tags = self.get_opt('tags')
        #self.traits = self.get_opt('traits')
        if self._type:
            validate_chars(self._type, 'type', r'A-Za-z0-9_-')
        if self.tags:
            self.tags = sorted(self.tags.split(','))
            for tag in self.tags:
                validate_chars(tag, 'tag', r'A-Za-z0-9\.\,_-')
        # NOTE(review): the get_opt('traits') line above is commented out, yet
        # self.traits is still read here - presumably __init__ sets a default
        # (e.g. None) so this branch is simply skipped; confirm against the class
        if self.traits:
            self.traits = sorted(self.traits.split(','))
            for trait in self.traits:
                validate_chars(trait, 'trait', r'A-Za-z0-9\.\,_-')
def process_args(self):
    """Read host/port/table plus row-key prefix/sort options; table and prefix length validated unless just listing tables."""
    self.no_args()
    for opt in ('host', 'port', 'table'):
        setattr(self, opt, self.get_opt(opt))
    self.prefix_length = self.get_opt('key_prefix_length')
    self.sort = self.get_opt('sort')
    self.sort_desc = self.get_opt('desc')
    validate_host(self.host)
    validate_port(self.port)
    if not self.get_opt('list_tables'):
        validate_chars(self.table, 'hbase table', 'A-Za-z0-9:._-')
        # prefix length bounded 1-10 to keep row distribution buckets meaningful
        validate_int(self.prefix_length, 'row key prefix length', 1, 10)
        self.prefix_length = int(self.prefix_length)
def process_args(self): self.brokers = self.get_opt('brokers') # TODO: add broker list validation back in # validate_hostport(self.brokers) log_option('brokers', self.brokers) self.timeout_ms = max((self.timeout * 1000 - 1000) / 2, 1000) try: list_topics = self.get_opt('list_topics') list_partitions = self.get_opt('list_partitions') if list_topics: self.print_topics() sys.exit(ERRORS['UNKNOWN']) self.topic = self.get_opt('topic') except KafkaError: raise CriticalError(self.exception_msg()) if self.topic: validate_chars(self.topic, 'topic', 'A-Za-z-') elif list_topics or list_partitions: pass else: self.usage('--topic not specified') try: if list_partitions: if self.topic: self.print_topic_partitions(self.topic) else: for topic in self.get_topics(): self.print_topic_partitions(topic) sys.exit(ERRORS['UNKNOWN']) except KafkaError: raise CriticalError(self.exception_msg()) self.partition = self.get_opt('partition') # technically optional, will hash to a random partition, but need to know which partition to get offset # if self.partition is not None: validate_int(self.partition, "partition", 0, 10000) self.topic_partition = TopicPartition(self.topic, self.partition) self.acks = self.get_opt('acks') try: self.acks = int(self.acks) except ValueError: pass log_option('acks', self.acks) self.validate_thresholds()
def process_options(self):
    """Build the queue API path from --vhost/--queue and sanity-check the optional --durable flag."""
    super(CheckRabbitMQQueue, self).process_options()
    self.vhost = self.get_opt('vhost')
    validate_chars(self.vhost, 'vhost', r'/\w\+-')
    self.path += '/' + urllib.quote_plus(self.vhost)
    self.queue = self.get_opt('queue')
    if not self.get_opt('list_queues'):
        validate_chars(self.queue, 'queue', r'/\w\.\+-')
        self.path += '/' + urllib.quote_plus(self.queue)
    durable = self.get_opt('durable')
    if durable:
        durable = durable.lower()
        if durable not in ('true', 'false'):
            self.usage("invalid --durable option '{0}' given, if specified must be either 'true' or 'false'".\
                       format(durable))
    self.expected_durable = durable
def process_args(self):
    """Validate host/port and the required --key (leading slashes stripped); --regex and thresholds are optional.

    Raises CodingError if the subclass forgot to set self.name in __init__().
    """
    if not self.name:
        raise CodingError("didn't name check, please set self.name in __init__()")
    self.no_args()
    self.host = self.get_opt('host')
    self.port = self.get_opt('port')
    validate_host(self.host)
    validate_port(self.port)
    key = self.get_opt('key')
    self.regex = self.get_opt('regex')
    if not key:
        self.usage('--key not defined')
    key = key.lstrip('/')
    self.key = key
    validate_chars(key, 'key', r'\w\/-')
    if self.regex:
        validate_regex(self.regex, 'key')
    self.validate_thresholds(optional=True)
def process_options(self):
    """Parse and validate workflow check options.

    Exactly one of --id / --name / --all must be selected (or --list to
    discover workflows); mutually exclusive combinations exit via usage().
    --max-age and the runtime bounds are validated as floats, and
    --min-runtime must not exceed --max-runtime.
    """
    self.no_args()
    self.host = self.get_opt('host')
    self.port = self.get_opt('port')
    self.user = self.get_opt('user')
    self.password = self.get_opt('password')
    self._all = self.get_opt('all')
    self.workflow_id = self.get_opt('id')
    self.workflow_name = self.get_opt('name')
    self.max_age = self.get_opt('max_age')
    self.max_runtime = self.get_opt('max_runtime')
    self.min_runtime = self.get_opt('min_runtime')
    if self.get_opt('ssl'):
        self.protocol = 'https'
    validate_host(self.host)
    validate_port(self.port)
    validate_user(self.user)
    validate_password(self.password)
    # --all conflicts with targeting a single workflow by name or id
    if self._all and (self.workflow_name is not None or self.workflow_id is not None):
        self.usage('cannot specify both --all and --name/--id simultaneously')
    if self.workflow_id is not None:
        if self.workflow_name is not None:
            self.usage('cannot specify both --id and --name simultaneously')
        validate_int(self.workflow_id, 'workflow id', 1)
        self.workflow_id = int(self.workflow_id)
    elif self.workflow_name is not None:
        validate_chars(self.workflow_name, 'workflow name', r'\w\s-')
    elif self._all:
        pass
    elif self.get_opt('list'):
        pass
    else:
        self.usage('must specify one of --name / --id / --all or use --list to find workflow names/IDs to specify')
    if self.max_age is not None:
        validate_float(self.max_age, 'max age', 1)
        self.max_age = float(self.max_age)
    if self.max_runtime is not None:
        validate_float(self.max_runtime, 'max runtime', 1)
        self.max_runtime = float(self.max_runtime)
    if self.min_runtime is not None:
        validate_float(self.min_runtime, 'min runtime', 0)
        self.min_runtime = float(self.min_runtime)
        # cross-check only meaningful when both bounds were supplied
        if self.max_runtime is not None and self.min_runtime > self.max_runtime:
            self.usage('--min-runtime cannot be greater than --max-runtime!')