def main(args, loglevel):
    """Nagios-style check of shard usage on an Elasticsearch node.

    Queries /_cat/allocation for ``args.es_host``, computes shard usage as a
    percentage of ``args.shard_max``, prints one status line, and exits with
    the conventional monitoring codes: 2 (CRITICAL), 1 (WARNING), 0 (OK).
    """
    logging.basicConfig(format="%(levelname)s: %(message)s", level=loglevel)

    uri_path = "/_cat/allocation/%s" % args.es_host
    uri_query = "?format=json"

    result = es_query_results(user=args.es_user,
                              passwd=args.es_passwd,
                              host=args.es_host,
                              port=args.es_port,
                              path=uri_path,
                              query=uri_query)
    values = json.loads(result)
    shard_count = int(values[0]['shards'])
    shard_usage = int(100 * shard_count / args.shard_max)
    # BUG FIX: the original called .format() on the return value of print()
    # (which is None), raising AttributeError before printing anything.
    # Format the message first, then print it.
    status = "Shard Usage {0}% ({1}/{2})".format(shard_usage, shard_count,
                                                args.shard_max)
    if shard_usage > args.shard_critical:
        print("CRITICAL: " + status)
        sys.exit(2)
    elif shard_usage > args.shard_warning:
        print("WARNING: " + status)
        sys.exit(1)
    else:
        # Plain `else` also covers usage exactly equal to the warning
        # threshold, which the original `elif usage < warning` skipped
        # (the script then exited without printing anything).
        print("OK: " + status)
        sys.exit(0)
def main(args, loglevel):
    """Nagios-style check of the local Elasticsearch node's JVM heap usage.

    Queries /_nodes/_local/stats, computes heap usage as a percentage of the
    configured heap maximum, prints one status line, and exits 2/1/0 for
    CRITICAL/WARNING/OK.
    """
    logging.basicConfig(format="%(levelname)s: %(message)s", level=loglevel)

    uri_path = "/_nodes/_local/stats"
    uri_query = ""

    result = es_query_results(user=args.es_user,
                              passwd=args.es_passwd,
                              host=args.es_host,
                              port=args.es_port,
                              path=uri_path,
                              query=uri_query)
    values = json.loads(result)
    # BUG FIX: dict.keys() returns a non-indexable view in Python 3, so the
    # original keys[0] raised TypeError. The _local filter yields exactly one
    # node, so take the single node id with next(iter(...)).
    node_id = next(iter(values['nodes']))
    mem = values['nodes'][node_id]['jvm']['mem']
    heap_usage = int(100 * mem['heap_used_in_bytes'] / mem['heap_max_in_bytes'])
    # BUG FIX: the original applied .format() to print()'s None return value,
    # raising AttributeError; format inside the print call instead.
    if heap_usage > args.heap_critical:
        print("CRITICAL: JVM Heap Usage {0}%".format(heap_usage))
        sys.exit(2)
    elif heap_usage > args.heap_warning:
        print("WARNING: JVM Heap Usage {0}%".format(heap_usage))
        sys.exit(1)
    else:
        # `else` also covers exact equality with the warning threshold,
        # which the original `elif usage < warning` silently skipped.
        print("OK: JVM Heap Usage {0}%".format(heap_usage))
        sys.exit(0)
def main(args, loglevel):
    """Nagios-style check of the local node's open file descriptor usage.

    Queries /_nodes/_local/stats/process, computes open descriptors as a
    percentage of the process maximum, prints one status line, and exits
    2/1/0 for CRITICAL/WARNING/OK.
    """
    logging.basicConfig(format="%(levelname)s: %(message)s", level=loglevel)

    uri_path = "/_nodes/_local/stats/process"
    uri_query = ""

    result = es_query_results(user=args.es_user,
                              passwd=args.es_passwd,
                              host=args.es_host,
                              port=args.es_port,
                              path=uri_path,
                              query=uri_query)
    values = json.loads(result)
    # BUG FIX: dict.keys() is a non-indexable view in Python 3; the original
    # keys[0] raised TypeError. _local returns a single node, so pull its id
    # with next(iter(...)).
    node_id = next(iter(values['nodes']))
    process = values['nodes'][node_id]['process']
    fds_usage = int(100 * process['open_file_descriptors'] /
                    process['max_file_descriptors'])
    # BUG FIX: the original applied .format() to print()'s None return value
    # (AttributeError); format inside the print call instead.
    if fds_usage > args.fds_critical:
        print("CRITICAL: File Descriptor Usage {0}%".format(fds_usage))
        sys.exit(2)
    elif fds_usage > args.fds_warning:
        print("WARNING: File Descriptor Usage {0}%".format(fds_usage))
        sys.exit(1)
    else:
        # `else` also handles usage exactly at the warning threshold, which
        # the original `elif usage < warning` skipped.
        print("OK: File Descriptor Usage {0}%".format(fds_usage))
        sys.exit(0)
# Example 4
def main(args, loglevel):
    """Nagios-style check of disk usage against Elasticsearch watermarks.

    Queries /_cat/allocation for ``args.es_host`` and compares disk.percent
    against the flood-stage, high, and low watermark thresholds supplied on
    the command line. Exits 2 (CRITICAL), 1 (WARNING), or 0 (OK).
    """
    logging.basicConfig(format="%(levelname)s: %(message)s", level=loglevel)

    uri_path = "/_cat/allocation/%s" % args.es_host
    uri_query = "?format=json"

    result = es_query_results(user=args.es_user,
                              passwd=args.es_passwd,
                              host=args.es_host,
                              port=args.es_port,
                              path=uri_path,
                              query=uri_query)
    values = json.loads(result)
    disk_usage = int(values[0]['disk.percent'])
    # BUG FIX throughout: the original called .format() on print()'s None
    # return value, raising AttributeError before any output.
    if disk_usage > args.flood_watermark:
        print("CRITICAL: Disk Usage {0}% "
              "Elasticsearch is enforcing a read-only index block on every "
              "index that has one or more shards allocated for this "
              "node".format(disk_usage))
        sys.exit(2)
    elif disk_usage > args.high_watermark:
        print("CRITICAL: Disk Usage {0}% "
              "Elasticsearch will attempt to relocate shards "
              "away from this node".format(disk_usage))
        # BUG FIX: the original fell through here with no exit call, so a
        # CRITICAL high-watermark breach returned exit status 0 (OK) to the
        # monitoring system.
        sys.exit(2)
    elif disk_usage > args.low_watermark:
        print("WARNING: Disk Usage {0}% "
              "Elasticsearch will not allocate shards to this "
              "node".format(disk_usage))
        sys.exit(1)
    else:
        # `else` also covers usage exactly at the low watermark, which the
        # original `elif usage < low_watermark` silently skipped.
        print("OK: Disk Usage {0}%".format(disk_usage))
        sys.exit(0)
def main(args, loglevel):
    """Nagios-style check of Elasticsearch circuit breakers on the local node.

    Queries node stats for the breaker metric, partitions breakers into
    tripped vs. healthy, prints a summary line plus a PrettyTable of details,
    and exits 2 (any breaker tripped) or 0 (all healthy).
    """
    logging.basicConfig(format="%(levelname)s: %(message)s", level=loglevel)

    # BUG FIX: the original requested the thread_pool metric and then read
    # the 'breakers' key, which the filtered response does not include
    # (KeyError). The node-stats metric for circuit breakers is 'breaker'.
    uri_path = "/_nodes/_local/stats/breaker"
    uri_query = "?pretty"

    result = es_query_results(user=args.es_user,
                              passwd=args.es_passwd,
                              host=args.es_host,
                              port=args.es_port,
                              path=uri_path,
                              query=uri_query)
    values = json.loads(result)
    # BUG FIX: dict.keys() is a non-indexable view in Python 3 (the original
    # keys[0] raised TypeError); _local returns exactly one node.
    node_id = next(iter(values['nodes']))
    breakers = values['nodes'][node_id]['breakers']
    count_total_breakers = len(breakers)

    tripped_breakers = {}
    healthy_breakers = {}
    for name, stats in breakers.items():
        bucket = tripped_breakers if int(stats['tripped']) != 0 \
            else healthy_breakers
        bucket[name] = {
            'limit_size': stats['limit_size'],
            'estimated_size': stats['estimated_size'],
        }

    def _print_table(breaker_map):
        # One row per breaker: name, configured limit, current estimate.
        t = PrettyTable(['Breaker', 'Limit Size', 'Estimated Size'])
        for name, stats in breaker_map.items():
            t.add_row([name, stats['limit_size'], stats['estimated_size']])
        print(t)

    if tripped_breakers:
        count_tripped_breakers = len(tripped_breakers)
        print('CRITICAL: {0}/{1} Tripped'.format(count_tripped_breakers,
                                                 count_total_breakers))
        _print_table(tripped_breakers)
        sys.exit(2)
    else:
        count_healthy_breakers = len(healthy_breakers)
        print('OK: {0}/{1} Healthy'.format(count_healthy_breakers,
                                           count_total_breakers))
        _print_table(healthy_breakers)
        sys.exit(0)
def main(args, loglevel):
    """Nagios-style check of Elasticsearch thread-pool rejections.

    Queries the local node's thread_pool stats, partitions pools into those
    with rejections vs. healthy ones, prints a summary line plus a
    PrettyTable of details, and exits 2 (any rejections) or 0 (all healthy).
    """
    logging.basicConfig(format="%(levelname)s: %(message)s", level=loglevel)

    uri_path = "/_nodes/_local/stats/thread_pool"
    uri_query = "?pretty"

    result = es_query_results(user=args.es_user,
                              passwd=args.es_passwd,
                              host=args.es_host,
                              port=args.es_port,
                              path=uri_path,
                              query=uri_query)
    values = json.loads(result)
    # BUG FIX: dict.keys() is a non-indexable view in Python 3 (the original
    # keys[0] raised TypeError); _local returns exactly one node.
    node_id = next(iter(values['nodes']))
    pools = values['nodes'][node_id]['thread_pool']
    count_total_pools = len(pools)

    rejected_pools = {}
    healthy_pools = {}
    for name, stats in pools.items():
        bucket = rejected_pools if int(stats['rejected']) != 0 \
            else healthy_pools
        bucket[name] = {
            'active': stats['active'],
            'queue': stats['queue'],
            'rejected': stats['rejected'],
        }

    def _print_table(pool_map):
        # One row per pool: name, active threads, queued tasks, rejections.
        t = PrettyTable(['Pool', 'Active', 'Queue', 'Rejected'])
        for name, stats in pool_map.items():
            t.add_row([name, stats['active'], stats['queue'],
                       stats['rejected']])
        print(t)

    if rejected_pools:
        count_rejected_pools = len(rejected_pools)
        # BUG FIX: the original said "Tripped" here — copy-pasted from the
        # circuit-breaker check; thread pools reject, they don't trip.
        print('CRITICAL: {0}/{1} Rejected'.format(count_rejected_pools,
                                                  count_total_pools))
        _print_table(rejected_pools)
        sys.exit(2)
    else:
        count_healthy_pools = len(healthy_pools)
        print('OK: {0}/{1} Healthy'.format(count_healthy_pools,
                                           count_total_pools))
        _print_table(healthy_pools)
        sys.exit(0)
# Example 7
def main(args, loglevel):
    """Nagios-style check of Elasticsearch cluster health.

    Queries /_cluster/health and maps the reported status to an exit code:
    GREEN -> 0 (OK), YELLOW -> 1 (WARNING), RED -> 2 (CRITICAL), anything
    else -> 3 (UNKNOWN).
    """
    logging.basicConfig(format="%(levelname)s: %(message)s", level=loglevel)

    uri_path = "/_cluster/health"
    uri_query = "?pretty"

    result = es_query_results(user=args.es_user,
                              passwd=args.es_passwd,
                              host=args.es_host,
                              port=args.es_port,
                              path=uri_path,
                              query=uri_query)
    values = json.loads(result)
    health = values['status'].upper()
    # BUG FIX: the original applied .format() to print()'s None return value,
    # raising AttributeError before any output.
    if health == "GREEN":
        print("OK: Cluster health is {0}".format(health))
        sys.exit(0)
    elif health == "YELLOW":
        print("WARNING: Cluster health is {0}".format(health))
        sys.exit(1)
    elif health == "RED":
        print("CRITICAL: Cluster health is {0}".format(health))
        sys.exit(2)
    else:
        # Robustness: the original fell through silently (implicit exit 0)
        # on an unrecognized status; report UNKNOWN per plugin convention.
        print("UNKNOWN: Cluster health is {0}".format(health))
        sys.exit(3)