Example #1
    def post_process(self, group, event, is_new, is_sample, **kwargs):
        """
        Process error.
        """
        if not self.is_configured(group.project):
            return

        host = self.get_option('server_host', group.project)
        port = int(self.get_option('server_port', group.project))
        prefix = self.get_option('prefix', group.project)
        hostname = self.get_option('hostname',
                                   group.project) or socket.gethostname()
        resolve_age = group.project.get_option('sentry:resolve_age', None)

        now = int(time.time())
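        # Item key template of the form '<prefix>.<level>[<project slug>]'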
        template = '%s.%%s[%s]' % (prefix, group.project.slug)

        level = group.get_level_display()
        label = template % level

        groups = group.project.group_set.filter(status=STATUS_UNRESOLVED)

        if resolve_age:
            oldest = timezone.now() - timedelta(hours=int(resolve_age))
            groups = groups.filter(last_seen__gt=oldest)

        num_errors = groups.filter(level=group.level).count()

        metric = Metric(hostname, label, num_errors, now)

        log.info('will send %s=%s to zabbix', label, num_errors)

        send_to_zabbix([metric], host, port)
Example #2
def process_message(msg):
    """
    Handle a newly arrived message: look up its topic in the KeyMap
    dictionary and forward the payload on to Zabbix using the
    associated Zabbix key.
    """
    logging.debug("Processing : " + msg.topic)
    if msg.topic in KeyMap.mapdict:
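        # Normalize textual ON/OFF payloads to the numeric 1/0 values sent to Zabbix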
        if msg.payload == "ON":
            msg.payload = 1
        if msg.payload == "OFF":
            msg.payload = 0
        zbxKey = KeyMap.mapdict[msg.topic]
        (zbxKey, zbxHost) = zbxKey.split("::")
        if zbxHost == "":
            zbxHost = KEYHOST
        logging.info("Sending %s %s to Zabbix to host %s key %s", msg.topic,
                     msg.payload, zbxHost, zbxKey)
        # Zabbix can also accept text and character data...
        # should we sanitize input or just accept it as is?
        send_to_zabbix(
            [Metric(zbxHost, zbxKey, msg.payload, time.strftime("%s"))],
            ZBXSERVER, ZBXPORT)
    else:
        # Received something with a /raw/ topic,
        # but it didn't match anything. Log it, and discard it
        logging.debug("Unknown: %s", msg.topic)
Example #3
    def run(self):
        """ this is our main proxy ping-pong loop """
        last_denied = 0

        while not self.finished:
            #
            #   Array of stats to send to Zabbix
            #
            denied = self.parent.statistics.total_overload - last_denied
            last_denied = self.parent.statistics.total_overload
            #
            metrics = []
            metrics.append(
                Metric(self.hostname, 'limiter.current',
                       len(self.parent.connection_map)))
            metrics.append(
                Metric(self.hostname, 'limiter.queued',
                       self.parent.request_queue.qsize()))
            metrics.append(Metric(self.hostname, 'limiter.rejected', denied))

            self.transport.write("ping:%d" % self.parent.id, self.address)

            sleep(1)
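            # Leader election: the peer with the lowest id seen within the last 2 seconds wins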
            lowest = 99
            for peer in self.peers.keys():
                row = self.peers[peer]
                age = time() - row[0]
                if age > 2:
                    continue
                myid = int(row[3])
                if myid <= lowest:
                    lowest = myid

            old_master = self.is_master
            self.is_master = self.parent.id == lowest
            if old_master != self.is_master:
                if self.is_master:
                    log.msg("** [%d] Election :: We Won!" % self.parent.id)
                else:
                    log.msg("** [%d] Election :: We Lost!" % self.parent.id)

            monitor.send(metrics)
Example #4
def batch(ctx, name):
    """Send values in batch to zabbix_host (server defined in config file)"""
    logger = logging.getLogger('es_stats_zabbix') # Since logging is set up in cli()
    logger.debug('Batch mode with named batch: {0}'.format(name))
    if name not in ctx.obj['batches']:
        click.echo(click.style('Batch {0} not found in configuration file.'.format(name), fg='red', bold=True))
        sys.exit(1)
    from zbxsend import Metric, send_to_zabbix
    b = ctx.obj['batches'][name]
    logger.debug('Batch config args: {0}'.format(b))
    metrics = []
    zserver = b.pop('server')
    zport = int(b.pop('port'))
    zhost = b.pop('host')
    # Should only be Item keys at this point.
    logger.debug('Batch keys: {0}'.format(b))
    # Separate keys into similar APIs
    apis = { 'health': [], 'clusterstats': [], 'clusterstate': [],
        'nodestats': [], 'nodeinfo': [],}
    for k in b:
        ztuple = parse_key(b[k])
        apis[ztuple[0]].append(ztuple)
    logger.debug('API-separated keys: {0}'.format(apis))
    for api in apis:
        # ignore empty list
        if not apis[api]:
            continue
        # get the base api object
        apiobj = map_api(apis[api][0], ctx.obj['client'])
        # iterate over each tuple
        for ztuple in apis[api]:
            result = apiobj.get(ztuple[2], name=ztuple[1])
            if result == DotMap():
                result = 'ZBX_NOTSUPPORTED'
            # We do not have the key here, so we need to rebuild it.
            metrics.append(Metric(zhost, ztuple[0] + '[' + ztuple[2] + ']', result))

    logger.debug('Metrics: {0}'.format(metrics))
    result = send_to_zabbix(metrics, zserver, zport)
    logger.debug('Result = {0}'.format(result))
    # Spit out exit code to stdout
    click.echo(0 if result else 1)
    logger.info('Job completed.')
Example #5
                    'select PoolID, ElementName from IBMTSSVC_ConcreteStoragePool'
            ):
                output.append('{"{#TYPE}":"%s","{#NAME}":"%s","{#ID}":"%s"}' %
                              ('pool', pool.properties['ElementName'].value,
                               pool.properties['PoolID'].value))

        json = []
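        # Assemble the Zabbix low-level discovery payload: {"data": [ {...}, {...} ]}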
        json.append('{"data":[')

        for i, v in enumerate(output):
            if i < len(output) - 1:
                json.append(v + ',')
            else:
                json.append(v)
        json.append(']}')

        json_string = ''.join(json)
        debug_print(json_string)

        trapper_key = 'svc.discovery.%s' % discovery
        debug_print('Sending to host=%s, key=%s' % (cluster, trapper_key))

        #send json to LLD trapper item with zbxsend module
        if debug:
            logging.basicConfig(level=logging.INFO)
        else:
            logging.basicConfig(level=logging.WARNING)
        send_to_zabbix([Metric(cluster, trapper_key, json_string)],
                       'localhost', 10051)
        debug_print('')
Example #6
            data = getHostName(data, 'host')
            output = output + data

        if discovery == 'battery':
            data = conn.EnumerateInstances('HITACHI_Battery')
            data = getBatteryName(data, 'battery')
            output = output + data

        json = []
        json.append('{"data":[')

        for i, v in enumerate(output):
            if i < len(output) - 1:
                json.append(v + ',')
            else:
                json.append(v)
        json.append(']}')

        json_string = ''.join(json)
        print(json_string)

        trapper_key = 'svc.discovery.%s' % discovery
        debug_print('Sending to host=%s, key=%s' % (hostname, trapper_key))

        #send json to LLD trapper item with zbxsend module
        if debug:
            logging.basicConfig(level=logging.INFO)
        else:
            logging.basicConfig(level=logging.WARNING)
        send_to_zabbix([Metric(hostname, trapper_key, json_string)],
                       'localhost', 10051)
Example #7
zabbix_metrics = []
events = json_data['result']['events']
for e in events:

    #Storwize Unified cluster status
    if e.get('clazz') == 'com.ibm.sonas.gui.events.pods.ConnectionStatusEvent':
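        # Event timestamps are in milliseconds; convert to seconds for Zabbix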
        timestamp = float(e['timestamp']) / 1000
        debug_print('%s %s %s' %
                    (e.get('clazz'), e.get('id'),
                     str(datetime.datetime.fromtimestamp(timestamp))))
        for i in e['items'].keys():
            zabbix_item_key = UNIFIED_CONN_STATUS_TMPL % i
            zabbix_item_value = e['items'][i]
            #debug_print('host=%s, key=%s, value=%s, timestamp=%s' % (host, zabbix_item_key, zabbix_item_value, str(datetime.datetime.fromtimestamp(timestamp))))
            zabbix_metrics.append(
                Metric(host, zabbix_item_key, zabbix_item_value, timestamp))

    #Storwize block cluster status
    if e.get('clazz') == 'com.ibm.svc.gui.events.ConnectionStatusEvent':
        timestamp = float(e['timestamp']) / 1000
        debug_print('%s %s %s' %
                    (e.get('clazz'), e.get('id'),
                     str(datetime.datetime.fromtimestamp(timestamp))))
        for i in ['externalStorage', 'internalStorage', 'remotePartnerships']:
            zabbix_item_key = SVC_CONN_STATUS_TMPL % i
            zabbix_item_value = e[i]
            #debug_print('host=%s, key=%s, value=%s, timestamp=%s' % (host, zabbix_item_key, zabbix_item_value, str(datetime.datetime.fromtimestamp(timestamp))))
            zabbix_metrics.append(
                Metric(host, zabbix_item_key, zabbix_item_value, timestamp))

if debug:
Example #8
  print >> sys.stderr, json.dumps(json_data, sort_keys = True, indent = 4).decode('utf-8')
  exit(1)

zabbix_metrics = []
events = json_data['result']['events']
for e in events:
  
  #Storwize Unified cluster status
  if e.get('clazz') == 'com.ibm.sonas.gui.events.pods.ConnectionStatusEvent':
    timestamp = float(e['timestamp'])/1000
    debug_print('%s %s %s' % (e.get('clazz'), e.get('id'), str(datetime.datetime.fromtimestamp(timestamp)) ) )
    for i in e['items'].keys():
      zabbix_item_key = UNIFIED_CONN_STATUS_TMPL % i
      zabbix_item_value = e['items'][i]
      #debug_print('host=%s, key=%s, value=%s, timestamp=%s' % (host, zabbix_item_key, zabbix_item_value, str(datetime.datetime.fromtimestamp(timestamp))))
      zabbix_metrics.append( Metric(host, zabbix_item_key, zabbix_item_value, timestamp))

  #Storwize block cluster status
  if e.get('clazz') == 'com.ibm.svc.gui.events.ConnectionStatusEvent':
    timestamp = float(e['timestamp'])/1000
    debug_print('%s %s %s' % (e.get('clazz'), e.get('id'), str(datetime.datetime.fromtimestamp(timestamp)) ) )
    for i in ['externalStorage', 'internalStorage', 'remotePartnerships']:
      zabbix_item_key = SVC_CONN_STATUS_TMPL % i
      zabbix_item_value = e[i]
      #debug_print('host=%s, key=%s, value=%s, timestamp=%s' % (host, zabbix_item_key, zabbix_item_value, str(datetime.datetime.fromtimestamp(timestamp))))
      zabbix_metrics.append( Metric(host, zabbix_item_key, zabbix_item_value, timestamp))

if debug:
  for m in zabbix_metrics:
    print str(m)
Example #9
def main(arguments=None):
    """
    Program entry point.

    :param arguments:
    :return:
    """
    try:
        if arguments is None:  # __name__=__main__
            arguments = sys.argv[1:]
            progname = sys.argv[0]
        else:  # module entry
            arguments = arguments[1:]
            progname = arguments[0]
    except IndexError:
        print(return_epilog() + "\n")
        logging.error("Invalid options. Use --help for more information.")
        sys.exit(1)

    arg_parser = argparse.ArgumentParser(
        prog=progname,
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description=descriptionmacro,
        epilog=return_epilog())
    arg_parser.add_argument('COMMAND')
    arg_parser.add_argument("-V",
                            "--version",
                            action='version',
                            version='UNSUPPORTED OPTION')
    arg_parser.add_argument(
        "--key",
        "-k",
        nargs='?',
        default=None,
        help="Optional with `check` command. Can be used to run checks on"
        " a limited subset of item headings under testSet from the yaml "
        "config.")
    arg_parser.add_argument(
        "--datatype",
        "-t",
        nargs='?',
        default=None,
        help="Required with `discover` command. This filters objects from"
        " the config that have a particular datatype. This data is used by"
        " low level discovery in Zabbix.")
    arg_parser.add_argument(
        "-c",
        "--config",
        default=None,
        help="Specify custom config file, system default /etc/url_monitor."
        "yaml")
    arg_parser.add_argument(
        "--loglevel",
        default=None,
        help="Specify custom loglevel override. Available options [debug,"
        " info, wrna, critical, error, exceptions]")
    arg_parser.add_argument(
        "--thresholdoperator",
        "-o",
        default=None,
        help=
        "Only used by zabbix low level discovery to create unique item keys")

    inputflag = arg_parser.parse_args(args=arguments)

    configinstance = configuration.ConfigObject()
    configinstance.load_yaml_file(inputflag.config)
    logger = configinstance.get_logger(inputflag.loglevel)

    configinstance.pre_flight_check()
    config = configinstance.load()

    # stage return code
    set_rc = 0

    # skip if skip conditions exist (for standby nodes)
    conditional_skip_queue = configinstance.skip_conditions
    if inputflag.COMMAND == "discover":
        conditional_skip_queue = []  # no need to disable this
    if len(conditional_skip_queue) > 0:
        logger.info("Checking {0} standby conditions to see if test execution"
                    " should skip.".format(len(conditional_skip_queue)))
    for test in conditional_skip_queue:
        for condition, condition_args in test.items():
            if commons.skip_on_external_condition(logger, condition,
                                                  condition_args):
                exit(0)

    if inputflag.COMMAND == "check":
        try:
            lock = lockfile.FileLock(config['config']['pidfile'])
        except lockfile.NotMyLock as e:
            logger.error(
                "lockfile exception: it appears this is not my lockfile {0}".
                format(e))
            exit(1)
        except Exception as e:
            logger.error(
                "lockfile exception: a general exception occured while acquiring "
                "lockfile.FileLock {0}".format(e))
            exit(1)

        if lock.is_locked():
            logger.critical(
                " Fail! Process already running with PID {0}. EXECUTION STOP.".
                format(lock.pid))
            exit(1)
        with lock:  # context will .release() automatically
            logger.info("PID lock acquired {0} {1}".format(
                lock.path, lock.pid))

            # run check
            completed_runs = []  # check results
            for checkitem in config['checks']:
                try:
                    if (inputflag.key is not None
                            and checkitem['key'] == inputflag.key):
                        # --key defined and name matched! only run 1 check
                        rc, checkobj = action.check(checkitem, configinstance,
                                                    logger)
                        completed_runs.append((rc, checkitem['key'], checkobj))
                    elif not inputflag.key:
                        # run all checks
                        rc, checkobj = action.check(checkitem, configinstance,
                                                    logger)
                        completed_runs.append((rc, checkitem['key'], checkobj))
                except Exception as e:
                    logger.exception(e)

            # set run status overall
            for check in completed_runs:
                rc, name, values = check
                if rc == 0 and set_rc == 0:
                    set_rc = 0
                else:
                    set_rc = 1

            # report errors
            badmsg = "with errors    [FAIL]"
            if set_rc == 0:
                badmsg = "without errors    [ OK ]"
            logger.info("Checks have completed {0}".format(badmsg))

            # Report final conditions to zabbix (so informational alerting can
            # be built around failed script runs, exceptions, network errors,
            # timeouts, etc)
            logger.info(
                "Sending execution summary to zabbix server as Metrics objects"
            )
            try:
                values
            except NameError:
                values = {'EXECUTION_STATUS': 1}  # trigger an alert

            metrickey = config['config']['zabbix']['checksummary_key_format']

            check_completion_status = [
                Metric(config['config']['zabbix']['host'], metrickey, set_rc)
            ]

            logger.debug("Summary: {0}".format(check_completion_status))
            if not action.transmitfacade(
                    config, check_completion_status, logger=logger):
                logger.critical(
                    "Sending execution summary to zabbix server failed!")
                set_rc = 1
    if inputflag.COMMAND == "discover":
        action.discover(inputflag, configinstance, logger)
        set_rc = 0

    # drop lockfile, then exit (if check mode is active)
    if inputflag.COMMAND == "check":
        print(set_rc)  # no need to print the retcode in discover mode
        exit(set_rc)
Example #10
    def flush(self):
        ts = int(time.time())
        stats = 0
        stat_string = ''
#        self.pct_threshold = 10
        
        metrics = []
        
        for k, v in self.counters.items():
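            # Convert each raw count to a per-second rate (flush_interval is in milliseconds)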
            v = float(v) / (self.flush_interval / 1000)
            
            host, key = k.split(':',1)
            
            metrics.append(Metric(host, key, str(v), ts))

            self.counters[k] = 0
            stats += 1

        for k, v in self.timers.items():
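            # For each timer key emit mean, upper, lower, count, upper_<pct> and median items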
            if len(v) > 0:
                v.sort()
                count = len(v)
                min = v[0]
                max = v[-1]

                mean = min
                max_threshold = max
                median = min

                if count > 1:
                    #count - int(round((100.0 - self.pct_threshold) / 100) * count)
                    thresh_index = int(round(count * float(self.pct_threshold) / 100))
                    max_threshold = v[thresh_index - 1]
                    total = sum(v[:thresh_index])
                    mean = total / thresh_index
                    
                    if count%2 == 0:
                        median = (v[count/2] + v[count/2-1])/2.0
                    else:
                        median = (v[count/2])

                self.timers[k] = []

                host, key = k.split(':', 1)
                metrics.extend([
                    Metric(host, key + '[mean]', mean, ts),
                    Metric(host, key + '[upper]', max, ts),
                    Metric(host, key + '[lower]', min, ts),
                    Metric(host, key + '[count]', count, ts),
                    Metric(host, key + '[upper_%s]' % self.pct_threshold, max_threshold, ts),
                    Metric(host, key + '[median]', median, ts),
                ])

                stats += 1

#        stat_string += 'statsd.numStats %s %d' % (stats, ts)

        send_to_zabbix(metrics, self.zabbix_host, self.zabbix_port)

        self._set_timer()

        if self.debug:
            print metrics
Example #11
logging.info('sys.argv: ' + repr(sys.argv))

parser = argparse.ArgumentParser(
    formatter_class=RawTextHelpFormatter,
    description="""Simple script to send {1} operator reports to Zabbix.
Should be used in {1}-dir config instead of mail command:
    mail = root@localhost,admin@domain = all, !skipped
    operatorcommand = "{0} [--recipients '%r']"
Hostnames in Zabbix and {1} must correspond
""".format(os.path.realpath(__file__), conf['type'].title()))

parser.add_argument('--recipients',
                    action='store',
                    type=lambda x: x.split(),
                    default=[],
                    help='space-separated list of report recipients (%%r)')

args = parser.parse_args()

msg = sys.stdin.read()

metrics = [
    Metric(conf['hostname'], "{0}.custommessage".format(conf['type']), msg)
]
logging.info("sending custom message to '{0}': '{1}'".format(
    conf['zabbix_server'], metrics))
send_to_zabbix(metrics, conf['zabbix_server'], 10051, 20)

if args.recipients:
    sendmail(msg, args.recipients)
Example #12
          output.append('{ "{#TYPE}":"%s","{#NAME}":"%s","{#ID}":"%s" }' % (discovery,x['ElementName'],x['Name']) )

    if discovery == 'baseSize':
      data = conn.EnumerateInstances('HuaSy_PrimordialStoragePool')
      for  x in data:
        if x['InstanceID'].startswith(serial):
          output.append('{ "{#TYPE}":"%s","{#NAME}":"%s","{#ID}":"%s" }' % (discovery,x['ElementName'],x['InstanceID']) )

    json = []
    json.append('{"data":[')

    for i, v in enumerate( output ):
      if i < len(output)-1:
        json.append(v+',')
      else:
        json.append(v)
    json.append(']}')

    json_string = ''.join(json)
    print(json_string) 

    trapper_key = 'svc.discovery.%s' % discovery
    debug_print('Sending to host=%s, key=%s' % (hostname, trapper_key))

    #send json to LLD trapper item with zbxsend module
    if debug:
      logging.basicConfig(level=logging.INFO)
    else:
      logging.basicConfig(level=logging.WARNING)	   
    send_to_zabbix([Metric(hostname, trapper_key, json_string)], 'localhost', 10051)
Example #13
result['{0}.job_exit_code'.format(conf['type'])] = args.job_exit_code

logging.debug(repr(in_msg))
# DEBUG
logging.debug(repr(result))

metrics = []
for key, value in result.items():
    #print args.job_name
    match2 = bool(re.search(
        '(F|f)ull',
        args.job_name))  # check whether the backup job is a Full Virtual (Archive) job
    if match2:
        metrics.append(
            Metric(conf['hostname'], '{0}[{1}]'.format(key + '-full',
                                                       args.job_name), value))
    match2 = bool(re.search('^Consolidate', args.job_name))
    if match2:
        metrics.append(
            Metric(conf['hostname'], '{0}[{1}]'.format(key + '-consolidate',
                                                       args.job_name), value))
    match2 = bool(re.search('Copy-Consolidated([0-9]{1,2})?', args.job_name))
    if match2:
        metrics.append(
            Metric(conf['hostname'], '{0}[{1}]'.format(key + '-copy',
                                                       args.job_name), value))
    else:
        metrics.append(
            Metric(conf['hostname'], '{0}[{1}]'.format(key, args.job_name),
                   value))
#print metrics
Example #14
def main():
    global args

    parser = argparse.ArgumentParser(
        description='Retrieve items for the etcd zabbix template')

    parser.add_argument('--host', default="localhost", help='host name')
    parser.add_argument('--zhost',
                        default="-",
                        help='host name as configured in Zabbix')
    parser.add_argument('--zserver', default="localhost", help='Zabbix server')
    parser.add_argument('--zport',
                        type=int,
                        default=10051,
                        help='Zabbix server trap port')
    parser.add_argument('--port', type=int, default=4001, help="service port")
    parser.add_argument('--url',
                        default="/v2/stats/store",
                        help="base url to be retrieved")
    parser.add_argument('--cert',
                        default="/etc/origin/master/master.etcd-client.crt",
                        help="ssl certificate file")
    parser.add_argument('--key',
                        default="/etc/origin/master/master.etcd-client.key",
                        help="ssl key file")
    parser.add_argument('--metrics',
                        nargs='+',
                        help="list of metrics to be retrieved")
    parser.add_argument('--metricnames',
                        nargs='+',
                        help="names of metrics to be retrieved")
    parser.add_argument('--verbose',
                        default=False,
                        action='store_true',
                        help='verbose output')

    args = parser.parse_args()

    n = min(len(args.metrics), len(args.metricnames))
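    # Only the first n metric/name pairs are used; extra entries in either list are ignored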

    jd = get_json(args.host, args.port, args.url, args.key, args.cert)

    if jd is None:
        sys.exit(-1)

    if args.verbose:
        print jd

    metrics = args.metrics[:n]
    metricnames = args.metricnames[:n]
    values = [get_metric_value(m, jd) for m in metrics]

    mobjs = [
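        # skip metrics whose value came back empty (tp[1] == "")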
        Metric(args.zhost, tp[0], tp[1]) for tp in zip(metricnames, values)
        if tp[1] != ""
    ]

    if args.verbose:
        for m in mobjs:
            print m.host, m.key, m.value

    if not send_to_zabbix(mobjs, args.zserver, args.zport):
        print "zabbix trapper error!"
        sys.exit(-1)

    print "%d metrics sent successfully!" % len(mobjs)
Example #15
def main():
    global args

    parser = argparse.ArgumentParser(
        description='Retrieve Zabbix stats about docker containers')

    parser.add_argument('--zhost',
                        default="-",
                        help='host name as configured in Zabbix')
    parser.add_argument('--zserver', default="localhost", help='Zabbix server')
    parser.add_argument('--zport',
                        type=int,
                        default=10051,
                        help='Zabbix server trap port')
    parser.add_argument('--nozsend',
                        default=False,
                        action='store_true',
                        help='do not send to Zabbix')
    parser.add_argument('--url',
                        default="unix://var/run/docker.sock",
                        help="base docker url")
    parser.add_argument('--metrics',
                        nargs='+',
                        help="list of metrics to be retrieved")
    parser.add_argument('--metricnames',
                        nargs='+',
                        help="names of metrics to be retrieved")
    parser.add_argument('--verbose',
                        default=False,
                        action='store_true',
                        help='verbose output')
    parser.add_argument('--image', help='container image')

    args = parser.parse_args()

    n = min(len(args.metrics), len(args.metricnames))

    if not args.image:
        print "error: need container image name!"
        sys.exit(-1)

    dc = docker.Client(base_url=args.url)

    conts = filter(lambda cnt: cnt['Image'] == args.image,
                   dc.containers(quiet=False))

    if len(conts) != 1:
        print "error: could not identify container with image=%s" % args.image
        sys.exit(-1)

    cid = conts[0]['Id']
    stats = dc.stats(cid)
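    # dc.stats(cid) yields a stream of stats samples; two samples taken one second apart feed get_metric()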

    st0 = stats.next()
    time.sleep(1)
    st1 = stats.next()

    if args.verbose:
        print "cid=%s" % cid
        print st0, st1

    env = {'null': ''}
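    # each stats sample is raw JSON text; eval() below parses it, with 'null' mapped to an empty string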

    metrics = args.metrics[:n]
    metricnames = args.metricnames[:n]
    values = [get_metric(m, eval(st0, env), eval(st1, env)) for m in metrics]

    zhost = "localhost" if args.zhost == '-' else args.zhost

    mobjs = [
        Metric(zhost, tp[0], tp[1]) for tp in zip(metricnames, values)
        if tp[1] != ""
    ]

    if args.verbose or args.nozsend:
        for m in mobjs:
            print args.zhost, m.key, m.value

    if not args.nozsend and send_to_zabbix(mobjs, args.zserver, args.zport):
        print "%d metrics sent successfully!" % len(mobjs)
Example #16
            result[key] = value(match)
            continue

if not result:
    # TODO: send email?
    logging.info("It is not a message about job")
    exit(0)

result['{0}.job_exit_code'.format(conf['type'])] = args.job_exit_code

logging.debug(repr(in_msg))
# DEBUG
logging.debug(repr(result))

metrics = []
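# one trapper metric per parsed field, with item key '<field>[<job name>]'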
for key, value in result.items():
    metrics.append(
        Metric(conf['hostname'], '{0}[{1}]'.format(key, args.job_name), value))

# Send result to zabbix
logging.info("sending metrics to '{0}': '{1}'".format(conf['zabbix_server'],
                                                      metrics))
send_to_zabbix(metrics, conf['zabbix_server'], 10051, 20)

# Send emails (if requested)
if (args.recipients
        and ((args.job_exit_code == 'OK' and args.email_on_success) or
             (args.job_exit_code != 'OK' and args.email_on_fail))):
    sendmail(args.job_name, args.job_type, args.job_level, args.job_exit_code,
             in_msg, args.recipients)