def main():
    p = optparse.OptionParser(
        version=VERSION, usage="usage: %prog [options] juniper_host carbon_host", description="Juniper graphite agent"
    )
    p.add_option(
        "--log-level",
        "-l",
        help="Logging level (critical | error | warning | info | debug) [%default]",
        choices=("critical", "error", "warning", "info", "debug"),
        dest="loglevel",
        default="info",
    )
    p.add_option("--log-filename", "-o", help="Logging output filename", dest="logfile")
    p.add_option(
        "-s",
        "--skip-upload",
        action="store_true",
        dest="skip_upload",
        default=False,
        help="Skip metric upload step [%default]",
    )
    p.add_option("-u", "--user", help="Username and password for iControl authentication", dest="user")
    p.add_option("-p", "--port", help="Carbon port [%default]", type="int", dest="carbon_port", default=2004)
    p.add_option(
        "-r",
        "--carbon-retries",
        help="Number of carbon server delivery attempts [%default]",
        type="int",
        dest="carbon_retries",
        default=2,
    )
    p.add_option(
        "-i",
        "--carbon-interval",
        help="Interval between carbon delivery attempts [%default]",
        type="int",
        dest="carbon_interval",
        default=30,
    )
    p.add_option("-c", "--chunk-size", help="Carbon chunk size [%default]", type="int", dest="chunk_size", default=500)
    p.add_option("-n", "--netconf-port", help="NetConf port [%default]", type="int", dest="netconf_port", default=8020)
    p.add_option("--prefix", help="Metric name prefix [juniper.juniper_host]", dest="prefix")

    options, arguments = p.parse_args()

    # right number of arguments?
    if len(arguments) != 2:
        p.error("wrong number of arguments: juniper_host and carbon_host required")

    LOGGING_LEVELS = {
        "critical": logging.CRITICAL,
        "error": logging.ERROR,
        "warning": logging.WARNING,
        "info": logging.INFO,
        "debug": logging.DEBUG,
    }
    loglevel = LOGGING_LEVELS.get(options.loglevel, logging.NOTSET)
    logging.basicConfig(
        level=loglevel,
        filename=options.logfile,
        format="%(asctime)s %(levelname)s: %(message)s",
        datefmt="%Y-%m-%d %H:%M:%S",
    )
    logging.getLogger("suds").setLevel(logging.CRITICAL)

    skip_upload = options.skip_upload
    logging.debug("skip_upload = %s" % skip_upload)
    chunk_size = options.chunk_size
    logging.debug("chunk_size = %s" % chunk_size)
    carbon_port = options.carbon_port
    logging.debug("carbon_port = %s" % carbon_port)
    carbon_retries = options.carbon_retries
    logging.debug("carbon_retries = %s" % carbon_retries)
    carbon_interval = options.carbon_interval
    logging.debug("carbon_interval = %s" % carbon_interval)
    netconf_port = options.netconf_port
    logging.debug("netconf_port = %s" % netconf_port)

    if not options.user:
        # empty or non-existent --user option
        # need to gather user and password
        user = raw_input("Enter username:"******"Enter password for user '%s':" % user)
    elif ":" in options.user:
        # --user option present with user and password
        user, password = options.user.split(":", 1)
    else:
        # --user option present with no password
        user = options.user
        password = getpass.getpass("Enter password for user '%s':" % user)
    logging.debug("user = %s" % user)
    logging.debug("password = %s" % password)

    juniper_host = arguments[0]
    logging.debug("juniper_host = %s" % juniper_host)

    if options.prefix:
        prefix = options.prefix.strip()
    else:
        scrubbed_juniper_host = juniper_host.replace(".", "_")
        logging.debug("scrubbed_juniper_host = %s" % scrubbed_juniper_host)
        prefix = "juniper.%s" % scrubbed_juniper_host
        logging.debug("prefix = %s" % prefix)

    carbon_host = arguments[1]
    logging.debug("carbon_host = %s" % carbon_host)

    start_timestamp = timestamp_local()
    logging.debug("start_timestamp = %s" % start_timestamp)

    metric_list = gather_juniper_metrics(juniper_host, user, password, netconf_port, prefix)

    if not skip_upload:
        upload_attempts = 0
        upload_success = False
        max_attempts = carbon_retries + 1
        while not upload_success and (upload_attempts < max_attempts):
            upload_attempts += 1
            logging.info("Uploading metrics (try #%d/%d)..." % (upload_attempts, max_attempts))
            try:
                send_metrics(carbon_host, carbon_port, metric_list, chunk_size)
            except Exception, detail:
                logging.error("Unable to upload metrics.")
                logging.debug("detail = %s" % detail)
                upload_success = False
                if upload_attempts < max_attempts:  # don't sleep on last run
                    logging.info("Sleeping %d seconds before retry..." % carbon_interval)
                    time.sleep(carbon_interval)
            else:
                upload_success = True
        if not upload_success:
            logging.error("Unable to upload metrics after %d attempts." % upload_attempts)
            logging.info("Saving collected data to local disk for later replay...")
            date_str = datetime.now().strftime("%Y%m%dT%H%M%S")
            logging.debug("date_str = %s" % date_str)
            write_json_metrics(metric_list, "%s_%s_fail.json" % (prefix, date_str))
VERSION = "1.3"


def gather_juniper_metrics(juniper_host, user, password, port, prefix):
    """ Connects to a Juniper via NetConf and pulls statistics.
    """
    metric_list = []

    try:
        logging.info("Connecting to %s and pulling statistics..." % juniper_host)
        j = manager.connect_ssh(juniper_host, username=user, password=password, port=port)
    except Exception, detail:
        logging.error("Unable to connect to %s." % juniper_host)
        logging.debug("detail = %s" % pformat(detail))
        return metric_list  # no session, so nothing to collect

    now = timestamp_local()
    logging.debug("Local timestamp is %s." % now)

    logging.info("Retrieving real-time performance monitoring probe statistics...")
    try:
        response = j.dispatch("get-probe-results")
        logging.debug("response = %s" % pformat(response))
    except Exception, detail:
        logging.error("Unable to retrieve probe results.")
        logging.debug("detail = %s" % pformat(detail))
        return metric_list  # no response to parse

    try:
        response_dict = xmltodict.parse(response.xml)
        logging.debug("response_dict = %s" % pformat(response_dict))
    except Exception, detail:
        logging.debug("detail = %s" % pformat(detail))
Example #3
def main():
    p = optparse.OptionParser(version=VERSION,
                              usage="usage: %prog [options] ltm_host carbon_host",
                              description="F5 BIG-IP graphite agent")
    p.add_option('--log-level', '-l',
                 help='Logging level (critical | error | warning | info | debug) [%default]',
                 choices=('critical', 'error', 'warning', 'info', 'debug'),
                 dest='loglevel', default="info")
    p.add_option('--log-filename', '-o', help='Logging output filename',
                 dest='logfile')
    p.add_option('-s', '--skip-upload', action="store_true", dest="skip_upload",
                 default=False, help="Skip metric upload step [%default]")
    p.add_option('-u', '--user', help='Username and password for iControl authentication', dest='user')
    p.add_option('-p', '--port', help="Carbon port [%default]", type="int", dest='carbon_port', default=2004)
    p.add_option('-r', '--carbon-retries', help="Number of carbon server delivery attempts [%default]", type="int", dest="carbon_retries", default=2)
    p.add_option('-i', '--carbon-interval', help="Interval between carbon delivery attempts [%default]", type="int", dest="carbon_interval", default=30)
    p.add_option('-c', '--chunk-size', help='Carbon chunk size [%default]', type="int", dest='chunk_size', default=500)
    p.add_option('-t', '--timestamp', help='Timestamp authority (local | remote) [%default]', type="choice", dest="ts_auth", choices=['local', 'remote'], default="local")
    p.add_option('--prefix', help="Metric name prefix [bigip.ltm_host]", dest="prefix")

    options, arguments = p.parse_args()

    # right number of arguments?
    if len(arguments) != 2:
        p.error("wrong number of arguments: ltm_host and carbon_host required")

    LOGGING_LEVELS = {'critical': logging.CRITICAL,
                      'error': logging.ERROR,
                      'warning': logging.WARNING,
                      'info': logging.INFO,
                      'debug': logging.DEBUG}
    loglevel = LOGGING_LEVELS.get(options.loglevel, logging.NOTSET)
    logging.basicConfig(level=loglevel, filename=options.logfile,
                        format='%(asctime)s %(levelname)s: %(message)s',
                        datefmt='%Y-%m-%d %H:%M:%S')
    logging.getLogger('suds').setLevel(logging.CRITICAL)

    skip_upload = options.skip_upload
    logging.debug("skip_upload = %s" % skip_upload)
    chunk_size = options.chunk_size
    logging.debug("chunk_size = %s" % chunk_size)
    carbon_port = options.carbon_port
    logging.debug("carbon_port = %s" % carbon_port)
    carbon_retries = options.carbon_retries
    logging.debug("carbon_retries = %s" % carbon_retries)
    carbon_interval = options.carbon_interval
    logging.debug("carbon_interval = %s" % carbon_interval)
    ts_auth = options.ts_auth.strip().lower()
    if ts_auth == "remote":
        remote_ts = True
    else:
        remote_ts = False
    logging.debug("timestamp_auth = %s" % ts_auth)
    logging.debug("remote_ts = %s" % remote_ts)

    if not options.user:
        # empty or non-existent --user option
        # need to gather user and password
        user = raw_input("Enter username:"******"Enter password for user '%s':" % user)
    elif ":" in options.user:
        # --user option present with user and password
        user, password = options.user.split(':', 1)
    else:
        # --user option present with no password
        user = options.user
        password = getpass.getpass("Enter password for user '%s':" % user)
    logging.debug("user = %s" % user)
    logging.debug("password = %s" % password)

    ltm_host = arguments[0]
    logging.debug("ltm_host = %s" % ltm_host)

    if options.prefix:
        prefix = options.prefix.strip()
    else:
        scrubbed_ltm_host = ltm_host.replace(".", "_")
        logging.debug("scrubbed_ltm_host = %s" % scrubbed_ltm_host)
        prefix = "bigip.%s" % scrubbed_ltm_host
        logging.debug("prefix = %s" % prefix)

    carbon_host = arguments[1]
    logging.debug("carbon_host = %s" % carbon_host)

    start_timestamp = timestamp_local()
    logging.debug("start_timestamp = %s" % start_timestamp)

    metric_list = gather_f5_metrics(ltm_host, user, password, prefix, remote_ts)

    if not skip_upload:
        upload_attempts = 0
        upload_success = False
        max_attempts = carbon_retries + 1
        while not upload_success and (upload_attempts < max_attempts):
            upload_attempts += 1
            logging.info("Uploading metrics (try #%d/%d)..." % (upload_attempts, max_attempts))
            try:
                send_metrics(carbon_host, carbon_port, metric_list, chunk_size)
            except Exception, detail:
                logging.error("Unable to upload metrics.")
                logging.debug("detail = %s" % detail)
                upload_success = False
                if upload_attempts < max_attempts:  # don't sleep on last run
                    logging.info("Sleeping %d seconds before retry..." % carbon_interval)
                    time.sleep(carbon_interval)
            else:
                upload_success = True
        if not upload_success:
            logging.error("Unable to upload metrics after %d attempts." % upload_attempts)
            logging.info("Saving collected data to local disk for later replay...")
            date_str = datetime.now().strftime("%Y%m%dT%H%M%S")
            logging.debug("date_str = %s" % date_str)
            write_json_metrics(metric_list, "%s_%s_fail.json" % (prefix, date_str))
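
send_metrics is called by every example but never shown. The default port of 2004 is carbon's pickle listener, which suggests the implementation pickles the metric list in length-prefixed chunks; the following is a sketch under that assumption, not the repository's actual code:

import pickle
import socket
import struct

def send_metrics(carbon_host, carbon_port, metric_list, chunk_size):
    # Carbon's pickle receiver expects a 4-byte big-endian payload length
    # followed by a pickled list of (path, (timestamp, value)) tuples.
    sock = socket.create_connection((carbon_host, carbon_port))
    try:
        for i in range(0, len(metric_list), chunk_size):
            payload = pickle.dumps(metric_list[i:i + chunk_size], 2)
            sock.sendall(struct.pack("!L", len(payload)) + payload)
    finally:
        sock.close()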
Example #4
    b.System.Session.set_active_folder(folder="/")

    # IP

    logging.info("Retrieving global IP statistics...")
    ip_stats = b.System.Statistics.get_ip_statistics()
    logging.debug("ip_stats =\n%s" % pformat(ip_stats))
    statistics = ip_stats['statistics']
    ts = ip_stats['time_stamp']
    if remote_ts:
        logging.info("Calculating epoch time from remote timestamp...")
        now = convert_to_epoch(ts['year'], ts['month'], ts['day'],
                               ts['hour'], ts['minute'], ts['second'], tz)
        logging.debug("Remote timestamp is %s." % now)
    else:
        now = timestamp_local()
        logging.debug("Local timestamp is %s." % now)
    for y in statistics:
        stat_name = y['type'].split("STATISTIC_")[-1].lower()
        high = y['value']['high']
        low = y['value']['low']
        stat_val = convert_to_64_bit(high, low)
        stat_path = "%s.protocol.ip.%s" % (prefix, stat_name)
        metric = (stat_path, (now, stat_val))
        logging.debug("metric = %s" % str(metric))
        metric_list.append(metric)

    # IPv6

    logging.info("Retrieving global IPv6 statistics...")
    ipv6_stats = b.System.Statistics.get_ipv6_statistics()
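
Two helpers do the heavy lifting in this loop. iControl reports 64-bit counters as signed 32-bit halves, and convert_to_epoch turns the device's wall-clock fields into epoch seconds. Plausible implementations follow (the pytz-style tz object is an assumption):

import calendar
from datetime import datetime

def convert_to_64_bit(high, low):
    # Recombine iControl's signed 32-bit halves into one 64-bit counter;
    # mask the low word so a negative low half doesn't corrupt the result.
    return (high << 32) | (low & 0xFFFFFFFF)

def convert_to_epoch(year, month, day, hour, minute, second, tz):
    # Assumes tz is a pytz timezone: localize the device's wall-clock
    # reading, then convert its UTC equivalent to epoch seconds.
    local_dt = tz.localize(datetime(year, month, day, hour, minute, second))
    return calendar.timegm(local_dt.utctimetuple())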
Example #5
def main():
    parser = get_parser()
    args = parser.parse_args()

    LOGGING_LEVELS = {'critical': logging.CRITICAL,
                      'error': logging.ERROR,
                      'warning': logging.WARNING,
                      'info': logging.INFO,
                      'debug': logging.DEBUG}
    loglevel = LOGGING_LEVELS.get(args.loglevel, logging.NOTSET)
    logging.basicConfig(level=loglevel, filename=args.logfile,
                        format='%(asctime)s %(levelname)s: [%(thread)d ' +
                        '%(module)s:%(funcName)s %(lineno)d] %(message)s',
                        datefmt='%Y-%m-%d %H:%M:%S')
    logging.getLogger('suds').setLevel(logging.CRITICAL)
    logging.debug("args = %s" % args)

    if args.ts_auth.strip().lower() == "remote":
        remote_ts = True
    else:
        remote_ts = False

    if args.prefix:
        prefix = args.prefix.strip()
    else:
        scrubbed_f5_host = args.f5_host.replace(".", "_")
        logging.debug("scrubbed_f5_host = %s" % scrubbed_f5_host)
        prefix = "bigip.%s" % scrubbed_f5_host
        logging.debug("prefix = %s" % prefix)

    if not args.sslverify:
        # Globally disable HTTPS certificate verification (Python 2.7.9+).
        import ssl
        ssl._create_default_https_context = ssl._create_unverified_context

    running = True
    while running:
        start_timestamp = timestamp_local()
        logging.debug("start_timestamp = %s" % start_timestamp)
        if args.active_only:
            cstate = get_cluster_state(args.f5_host, args.f5_username, args.f5_password)

        # On a standby unit, gather only a reduced metric set; the hard-coded
        # True flags below skip the traffic-dependent collections.
        if args.active_only and cstate != 'FAILOVER_STATE_ACTIVE':
            metric_list = gather_f5_metrics(args.f5_host, args.f5_username,
                                            args.f5_password, prefix, remote_ts,
                                            args.interfaces, args.no_ip,
                                            args.no_ipv6, args.no_icmp,
                                            args.no_icmpv6, args.no_tcp, True,
                                            True, args.no_interface,
                                            True, args.no_cpu, args.no_host,
                                            True, True, True, True, True)
        else:
            metric_list = gather_f5_metrics(args.f5_host, args.f5_username,
                                            args.f5_password, prefix, remote_ts,
                                            args.interfaces, args.no_ip,
                                            args.no_ipv6, args.no_icmp,
                                            args.no_icmpv6, args.no_tcp, args.no_tmm,
                                            args.no_client_ssl, args.no_interface,
                                            args.no_trunk, args.no_cpu, args.no_host,
                                            args.no_snat_pool,
                                            args.no_snat_translation,
                                            args.no_virtual_server, args.no_pool,
                                            args.no_pool_members)

        if not args.skip_upload and args.carbon_host:
            upload_attempts = 0
            upload_success = False
            max_attempts = args.carbon_retries + 1
            while not upload_success and (upload_attempts < max_attempts):
                upload_attempts += 1
                logging.info("Uploading metrics (try #%d/%d)..." %
                             (upload_attempts, max_attempts))
                try:
                    send_metrics(args.carbon_host, args.carbon_port, metric_list,
                                 args.chunk_size)
                except Exception, detail:
                    logging.error("Unable to upload metrics.")
                    logging.debug("detail = %s" % detail)
                    upload_success = False
                    if upload_attempts < max_attempts:  # don't sleep on last run
                        logging.info("Sleeping %d seconds before retry..." %
                                     args.carbon_interval)
                        time.sleep(args.carbon_interval)
                else:
                    upload_success = True
            if not upload_success:
                logging.error("Unable to upload metrics after %d attempts." %
                              upload_attempts)
                logging.info("Saving collected data to local disk for later " +
                             "replay...")
                date_str = datetime.now().strftime("%Y%m%dT%H%M%S")
                logging.debug("date_str = %s" % date_str)
                write_json_metrics(metric_list, "%s_%s_fail.json" %
                                   (prefix, date_str))
        else:
            logging.info("Dry-run or no carbon host provided -- skipping " +
                         "upload step.")

        finish_timestamp = timestamp_local()
        logging.debug("finish_timestamp = %s" % finish_timestamp)
        runtime = finish_timestamp - start_timestamp
        logging.info("Elapsed time in seconds is %d." % runtime)
        if args.loopInterval > 0:
            sleepDuration = args.loopInterval - runtime
            if sleepDuration > 0:
                logging.info("Sleeping for %d seconds" % sleepDuration)
                time.sleep(sleepDuration)
            else:
                logging.info("Sleep < 0 for loop of %d seconds - Not Sleeping" % args.loopInterval)
        else:
            running = False
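
Two helpers above are referenced without definitions: write_json_metrics persists a failed batch for later replay, and get_cluster_state asks the device for its failover state. Sketches follow, assuming the agent talks to the BIG-IP through the bigsuds iControl client:

import json
import bigsuds

def write_json_metrics(metric_list, filename):
    # Persist the (path, (timestamp, value)) tuples so a later run or a
    # replay script can resend them once carbon is reachable again.
    with open(filename, "w") as f:
        json.dump(metric_list, f)

def get_cluster_state(f5_host, username, password):
    # Assumes bigsuds; get_failover_state() returns strings such as
    # 'FAILOVER_STATE_ACTIVE' or 'FAILOVER_STATE_STANDBY'.
    b = bigsuds.BIGIP(hostname=f5_host, username=username, password=password)
    return b.System.Failover.get_failover_state()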
Example #6
    # IP

    if not no_ip:
        logging.info("Retrieving global IP statistics...")
        ip_stats = b.System.Statistics.get_ip_statistics()
        logging.debug("ip_stats =\n%s" % pformat(ip_stats))
        statistics = ip_stats['statistics']
        ts = ip_stats['time_stamp']
        if remote_ts:
            logging.info("Calculating epoch time from remote timestamp...")
            now = convert_to_epoch(ts['year'], ts['month'], ts['day'],
                                   ts['hour'], ts['minute'], ts['second'], tz)
            logging.debug("Remote timestamp is %s." % now)
        else:
            now = timestamp_local()
            logging.debug("Local timestamp is %s." % now)
        metric_path = cleanStatPath("%s.protocol.ip" % prefix)
        metrics = itterate_statistics(statistics, metric_path, now)
        metric_list.extend(metrics)
    else:
        logging.debug("Skipping IP...")

    # IPv6

    if not no_ipv6:
        logging.info("Retrieving global IPv6 statistics...")
        ipv6_stats = b.System.Statistics.get_ipv6_statistics()
        logging.debug("ipv6_stats =\n%s" % pformat(ipv6_stats))
        statistics = ipv6_stats['statistics']
        ts = ipv6_stats['time_stamp']
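
The listing cuts off here. itterate_statistics (the spelling is the repository's own) and cleanStatPath are not shown either, but the explicit per-stat loop in the earlier example suggests bodies along these lines (a sketch, not the actual code):

def itterate_statistics(statistics, metric_path, now):
    # Flatten an iControl statistics list into graphite metric tuples,
    # mirroring the per-stat loop shown in the earlier example.
    metrics = []
    for stat in statistics:
        name = stat['type'].split("STATISTIC_")[-1].lower()
        value = convert_to_64_bit(stat['value']['high'], stat['value']['low'])
        metrics.append(("%s.%s" % (metric_path, name), (now, value)))
    return metrics

def cleanStatPath(path):
    # Assumed: scrub characters that graphite would treat as separators.
    return path.replace(" ", "_").replace("/", "-")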