Example #1
def get_multiple_cpu_load(items, time_interval):
    """
    Gets the CPU load of netboxes, averaged over a time interval, and adds the
    result to the load properties of the items.

    :param items: A dictionary of {sysname: properties lazy_dict, ...}
    :param time_interval: A dict(start=..., end=...) describing the desired
                          time interval in terms valid to Graphite web.
    """
    target_map = {
        escape_metric_name(sysname): netbox
        for sysname, netbox in iteritems(items)
    }
    targets = []
    for sysname, netbox in iteritems(items):
        if not sysname:
            continue

        targets.extend([
            'highestMax(%s,1)' % path
            for path in (metric_path_for_cpu_load(sysname, '*', interval=5),
                         metric_path_for_cpu_utilization(sysname, '*'))
        ])

    _logger.debug("getting %s graphite cpu targets in chunks", len(targets))
    data = {}
    for chunk in chunks(targets, METRIC_CHUNK_SIZE):
        data.update(_get_metric_average(chunk, time_interval))

    # Graphite returns full target strings as keys; match each one back to its
    # netbox by looking for the escaped sysname within the key
    for key, value in iteritems(data):
        for sysname, netbox in iteritems(target_map):
            if sysname in key:
                if not is_nan(value):
                    netbox['load'] = value
                    break
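
A minimal usage sketch for the function above; the netbox property dicts and
the time interval below are illustrative stand-ins (the real callers pass lazy
dicts), not part of the original code:

netboxes = {
    'gw.example.org': {'sysname': 'gw.example.org'},
    'sw.example.org': {'sysname': 'sw.example.org'},
}
# any start/end specification that Graphite web accepts
interval = dict(start='-10min', end='now')

get_multiple_cpu_load(netboxes, interval)
for sysname, props in netboxes.items():
    # 'load' is only set when Graphite returned a non-NaN average
    print(sysname, props.get('load'))
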
Example #2
def get_multiple_link_load(items, time_interval):
    """
    Gets the link load of the interfaces, averaged over a time interval, and
    adds the result to the load properties of the items.

    :param items: A dictionary of {(sysname, ifname): properties lazy_dict, ...}
    :param time_interval: A dict(start=..., end=...) describing the desired
                          time interval in terms valid to Graphite web.
    """
    target_map = {}
    for (sysname, ifname), properties in items.items():
        if not (sysname and ifname):
            continue

        targets = [
            metric_path_for_interface(sysname, ifname, counter)
            for counter in ('ifInOctets', 'ifOutOctets')
        ]
        targets = [get_metric_meta(t)['target'] for t in targets]
        target_map.update({t: properties for t in targets})

    _logger.debug(
        "getting %s graphite traffic targets in chunks", len(target_map.keys())
    )
    data = {}
    for chunk in chunks(target_map.keys(), METRIC_CHUNK_SIZE):
        data.update(_get_metric_average(chunk, time_interval))

    for key, value in data.items():
        properties = target_map.get(key, None)
        if properties:
            if value:
                bps = value / MEGABIT
                if 'ifInOctets' in key:
                    properties['load_in'] = bps
                elif 'ifOutOctets' in key:
                    properties['load_out'] = bps
        else:
            _logger.error(
                "no match for key %r (%r) in data returned from graphite", key, value
            )

    missing = set(target_map).difference(data)
    if missing:
        _logger.debug("missed %d targets in graphite response", len(missing))
Example #3
def get_traffic_for(interfaces):
    """Get traffic average for the given interfaces using one request

    :param QueryDict interfaces: interfaces to fetch data for
    :returns: A dict of {interface: {suffix: value, suffix: value}}
    """
    metric_mapping = {}  # Store metric_name -> interface
    metrics = []
    traffic = defaultdict(dict)

    _logger.debug("preparing to get traffic data for %d interfaces",
                  len(interfaces))

    # assume transform is the same for all octet counters
    transform = get_metric_meta("." + INOCTETS)["transform"]

    for interface in interfaces:
        # what we need
        ifc_metrics = _get_traffic_counter_metrics_for(interface)
        metrics.extend(ifc_metrics)
        # what to look for in the response
        transformed = [transform.format(id=m) for m in ifc_metrics]
        metric_mapping.update({target: interface for target in transformed})

    targets = [transform.format(id=m) for m in _merge_metrics(sorted(metrics))]

    _logger.debug(
        "getting data for %d targets in chunks of %d",
        len(targets),
        MAX_TARGETS_PER_REQUEST,
    )

    data = {}
    for request in chunks(targets, MAX_TARGETS_PER_REQUEST):
        data.update(get_metric_average(request, start=TRAFFIC_TIMEPERIOD))

    _logger.debug("received %d metrics in response", len(data))

    for metric, value in data.items():
        interface = metric_mapping[metric]
        if INOCTETS in metric:
            traffic[interface].update({INOCTETS: value})
        elif OUTOCTETS in metric:
            traffic[interface].update({OUTOCTETS: value})

    return traffic
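
All three examples split their Graphite targets into batches with a chunks()
helper that is not shown here. A minimal sketch of such a helper, assuming it
only needs to yield lists of at most `size` items from any iterable (this is
an assumption, not the project's actual implementation):

from itertools import islice


def chunks(iterable, size):
    """Yield successive lists of at most `size` items from `iterable`."""
    iterator = iter(iterable)
    while True:
        chunk = list(islice(iterator, size))
        if not chunk:
            return
        yield chunk

Using islice keeps the helper usable with non-indexable iterables, such as the
dict key view passed in get_multiple_link_load.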