Code Example #1
def apply_colors(graph):
    '''
    update target colors in a clever, dynamic way. basically it's about defining
    colors for certain metrics (such as cpu idle metric = green), but since you
    can group by arbitrary things, you might have a graph comparing the idle
    values for different servers, in which case they should not be all green.

    # the graph will look something like:
        {
            'promoted_constants': {'type': 'update_time', 'plugin': 'carbon'},
            'from': '-24hours',
            'until': 'now',
            'constants': {'unit': 'ms', 'target_type': 'gauge'},
            'targets': [
                {
                    'id': u'carbon.agents.dfvimeographite2-a.avgUpdateTime',
                    'variables': {'agent': u'dfvimeographite2-a'},
                    'target': u'carbon.agents.dfvimeographite2-a.avgUpdateTime'
                },
                (...)
            ]
        }
    '''

    # color targets based on tags, even when, due to grouping, metrics with the
    # same tags (and thus colors) show up on the same graph
    rules_tags = [
        # http stuff, for swift and others
        [
            {},
            {
                'http_method': {
                    'GET': colors['blue'][0],
                    'HEAD': colors['yellow'][0],
                    'PUT': colors['green'][0],
                    'REPLICATE': colors['brown'][0],
                    'DELETE': colors['red'][0]
                }
            }
        ],
        [
            {'stat': ['upper', 'upper_90']},
            {
                'http_method': {
                    'GET': colors['blue'][1],
                    'HEAD': colors['yellow'][1],
                    'PUT': colors['green'][1],
                    'REPLICATE': colors['brown'][1],
                    'DELETE': colors['red'][1]
                }
            }
        ],
    ]

    # color targets based on tags, except when, due to grouping, metrics with the
    # same tags would show up on the same graph
    rules_unique_tags = [
        [
            {'unit': 'cpu_state'},
            {
                'type': {
                    'idle': colors['green'][0],
                    'user': colors['blue'][0],
                    'system': colors['blue'][1],
                    'nice': colors['purple'][0],
                    'softirq': colors['red'][0],
                    'irq': colors['red'][1],
                    'iowait': colors['orange'][0],
                    'guest': colors['white'],
                    'guest_nice': colors['white'],
                    'steal': '#FFA791'  # brighter red
                }
            }
        ],
        [
            {},
            {
                'mountpoint': {
                    '_var': colors['red'][0],
                    '_lib': colors['orange'][1],
                    '_boot': colors['blue'][0],
                    '_tmp': colors['purple'][0],
                    'root': colors['green'][0]
                }
            }
        ],
        [
            {'plugin': 'load'},
            {
                'type': {
                    '01': colors['red'][1],
                    '05': colors['red'][0],
                    '15': '#FFA791'  # brighter red
                }
            }
        ],
        [
            {'unit': 'ms'},
            {
                'type': {
                    'update_time': colors['turq'][0]
                }
            }
        ],
        [
            {'unit': 'freq_abs'},
            bin_set_color
        ]
    ]

    for target in graph['targets']:
        tags = dict(graph['constants'].items() + graph['promoted_constants'].items() + target['variables'].items())

        for action in get_action_on_rules_match(rules_unique_tags, tags):
            if callable(action):  # hasattr(action, '__call__'):
                action(graph, target)
            else:
                for (tag_key, matches) in action.items():
                    t = get_unique_tag_value(graph, target, tag_key)
                    if t is not None and t in matches:
                        target['color'] = matches[t]

        for action in get_action_on_rules_match(rules_tags, target):
            for (tag_key, matches) in action.items():
                t = get_tag_value(graph, target, tag_key)
                if t is not None and t in matches:
                    target['color'] = matches[t]

    return graph
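
The rule lists above are pairs of a match-dict and an action, evaluated by get_action_on_rules_match, which this example does not define. Below is a minimal sketch of such a matcher, assuming a rule fires when every key in its match-dict is present with the given value (or one of the listed values) and that an empty match-dict matches everything; the helper actually shipped with graph-explorer may differ (for instance, it is also called with a whole target dict above).

def get_action_on_rules_match(rules, subject):
    # Yield the action of every rule whose match-dict is satisfied by `subject`,
    # a dict of tag key -> value. A match value may be a single value or a
    # list/tuple of acceptable values; an empty match-dict matches anything.
    for (match, action) in rules:
        for (tag_key, expected) in match.items():
            actual = subject.get(tag_key)
            if isinstance(expected, (list, tuple, set)):
                if actual not in expected:
                    break
            elif actual != expected:
                break
        else:
            yield action

With this sketch, a target tagged stat=upper_90 matches both rules in rules_tags; the later (darker) color table wins simply because the caller overwrites target['color'] on each match.
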
Code Example #2
def apply_colors(graph):
    '''
    update target colors in a clever, dynamic way. basically it's about defining
    colors for certain metrics (such as cpu idle metric = green), but since you
    can group by arbitrary things, you might have a graph comparing the idle
    values for different servers, in which case they should not be all green.

    # the graph will look something like:
        {
            'promoted_constants': {'type': 'update_time', 'plugin': 'carbon'},
            'from': '-24hours',
            'until': 'now',
            'constants': {'what': 'ms', 'target_type': 'gauge'},
            'targets': [
                {
                    'id': u'carbon.agents.dfvimeographite2-a.avgUpdateTime',
                    'variables': {'agent': u'dfvimeographite2-a'},
                    'target': u'carbon.agents.dfvimeographite2-a.avgUpdateTime'
                },
                (...)
            ]
        }
    '''

    color_assign_cpu = {
        'idle': colors['green'][0],
        'user': colors['blue'][0],
        'system': colors['blue'][1],
        'nice': colors['purple'][0],
        'softirq': colors['red'][0],
        'irq': colors['red'][1],
        'iowait': colors['orange'][0],
        'guest': colors['white'],
        'guest_nice': colors['white'],
        'steal': '#FFA791'  # brighter red
    }

    color_assign_mountpoint = {
        '_var': colors['red'][0],
        '_lib': colors['orange'][1],
        '_boot': colors['blue'][0],
        '_tmp': colors['purple'][0],
        'root': colors['green'][0]
    }

    color_assign_load = {
        '01': colors['red'][1],
        '05': colors['red'][0],
        '15': '#FFA791'  # brighter red
    }

    color_assign_timing = {'update_time': colors['turq'][0]}

    # object_server, object_auditor, proxy_server [...?]
    color_assign_swift = [
        ({'m': 'GET',       'w': ('lower',    'timeouts', 'xfer')}, colors['blue'][0]),
        ({'m': 'GET',       'w': ('upper_90', 'errors')},           colors['blue'][1]),
        ({'m': 'HEAD',      'w': ('lower',    'timeouts', 'xfer')}, colors['yellow'][0]),
        ({'m': 'HEAD',      'w': ('upper_90', 'errors')},           colors['yellow'][1]),
        ({'m': 'PUT',       'w': ('lower',    'timeouts', 'xfer')}, colors['green'][0]),
        ({'m': 'PUT',       'w': ('upper_90', 'errors')},           colors['green'][1]),
        ({'m': 'REPLICATE', 'w': ('lower',    'timeouts', 'xfer')}, colors['brown'][0]),
        ({'m': 'REPLICATE', 'w': ('upper_90', 'errors')},           colors['brown'][1]),
        ({'m': 'DELETE',    'w': ('lower',    'timeouts', 'xfer')}, colors['red'][0]),
        ({'m': 'DELETE',    'w': ('upper_90', 'errors')},           colors['red'][1]),
        ({'w': 'async_pendings'},                                   colors['turq'][0])
    ]

    for (i, target) in enumerate(graph['targets']):
        if get_tag_value(graph, target, 'what') == 'cpu_state':
            t = get_unique_tag_value(graph, target, 'type')
            if t is not None:
                graph['targets'][i]['color'] = color_assign_cpu[t]

        if get_tag_value(graph, target, 'what') == 'ms':
            t = get_unique_tag_value(graph, target, 'type')
            if t in color_assign_timing:
                graph['targets'][i]['color'] = color_assign_timing[t]

        t = get_unique_tag_value(graph, target, 'mountpoint')
        if t is not None and t in color_assign_mountpoint:
            graph['targets'][i]['color'] = color_assign_mountpoint[t]

        if get_tag_value(graph, target, 'plugin') == 'load':
            t = get_unique_tag_value(graph, target, 'type')
            if t is not None and t in color_assign_load:
                graph['targets'][i]['color'] = color_assign_load[t]

        # swift
        # technically the use of the get [unique] tag values is not correct
        # here. a better approach would be just doing the matching and seeing
        # if we gave anything the same color, and then deal with that.  because
        # with multiple tags, one of which can have multiple values, etc,
        # things become a bit more complicated.  we basically want to know "is
        # there any other target in the graph that matches the same
        # conditions?"
        m = get_unique_tag_value(graph, target, 'http_method')
        w = get_tag_value(graph, target, 'what')
        if m is not None:
            t = {'m': m, 'w': w}
            for color in backend.get_action_on_rules_match(
                    color_assign_swift, t):
                graph['targets'][i]['color'] = color

    return graph
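
Both variants of apply_colors lean on get_tag_value and get_unique_tag_value, which are not shown. Here is a plausible sketch, assuming a target's effective tags are the union of the graph's constants, promoted constants and the target's own variables, and that the "unique" variant returns a value only when no other target on the same graph resolves to the same value (which is what keeps two idle lines from both turning green); the real helpers may differ.

def get_tag_value(graph, target, tag_key):
    # Look the tag up in the target's variables first, then in the graph-level
    # (promoted) constants; return None when it is absent everywhere.
    for source in (target['variables'],
                   graph.get('promoted_constants', {}),
                   graph.get('constants', {})):
        if tag_key in source:
            return source[tag_key]
    return None


def get_unique_tag_value(graph, target, tag_key):
    # Like get_tag_value, but only return the value if no *other* target on the
    # same graph resolves to the same value for this tag.
    value = get_tag_value(graph, target, tag_key)
    if value is None:
        return None
    for other in graph['targets']:
        if other is not target and get_tag_value(graph, other, tag_key) == value:
            return None
    return value
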
Code Example #3
File: app.py Project: atdt/graph-explorer
def build_graphs_from_targets(targets, query={}, target_modifiers=[]):
    # merge default options..
    defaults = {
        'group_by': [],
        'sum_by': [],
        'avg_over': None,
        'avg_by': [],
        'from': '-24hours',
        'to': 'now',
        'statement': 'graph',
        'limit_targets': 500
    }
    query = dict(defaults.items() + query.items())
    graphs = {}
    if not targets:
        return (graphs, query)
    group_by = query['group_by']
    sum_by = query['sum_by']
    avg_by = query['avg_by']
    avg_over = query['avg_over']
    # i'm gonna assume you never use seconds and that your datapoints are stored with
    # minutely resolution. later on we can use config options for this (or
    # better: somehow query graphite about it)
    # note, the day/week/month numbers are not technically accurate, but
    # since we're doing movingAvg that's ok
    averaging = {
        'M': 1,
        'h': 60,
        'd': 60 * 24,
        'w': 60 * 24 * 7,
        'mo': 60 * 24 * 30
    }
    if avg_over is not None:
        avg_over_amount = avg_over[0]
        avg_over_unit = avg_over[1]
        if avg_over_unit in averaging.keys():
            multiplier = averaging[avg_over_unit]
            target_modifier = ['movingAverage', str(avg_over_amount * multiplier)]
            target_modifiers.append(target_modifier)

    # for each combination of values of tags from group_by, make 1 graph with
    # all targets that have these values. so for each graph, we have:
    # the "constants": tags in the group_by
    # the "variables": tags not in the group_by, which can have arbitrary values
    # go through all targets and group them into graphs:
    for (i, target_id) in enumerate(sorted(targets.iterkeys())):
        constants = {}
        variables = {}
        target_data = targets[target_id]
        for (tag_name, tag_value) in target_data['tags'].items():
            if tag_name in group_by or '%s=' % tag_name in group_by:
                constants[tag_name] = tag_value
            else:
                variables[tag_name] = tag_value
        graph_key = '__'.join([target_data['tags'][tag_name] for tag_name in constants])
        if graph_key not in graphs:
            graph = {'from': query['from'], 'until': query['to']}
            graph.update({'constants': constants, 'targets': []})
            graphs[graph_key] = graph
        target = target_data['id']
        # set all options needed for timeserieswidget/flot:
        t = {
            'variables': variables,
            'id': target_data['id'],  # timeserieswidget doesn't care about this
            'target': target
        }
        if 'color' in target_data:
            t['color'] = target_data['color']
        graphs[graph_key]['targets'].append(t)

    # ok so now we have a graphs dictionary with a graph for every appropriate
    # combination of group_by tags, and each graph contains all targets that
    # should be shown on it.  but the user may have asked to aggregate certain
    # targets together, by summing and/or averaging across different values of
    # (a) certain tag(s). let's process the aggregations now.
    if (sum_by or avg_by):
        for (graph_key, graph_config) in graphs.items():
            graph_config['targets_sum_candidates'] = {}
            graph_config['targets_avg_candidates'] = {}
            graph_config['normal_targets'] = []
            all_targets = graph_config['targets'][:]  # Get a copy.

            for target in all_targets:
                # targets that can get summed together with other targets must
                # have at least 1 'sum_by' tag in the variables list.
                # targets that can get summed together must have:
                # * the same 'sum_by' tag keys (not values, because we
                # aggregate across different values for these tags)
                # * the same variables (key and val), except those vals that
                # are being summed by.
                # so for every group of sum_by tags and variables we build a
                # list of targets that can be summed together

                # of course it only makes sense to sum by tags that the target
                # actually has, and that are not already constants (meaning
                # every target in the graph has the same value)
                variables = target['variables'].keys()
                sum_constants = set(sum_by).intersection(set(variables))
                if sum_constants:
                    sum_constants_str = '_'.join(sorted(sum_constants))
                    variables_str = '_'.join(
                        ['%s_%s' % (k, target['variables'][k])
                            for k in sorted(variables)
                            if k not in sum_constants])
                    sum_id = '%s__%s' % (sum_constants_str, variables_str)
                    if sum_id not in graph_config['targets_sum_candidates']:
                        graphs[graph_key]['targets_sum_candidates'][sum_id] = []
                    graph_config['targets_sum_candidates'][sum_id].append(target)

            for (sum_id, targets) in graph_config['targets_sum_candidates'].items():
                if len(targets) > 1:
                    for t in targets:
                        all_targets.remove(t)
                    all_targets.append(
                        graphite_func_aggregate(targets, sum_by, "sumSeries"))

            for target in all_targets:
                # Now that any summing is done, we look at aggregating by
                # averaging because avg(foo+bar+baz) is more efficient
                # than avg(foo)+avg(bar)+avg(baz)
                # It's pretty similar to what happened above and aggregates
                # targets (whether those are sums or regular ones)
                variables = target['variables'].keys()
                avg_constants = set(avg_by).intersection(set(variables))
                if avg_constants:
                    avg_constants_str = '_'.join(sorted(avg_constants))
                    variables_str = '_'.join(
                        ['%s_%s' % (k, target['variables'][k])
                            for k in sorted(variables)
                            if k not in avg_constants])
                    # some values can be like 'sumSeries (8 values)' due to an
                    # earlier aggregation. if targets now have a different number of
                    # values matched, that doesn't matter and they should still
                    # be aggregated together if the rest of the conditions are met
                    variables_str = re.sub(r'\([0-9]+ values\)', '(Xvalues)', variables_str)
                    avg_id = '%s__%s' % (avg_constants_str, variables_str)
                    if avg_id not in graph_config['targets_avg_candidates']:
                        graph_config['targets_avg_candidates'][avg_id] = []
                    graph_config['targets_avg_candidates'][avg_id].append(target)

            for (avg_id, targets) in graph_config['targets_avg_candidates'].items():
                if len(targets) > 1:
                    for t in targets:
                        all_targets.remove(t)
                    all_targets.append(
                        graphite_func_aggregate(targets, avg_by, "averageSeries"))

            graph_config["targets"] = all_targets

    # remove targets/graphs over the limit
    graphs = graphs_limit_targets(graphs, query['limit_targets'])

    # Apply target modifiers (like movingAverage, summarize, ...)
    for (graph_key, graph_config) in graphs.items():
        for target in graph_config['targets']:
            for target_modifier in target_modifiers:
                target['target'] = "%s(%s,%s)" % (target_modifier[0],
                                                  target['target'],
                                                  ','.join(target_modifier[1:]))
    # if in a graph all targets have a tag with the same value, they are
    # effectively constants, so promote them.  this makes the display of the
    # graphs less redundant and makes it easier to do config/preferences
    # on a per-graph basis.
    for (graph_key, graph_config) in graphs.items():
        # get all variable tags throughout all targets in this graph
        tags_seen = set()
        for target in graph_config['targets']:
            for tag_name in target['variables'].keys():
                tags_seen.add(tag_name)

        # find effective constants from those variables,
        # and effective variables. (unset tag is a value too)
        first_values_seen = {}
        effective_variables = set()  # tags for which we've seen >1 values
        for target in graph_config['targets']:
            for tag_name in tags_seen:
                # already known that we can't promote, continue
                if tag_name in effective_variables:
                    continue
                tag_value = target['variables'].get(tag_name, None)
                if tag_name not in first_values_seen:
                    first_values_seen[tag_name] = tag_value
                elif tag_value != first_values_seen[tag_name]:
                    effective_variables.add(tag_name)
        effective_constants = tags_seen - effective_variables

        # promote the effective_constants by adjusting graph and targets:
        graphs[graph_key]['promoted_constants'] = {}
        for tag_name in effective_constants:
            graphs[graph_key]['promoted_constants'][tag_name] = first_values_seen[tag_name]
            for (i, target) in enumerate(graph_config['targets']):
                if tag_name in graphs[graph_key]['targets'][i]['variables']:
                    del graphs[graph_key]['targets'][i]['variables'][tag_name]

        # now that graph config is "rich", merge in settings from preferences
        constants = dict(graphs[graph_key]['constants'].items() + graphs[graph_key]['promoted_constants'].items())
        for graph_option in get_action_on_rules_match(preferences.graph_options, constants):
            if isinstance(graph_option, dict):
                graphs[graph_key].update(graph_option)
            else:
                graphs[graph_key] = graph_option(graphs[graph_key])
    return (graphs, query)
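
graphite_func_aggregate is used here but defined elsewhere; it folds a group of targets into a single Graphite sumSeries(...) or averageSeries(...) target. Below is a rough sketch modeled on the hand-rolled summing in Code Example #8 and on the 'sumSeries (8 values)' strings that the re.sub above expects; the exact shape of the returned target dict is an assumption.

def graphite_func_aggregate(targets, agg_by, func_name):
    # Combine several targets into one by wrapping their target strings in a
    # Graphite aggregation call, e.g. sumSeries(a,b,c) or averageSeries(a,b,c).
    aggregated = {
        'target': '%s(%s)' % (func_name, ','.join(t['target'] for t in targets)),
        'id': [t['id'] for t in targets],
        'variables': dict(targets[0]['variables']),
    }
    # the tags we aggregated across no longer have a single value:
    for tag in agg_by:
        aggregated['variables'][tag] = '%s (%d values)' % (func_name, len(targets))
    return aggregated
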
Code Example #4
File: app.py Project: xiian/graph-explorer
def build_graphs_from_targets(targets, query):
    graphs = {}
    if not targets:
        return (graphs, query)
    group_by = query['group_by']
    sum_by = query['sum_by']
    avg_by = query['avg_by']
    avg_over = query['avg_over']
    # i'm gonna assume you never use seconds and that your datapoints are stored with
    # minutely resolution. later on we can use config options for this (or
    # better: somehow query graphite about it)
    # note, the day/week/month numbers are not technically accurate, but
    # since we're doing movingAvg that's ok
    averaging = {
        'M': 1,
        'h': 60,
        'd': 60 * 24,
        'w': 60 * 24 * 7,
        'mo': 60 * 24 * 30
    }
    if avg_over is not None:
        avg_over_amount = avg_over[0]
        avg_over_unit = avg_over[1]
        if avg_over_unit in averaging.keys():
            multiplier = averaging[avg_over_unit]
            query['target_modifiers'].append(
                Query.graphite_function_applier('movingAverage', avg_over_amount * multiplier))

    # for each group_by bucket, make 1 graph.
    # so for each graph, we have:
    # the "constants": tags in the group_by
    # the "variables": tags not in the group_by, which can have arbitrary
    # values, or different values from a group_by tag that match the same
    # bucket pattern
    # go through all targets and group them into graphs:
    for _target_id, target_data in sorted(targets.items()):
        # FWIW. has an 'id' which timeserieswidget doesn't care about
        target = Target(target_data)
        target['target'] = target['id']

        (graph_key, constants) = target.get_graph_info(group_by)
        if graph_key not in graphs:
            graph = {'from': query['from'], 'until': query['to']}
            graph.update({'constants': constants, 'targets': []})
            graphs[graph_key] = graph
        graphs[graph_key]['targets'].append(target)

    # ok so now we have a graphs dictionary with a graph for every appropriate
    # combination of group_by tags, and each graph contains all targets that
    # should be shown on it.  but the user may have asked to aggregate certain
    # targets together, by summing and/or averaging across different values of
    # (a) certain tag(s). let's process the aggregations now.
    if (sum_by or avg_by):
        for (graph_key, graph_config) in graphs.items():
            graph_config['targets_sum_candidates'] = {}
            graph_config['targets_avg_candidates'] = {}
            graph_config['normal_targets'] = []

            for target in graph_config['targets']:
                sum_id = target.get_agg_key(sum_by)
                if sum_id:
                    if sum_id not in graph_config['targets_sum_candidates']:
                        graphs[graph_key]['targets_sum_candidates'][sum_id] = []
                    graph_config['targets_sum_candidates'][sum_id].append(target)

            for (sum_id, targets) in graph_config['targets_sum_candidates'].items():
                if len(targets) > 1:
                    for t in targets:
                        graph_config['targets'].remove(t)
                    graph_config['targets'].append(
                        graphite_func_aggregate(targets, sum_by, "sumSeries"))

            for target in graph_config['targets']:
                # Now that any summing is done, we look at aggregating by
                # averaging because avg(foo+bar+baz) is more efficient
                # than avg(foo)+avg(bar)+avg(baz)
                # aggregate targets (whether those are sums or regular ones)
                avg_id = target.get_agg_key(avg_by)
                if avg_id:
                    if avg_id not in graph_config['targets_avg_candidates']:
                        graph_config['targets_avg_candidates'][avg_id] = []
                    graph_config['targets_avg_candidates'][avg_id].append(target)

            for (avg_id, targets) in graph_config['targets_avg_candidates'].items():
                if len(targets) > 1:
                    for t in targets:
                        graph_config['targets'].remove(t)
                    graph_config['targets'].append(
                        graphite_func_aggregate(targets, avg_by, "averageSeries"))

    # remove targets/graphs over the limit
    graphs = graphs_limit_targets(graphs, query['limit_targets'])

    # Apply target modifiers (like movingAverage, summarize, ...)
    for (graph_key, graph_config) in graphs.items():
        for target in graph_config['targets']:
            for target_modifier in query['target_modifiers']:
                target_modifier(target, graph_config)

    # if in a graph all targets have a tag with the same value, they are
    # effectively constants, so promote them.  this makes the display of the
    # graphs less rendundant and makes it easier to do config/preferences
    # on a per-graph basis.
    for (graph_key, graph_config) in graphs.items():
        # get all variable tags throughout all targets in this graph
        tags_seen = set()
        for target in graph_config['targets']:
            for tag_name in target['variables'].keys():
                tags_seen.add(tag_name)

        # find effective constants from those variables,
        # and effective variables. (unset tag is a value too)
        first_values_seen = {}
        effective_variables = set()  # tags for which we've seen >1 values
        for target in graph_config['targets']:
            for tag_name in tags_seen:
                # already known that we can't promote, continue
                if tag_name in effective_variables:
                    continue
                tag_value = target['variables'].get(tag_name, None)
                if tag_name not in first_values_seen:
                    first_values_seen[tag_name] = tag_value
                elif tag_value != first_values_seen[tag_name]:
                    effective_variables.add(tag_name)
        effective_constants = tags_seen - effective_variables

        # promote the effective_constants by adjusting graph and targets:
        graph_config['promoted_constants'] = {}
        for tag_name in effective_constants:
            graph_config['promoted_constants'][tag_name] = first_values_seen[tag_name]
            for target in graph_config['targets']:
                target['variables'].pop(tag_name, None)

        # now that graph config is "rich", merge in settings from preferences
        constants = dict(graph_config['constants'].items() + graph_config['promoted_constants'].items())
        for graph_option in get_action_on_rules_match(preferences.graph_options, constants):
            if isinstance(graph_option, dict):
                graph_config.update(graph_option)
            else:
                graph_config = graphs[graph_key] = graph_option(graph_config)

        # but, the query may override some preferences:
        override = {}
        if query['statement'] == 'lines':
            override['state'] = 'lines'
        if query['statement'] == 'stack':
            override['state'] = 'stacked'
        if query['min'] is not None:
            override['yaxis'] = override.get('yaxis', {})
            override['yaxis'].update({'min': convert.parse_str(query['min'])})
        if query['max'] is not None:
            override['yaxis'] = override.get('yaxis', {})
            override['yaxis'].update({'max': convert.parse_str(query['max'])})

        graphs[graph_key].update(override)

    # now that some constants are promoted, we can give the graph more
    # unique keys based on all (original + promoted) constants. this is in
    # line with the meaning of the graph ("all targets with those constant
    # tags"), but more importantly: this fixes cases where some graphs
    # would otherwise have the same key, even though they have a different
    # set of constants, this can manifest itself on dashboard pages where
    # graphs for different queries are shown.
    # note that we can't just compile constants + promoted_constants,
    # part of the original graph key is also set by the group by (which, by
    # means of the bucket patterns doesn't always translate into constants),
    # we solve this by just including the old key.
    new_graphs = {}
    for (graph_key, graph_config) in graphs.items():
        new_key = ','.join('%s=%s' % i for i in graph_config['promoted_constants'].items())
        new_key = '%s__%s' % (graph_key, new_key)
        new_graphs[new_key] = graph_config
    graphs = new_graphs

    return (graphs, query)
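
Unlike the other variants, this one stores target modifiers as callables: Query.graphite_function_applier('movingAverage', n) produces one, and it is later invoked as target_modifier(target, graph_config). Below is a minimal sketch of such a factory, written as a free function for brevity, assuming it simply wraps target['target'] in the named Graphite function; the real Query class is not shown in these examples.

def graphite_function_applier(func_name, *args):
    # Return a target modifier: a callable that rewrites target['target'] by
    # wrapping it in a Graphite function call, e.g. movingAverage(<target>,60).
    def modifier(target, graph_config):
        params = ','.join(str(a) for a in args)
        if params:
            target['target'] = '%s(%s,%s)' % (func_name, target['target'], params)
        else:
            target['target'] = '%s(%s)' % (func_name, target['target'])
    return modifier
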
Code Example #5
def build_graphs_from_targets(targets, query={}, target_modifiers=[]):
    # merge default options..
    defaults = {
        'group_by': [],
        'sum_by': [],
        'avg_over': None,
        'avg_by': [],
        'from': '-24hours',
        'to': 'now',
        'statement': 'graph',
        'limit_targets': 500
    }
    query = dict(defaults.items() + query.items())
    graphs = {}
    if not targets:
        return (graphs, query)
    group_by = query['group_by']
    sum_by = query['sum_by']
    avg_by = query['avg_by']
    avg_over = query['avg_over']
    # i'm gonna assume you never use seconds and that your datapoints are stored with
    # minutely resolution. later on we can use config options for this (or
    # better: somehow query graphite about it)
    # note, the day/week/month numbers are not technically accurate, but
    # since we're doing movingAvg that's ok
    averaging = {
        'M': 1,
        'h': 60,
        'd': 60 * 24,
        'w': 60 * 24 * 7,
        'mo': 60 * 24 * 30
    }
    if avg_over is not None:
        avg_over_amount = avg_over[0]
        avg_over_unit = avg_over[1]
        if avg_over_unit in averaging.keys():
            multiplier = averaging[avg_over_unit]
            target_modifier = [
                'movingAverage',
                str(avg_over_amount * multiplier)
            ]
            target_modifiers.append(target_modifier)

    # for each combination of values of tags from group_by, make 1 graph with
    # all targets that have these values. so for each graph, we have:
    # the "constants": tags in the group_by
    # the "variables": tags not in the group_by, which can have arbitrary values
    # go through all targets and group them into graphs:
    for (i, target_id) in enumerate(sorted(targets.iterkeys())):
        constants = {}
        variables = {}
        target_data = targets[target_id]
        for (tag_name, tag_value) in target_data['tags'].items():
            if tag_name in group_by or '%s=' % tag_name in group_by:
                constants[tag_name] = tag_value
            else:
                variables[tag_name] = tag_value
        graph_key = '__'.join(
            [target_data['tags'][tag_name] for tag_name in constants])
        if graph_key not in graphs:
            graph = {'from': query['from'], 'until': query['to']}
            graph.update({'constants': constants, 'targets': []})
            graphs[graph_key] = graph
        target = target_data['id']
        # set all options needed for timeserieswidget/flot:
        t = {
            'variables': variables,
            'id': target_data['id'],  # timeserieswidget doesn't care about this
            'target': target
        }
        if 'color' in target_data:
            t['color'] = target_data['color']
        graphs[graph_key]['targets'].append(t)

    # ok so now we have a graphs dictionary with a graph for every appropriate
    # combination of group_by tags, and each graph contains all targets that
    # should be shown on it.  but the user may have asked to aggregate certain
    # targets together, by summing and/or averaging across different values of
    # (a) certain tag(s). let's process the aggregations now.
    if (sum_by or avg_by):
        for (graph_key, graph_config) in graphs.items():
            graph_config['targets_sum_candidates'] = {}
            graph_config['targets_avg_candidates'] = {}
            graph_config['normal_targets'] = []
            all_targets = graph_config['targets'][:]  # Get a copy.

            for target in all_targets:
                # targets that can get summed together with other targets must
                # have at least 1 'sum_by' tag in the variables list.
                # targets that can get summed together must have:
                # * the same 'sum_by' tag keys (not values, because we
                # aggregate across different values for these tags)
                # * the same variables (key and val), except those vals that
                # are being summed by.
                # so for every group of sum_by tags and variables we build a
                # list of targets that can be summed together

                # of course it only makes sense to sum by tags that the target
                # actually has, and that are not already constants (meaning
                # every target in the graph has the same value)
                variables = target['variables'].keys()
                sum_constants = set(sum_by).intersection(set(variables))
                if sum_constants:
                    sum_constants_str = '_'.join(sorted(sum_constants))
                    variables_str = '_'.join([
                        '%s_%s' % (k, target['variables'][k])
                        for k in sorted(variables) if k not in sum_constants
                    ])
                    sum_id = '%s__%s' % (sum_constants_str, variables_str)
                    if sum_id not in graph_config['targets_sum_candidates']:
                        graphs[graph_key]['targets_sum_candidates'][
                            sum_id] = []
                    graph_config['targets_sum_candidates'][sum_id].append(
                        target)

            for (sum_id,
                 targets) in graph_config['targets_sum_candidates'].items():
                if len(targets) > 1:
                    for t in targets:
                        all_targets.remove(t)
                    all_targets.append(
                        graphite_func_aggregate(targets, sum_by, "sumSeries"))

            for target in all_targets:
                # Now that any summing is done, we look at aggregating by
                # averaging because avg(foo+bar+baz) is more efficient
                # than avg(foo)+avg(bar)+avg(baz)
                # It's pretty similar to what happened above and aggregates
                # targets (whether those are sums or regular ones)
                variables = target['variables'].keys()
                avg_constants = set(avg_by).intersection(set(variables))
                if avg_constants:
                    avg_constants_str = '_'.join(sorted(avg_constants))
                    variables_str = '_'.join([
                        '%s_%s' % (k, target['variables'][k])
                        for k in sorted(variables) if k not in avg_constants
                    ])
                    # some values can be like 'sumSeries (8 values)' due to an
                    # earlier aggregation. if targets now have a different number of
                    # values matched, that doesn't matter and they should still
                    # be aggregated together if the rest of the conditions are met
                    variables_str = re.sub(r'\([0-9]+ values\)', '(Xvalues)', variables_str)
                    avg_id = '%s__%s' % (avg_constants_str, variables_str)
                    if avg_id not in graph_config['targets_avg_candidates']:
                        graph_config['targets_avg_candidates'][avg_id] = []
                    graph_config['targets_avg_candidates'][avg_id].append(
                        target)

            for (avg_id,
                 targets) in graph_config['targets_avg_candidates'].items():
                if len(targets) > 1:
                    for t in targets:
                        all_targets.remove(t)
                    all_targets.append(
                        graphite_func_aggregate(targets, avg_by,
                                                "averageSeries"))

            graph_config["targets"] = all_targets

    # remove targets/graphs over the limit
    graphs = graphs_limit_targets(graphs, query['limit_targets'])

    # Apply target modifiers (like movingAverage, summarize, ...)
    for (graph_key, graph_config) in graphs.items():
        for target in graph_config['targets']:
            for target_modifier in target_modifiers:
                target['target'] = "%s(%s,%s)" % (target_modifier[0],
                                                  target['target'], ','.join(
                                                      target_modifier[1:]))
    # if in a graph all targets have a tag with the same value, they are
    # effectively constants, so promote them.  this makes the display of the
    # graphs less redundant and makes it easier to do config/preferences
    # on a per-graph basis.
    for (graph_key, graph_config) in graphs.items():
        # get all variable tags throughout all targets in this graph
        tags_seen = set()
        for target in graph_config['targets']:
            for tag_name in target['variables'].keys():
                tags_seen.add(tag_name)

        # find effective constants from those variables,
        # and effective variables. (unset tag is a value too)
        first_values_seen = {}
        effective_variables = set()  # tags for which we've seen >1 values
        for target in graph_config['targets']:
            for tag_name in tags_seen:
                # already known that we can't promote, continue
                if tag_name in effective_variables:
                    continue
                tag_value = target['variables'].get(tag_name, None)
                if tag_name not in first_values_seen:
                    first_values_seen[tag_name] = tag_value
                elif tag_value != first_values_seen[tag_name]:
                    effective_variables.add(tag_name)
        effective_constants = tags_seen - effective_variables

        # promote the effective_constants by adjusting graph and targets:
        graphs[graph_key]['promoted_constants'] = {}
        for tag_name in effective_constants:
            graphs[graph_key]['promoted_constants'][
                tag_name] = first_values_seen[tag_name]
            for (i, target) in enumerate(graph_config['targets']):
                if tag_name in graphs[graph_key]['targets'][i]['variables']:
                    del graphs[graph_key]['targets'][i]['variables'][tag_name]

        # now that graph config is "rich", merge in settings from preferences
        constants = dict(graphs[graph_key]['constants'].items() +
                         graphs[graph_key]['promoted_constants'].items())
        for graph_option in get_action_on_rules_match(
                preferences.graph_options, constants):
            if isinstance(graph_option, dict):
                graphs[graph_key].update(graph_option)
            else:
                graphs[graph_key] = graph_option(graphs[graph_key])
    return (graphs, query)
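
graphs_limit_targets(graphs, limit) is called in several of these examples but never shown. One plausible reading, given the comment "remove targets/graphs over the limit", is that the limit caps the total number of targets across all graphs and that graphs left without targets are dropped; the sketch below makes that assumption and the real function may slice differently.

def graphs_limit_targets(graphs, limit_targets):
    # Walk graphs in a stable order and keep at most `limit_targets` targets in
    # total; drop any graph that loses all of its targets.
    limited = {}
    budget = limit_targets
    for graph_key in sorted(graphs.keys()):
        if budget <= 0:
            break
        graph = graphs[graph_key]
        kept = graph['targets'][:budget]
        budget -= len(kept)
        if kept:
            graph = dict(graph)
            graph['targets'] = kept
            limited[graph_key] = graph
    return limited
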
Code Example #6
File: app.py Project: Finkregh/graph-explorer
def build_graphs_from_targets(targets, query={}):
    # merge default options..
    defaults = {
        'group_by': [],
        'from': '-24hours',
        'to': 'now',
        'statement': 'graph',
        'limit_targets': 500
    }
    query = dict(defaults.items() + query.items())
    graphs = {}
    if not targets:
        return (graphs, query)
    group_by = query['group_by']
    # for each combination of values of tags from group_by, make 1 graph with
    # all targets that have these values. so for each graph, we have:
    # the "constants": tags in the group_by
    # the "variables": tags not in the group_by, which can have arbitrary values
    # go through all targets and group them into graphs:
    for (i, target_id) in enumerate(sorted(targets.iterkeys())):
        constants = {}
        variables = {}
        target_data = targets[target_id]
        for (tag_name, tag_value) in target_data['tags'].items():
            if tag_name in group_by or '%s=' % tag_name in group_by:
                constants[tag_name] = tag_value
            else:
                variables[tag_name] = tag_value
        graph_key = '__'.join([target_data['tags'][tag_name] for tag_name in constants])
        if graph_key not in graphs:
            graph = {'from': query['from'], 'until': query['to']}
            graph.update({'constants': constants, 'targets': []})
            graphs[graph_key] = graph
        # set all options needed for timeserieswidget/flot:
        t = {
            'variables': variables,
            'graphite_metric': target_data['graphite_metric'],
            'target': target_data['target']
        }
        if 'color' in target_data:
            t['color'] = target_data['color']
        graphs[graph_key]['targets'].append(t)

        if i + 1 == query['limit_targets']:
            break
    # if in a graph all targets have a tag with the same value, they are
    # effectively constants, so promote them.  this makes the display of the
    # graphs less redundant and paves the path
    # for later configuration on a per-graph basis.
    for (graph_key, graph_config) in graphs.items():
        # get all variable tags throughout all targets in this graph
        tags_seen = set()
        for target in graph_config['targets']:
            for tag_name in target['variables'].keys():
                tags_seen.add(tag_name)

        # find effective constants from those variables,
        # and effective variables. (unset tag is a value too)
        first_values_seen = {}
        effective_variables = set()  # tags for which we've seen >1 values
        for target in graph_config['targets']:
            for tag_name in tags_seen:
                # already known that we can't promote, continue
                if tag_name in effective_variables:
                    continue
                tag_value = target['variables'].get(tag_name, None)
                if tag_name not in first_values_seen:
                    first_values_seen[tag_name] = tag_value
                elif tag_value != first_values_seen[tag_name]:
                    effective_variables.add(tag_name)
        effective_constants = tags_seen - effective_variables

        # promote the effective_constants by adjusting graph and targets:
        graphs[graph_key]['promoted_constants'] = {}
        for tag_name in effective_constants:
            graphs[graph_key]['promoted_constants'][tag_name] = first_values_seen[tag_name]
            for (i, target) in enumerate(graph_config['targets']):
                if tag_name in graphs[graph_key]['targets'][i]['variables']:
                    del graphs[graph_key]['targets'][i]['variables'][tag_name]

        # now that graph config is "rich", merge in settings from preferences
        constants = dict(graphs[graph_key]['constants'].items() + graphs[graph_key]['promoted_constants'].items())
        for graph_option in get_action_on_rules_match(preferences.graph_options, constants):
            if isinstance(graph_option, dict):
                graphs[graph_key].update(graph_option)
            else:
                graphs[graph_key] = graph_option(graphs[graph_key])
    return (graphs, query)
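
Each build_graphs_from_targets variant ends by merging per-graph settings from preferences.graph_options through the same rule matcher used for colors: a dict action is merged with update(), anything else is treated as a callable that rewrites the graph config. The rule list itself is not part of these examples; the sketch below only illustrates the shape it would have to take, reusing option names ('state', 'yaxis') that appear in Code Example #4's query overrides.

# Illustrative content for preferences.graph_options; the actual rules are
# site-specific configuration, not shown in any of these snippets.
graph_options = [
    # cpu graphs: stack the states and pin the y-axis at zero
    [{'unit': 'cpu_state'}, {'state': 'stacked', 'yaxis': {'min': 0}}],
    # carbon graphs: a callable can rewrite the whole graph config instead
    [{'plugin': 'carbon'}, lambda graph: dict(graph, state='lines')],
]
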
Code Example #7
def apply_colors(graph):
    '''
    update target colors in a clever, dynamic way. basically it's about defining
    colors for certain metrics (such as cpu idle metric = green), but since you
    can group by arbitrary things, you might have a graph comparing the idle
    values for different servers, in which case they should not be all green.

    # the graph will look something like:
        {
            'promoted_constants': {'type': 'update_time', 'plugin': 'carbon'},
            'from': '-24hours',
            'until': 'now',
            'constants': {'what': 'ms', 'target_type': 'gauge'},
            'targets': [
                {
                    'graphite_metric': u'carbon.agents.dfvimeographite2-a.avgUpdateTime',
                    'variables': {'agent': u'dfvimeographite2-a'},
                    'target': u'carbon.agents.dfvimeographite2-a.avgUpdateTime'
                },
                (...)
            ]
        }
    '''

    color_assign_cpu = {
        'idle': colors['green'][0],
        'user': colors['blue'][0],
        'system': colors['blue'][1],
        'nice': colors['purple'][0],
        'softirq': colors['red'][0],
        'irq': colors['red'][1],
        'iowait': colors['orange'][0],
        'guest': colors['white'],
        'guest_nice': colors['white'],
        'steal': '#FFA791'  # brighter red
    }

    color_assign_mountpoint = {
        '_var': colors['red'][0],
        '_lib': colors['orange'][1],
        '_boot': colors['blue'][0],
        '_tmp': colors['purple'][0],
        'root': colors['green'][0]
    }

    color_assign_load = {
        '01': colors['red'][1],
        '05': colors['red'][0],
        '15': '#FFA791'  # brighter red
    }

    color_assign_timing = {
        'update_time': colors['turq'][0]
    }

    # object_server, object_auditor, proxy_server [...?]
    color_assign_swift = [
        ({'m': 'GET',       'w': ('lower',    'timeouts', 'xfer')}, colors['blue'][0]),
        ({'m': 'GET',       'w': ('upper_90', 'errors')},           colors['blue'][1]),
        ({'m': 'HEAD',      'w': ('lower',    'timeouts', 'xfer')}, colors['yellow'][0]),
        ({'m': 'HEAD',      'w': ('upper_90', 'errors')},           colors['yellow'][1]),
        ({'m': 'PUT',       'w': ('lower',    'timeouts', 'xfer')}, colors['green'][0]),
        ({'m': 'PUT',       'w': ('upper_90', 'errors')},           colors['green'][1]),
        ({'m': 'REPLICATE', 'w': ('lower',    'timeouts', 'xfer')}, colors['brown'][0]),
        ({'m': 'REPLICATE', 'w': ('upper_90', 'errors')},           colors['brown'][1]),
        ({'m': 'DELETE',    'w': ('lower',    'timeouts', 'xfer')}, colors['red'][0]),
        ({'m': 'DELETE',    'w': ('upper_90', 'errors')},           colors['red'][1]),
        ({'w': 'async_pendings'},                                   colors['turq'][0])
    ]

    for (i, target) in enumerate(graph['targets']):
        if get_tag_value(graph, target, 'what') == 'cpu_state':
            t = get_unique_tag_value(graph, target, 'type')
            if t is not None:
                graph['targets'][i]['color'] = color_assign_cpu[t]

        if get_tag_value(graph, target, 'what') == 'ms':
            t = get_unique_tag_value(graph, target, 'type')
            if t in color_assign_timing:
                graph['targets'][i]['color'] = color_assign_timing[t]

        t = get_unique_tag_value(graph, target, 'mountpoint')
        if t is not None and t in color_assign_mountpoint:
            graph['targets'][i]['color'] = color_assign_mountpoint[t]

        if get_tag_value(graph, target, 'plugin') == 'load':
            t = get_unique_tag_value(graph, target, 'type')
            if t is not None and t in color_assign_load:
                graph['targets'][i]['color'] = color_assign_load[t]

        # swift
        # technically the use of the get [unique] tag values is not correct
        # here. a better approach would be just doing the matching and seeing
        # if we gave anything the same color, and then deal with that.  because
        # with multiple tags, one of which can have multiple values, etc,
        # things become a bit more complicated.  we basically want to know "is
        # there any other target in the graph that matches the same
        # conditions?"
        m = get_unique_tag_value(graph, target, 'http_method')
        w = get_tag_value(graph, target, 'what')
        if m is not None:
            t = {'m': m, 'w': w}
            for color in backend.get_action_on_rules_match(color_assign_swift, t):
                graph['targets'][i]['color'] = color

    return graph
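
All of these snippets index a module-level colors palette by name and shade, e.g. colors['blue'][0] and colors['blue'][1], while colors['white'] is used bare. The palette is therefore presumably a dict mapping a color name to a pair of hex shades, with 'white' as a single string; the hex values below are placeholders, not graph-explorer's actual palette.

# Illustrative palette only; the real hex values in graph-explorer differ.
colors = {
    'blue':   ('#5C9DFF', '#0000B2'),
    'yellow': ('#FFFFB2', '#FFFF00'),
    'green':  ('#80CC80', '#009900'),
    'brown':  ('#694C2E', '#A59482'),
    'red':    ('#FF5C33', '#B24024'),
    'purple': ('#FF94FF', '#995999'),
    'turq':   ('#75ACAC', '#197575'),
    'orange': ('#FFC266', '#FF9900'),
    'white':  '#FFFFFF',
}
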
Code Example #8
File: app.py Project: hangongithub/graph-explorer
def build_graphs_from_targets(targets, query={}):
    # merge default options..
    defaults = {
        'group_by': [],
        'sum_by': [],
        'from': '-24hours',
        'to': 'now',
        'statement': 'graph',
        'limit_targets': 500
    }
    query = dict(defaults.items() + query.items())
    graphs = {}
    if not targets:
        return (graphs, query)
    group_by = query['group_by']
    sum_by = query['sum_by']
    # for each combination of values of tags from group_by, make 1 graph with
    # all targets that have these values. so for each graph, we have:
    # the "constants": tags in the group_by
    # the "variables": tags not in the group_by, which can have arbitrary values
    # go through all targets and group them into graphs:
    for (i, target_id) in enumerate(sorted(targets.iterkeys())):
        constants = {}
        variables = {}
        target_data = targets[target_id]
        for (tag_name, tag_value) in target_data['tags'].items():
            if tag_name in group_by or '%s=' % tag_name in group_by:
                constants[tag_name] = tag_value
            else:
                variables[tag_name] = tag_value
        graph_key = '__'.join([target_data['tags'][tag_name] for tag_name in constants])
        if graph_key not in graphs:
            graph = {'from': query['from'], 'until': query['to']}
            graph.update({'constants': constants, 'targets': []})
            graphs[graph_key] = graph
        # set all options needed for timeserieswidget/flot:
        t = {
            'variables': variables,
            'graphite_metric': target_data['id'],
            'target': target_data['id']
        }
        if 'color' in target_data:
            t['color'] = target_data['color']
        graphs[graph_key]['targets'].append(t)

    # sum targets together if appropriate
    if len(query['sum_by']):
        for (graph_key, graph_config) in graphs.items():
            graph_config['targets_sum_candidates'] = {}
            graph_config['normal_targets'] = []
            for target in graph_config['targets']:
                # targets that can get summed together with other targets must
                # have at least 1 'sum_by' tag in the variables list.
                # targets that can get summed together must have:
                # * the same 'sum_by' tags
                # * the same variables (key and val), except those vals that
                # are being summed by.
                # so for every group of sum_by tags and variables we build a
                # list of targets that can be summed together
                sum_constants = set(query['sum_by']).intersection(set(target['variables'].keys()))
                if sum_constants:
                    sum_constants_str = '_'.join(sorted(sum_constants))
                    variables_str = '_'.join(['%s_%s' % (k, target['variables'][k]) for k in sorted(target['variables'].keys()) if k not in sum_constants])
                    sum_id = '%s__%s' % (sum_constants_str, variables_str)
                    if sum_id not in graphs[graph_key]['targets_sum_candidates']:
                        graphs[graph_key]['targets_sum_candidates'][sum_id] = []
                    graphs[graph_key]['targets_sum_candidates'][sum_id].append(target)
                else:
                    graph_config['normal_targets'].append(target)
            graph_config['targets'] = graph_config['normal_targets']
            for (sum_id, targets) in graphs[graph_key]['targets_sum_candidates'].items():
                if (len(targets) == 1):
                    graph_config['targets'].append(targets[0])
                else:
                    t = {
                        'target': 'sumSeries(%s)' % (','.join([t['graphite_metric'] for t in targets])),
                        'graphite_metric': [t['graphite_metric'] for t in targets],
                        'variables': targets[0]['variables']
                    }
                    for s_b in sum_by:
                        t['variables'][s_b] = 'multi (%s values)' % len(targets)

                    graph_config['targets'].append(t)

    # remove targets/graphs over the limit
    graphs = graphs_limit_targets(graphs, query['limit_targets'])

    # if in a graph all targets have a tag with the same value, they are
    # effectively constants, so promote them.  this makes the display of the
    # graphs less redundant and paves the path
    # for later configuration on a per-graph basis.
    for (graph_key, graph_config) in graphs.items():
        # get all variable tags throughout all targets in this graph
        tags_seen = set()
        for target in graph_config['targets']:
            for tag_name in target['variables'].keys():
                tags_seen.add(tag_name)

        # find effective constants from those variables,
        # and effective variables. (unset tag is a value too)
        first_values_seen = {}
        effective_variables = set()  # tags for which we've seen >1 values
        for target in graph_config['targets']:
            for tag_name in tags_seen:
                # already known that we can't promote, continue
                if tag_name in effective_variables:
                    continue
                tag_value = target['variables'].get(tag_name, None)
                if tag_name not in first_values_seen:
                    first_values_seen[tag_name] = tag_value
                elif tag_value != first_values_seen[tag_name]:
                    effective_variables.add(tag_name)
        effective_constants = tags_seen - effective_variables

        # promote the effective_constants by adjusting graph and targets:
        graphs[graph_key]['promoted_constants'] = {}
        for tag_name in effective_constants:
            graphs[graph_key]['promoted_constants'][tag_name] = first_values_seen[tag_name]
            for (i, target) in enumerate(graph_config['targets']):
                if tag_name in graphs[graph_key]['targets'][i]['variables']:
                    del graphs[graph_key]['targets'][i]['variables'][tag_name]

        # now that graph config is "rich", merge in settings from preferences
        constants = dict(graphs[graph_key]['constants'].items() + graphs[graph_key]['promoted_constants'].items())
        for graph_option in get_action_on_rules_match(preferences.graph_options, constants):
            if isinstance(graph_option, dict):
                graphs[graph_key].update(graph_option)
            else:
                graphs[graph_key] = graph_option(graphs[graph_key])
    return (graphs, query)
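
A small, hypothetical usage example for the function above (note that all of these examples are Python 2: dict(a.items() + b.items()), iterkeys()). The targets argument is a dict keyed by target id, each entry carrying an 'id' and a 'tags' dict; the tag names and values here are invented for illustration.

targets = {
    'servers.web1.cpu.idle': {
        'id': 'servers.web1.cpu.idle',
        'tags': {'server': 'web1', 'type': 'idle', 'unit': 'cpu_state'},
    },
    'servers.web2.cpu.idle': {
        'id': 'servers.web2.cpu.idle',
        'tags': {'server': 'web2', 'type': 'idle', 'unit': 'cpu_state'},
    },
}
# group_by 'server' puts each server on its own graph; 'type' and 'unit' start
# out as variables and, having only one value per graph, end up promoted into
# 'promoted_constants'.
graphs, query = build_graphs_from_targets(targets, {'group_by': ['server']})
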
Code Example #9
def apply_colors(graph):
    '''
    update target colors in a clever, dynamic way. basically it's about defining
    colors for certain metrics (such as cpu idle metric = green), but since you
    can group by arbitrary things, you might have a graph comparing the idle
    values for different servers, in which case they should not be all green.

    # the graph will look something like:
        {
            'promoted_constants': {'type': 'update_time', 'plugin': 'carbon'},
            'from': '-24hours',
            'until': 'now',
            'constants': {'unit': 'ms', 'target_type': 'gauge'},
            'targets': [
                {
                    'id': u'carbon.agents.dfvimeographite2-a.avgUpdateTime',
                    'variables': {'agent': u'dfvimeographite2-a'},
                    'target': u'carbon.agents.dfvimeographite2-a.avgUpdateTime'
                },
                (...)
            ]
        }
    '''

    # color targets based on tags, even when, due to grouping, metrics with the
    # same tags (and thus colors) show up on the same graph
    rules_tags = [
        # http stuff, for swift and others
        [{}, {
            'http_method': {
                'GET': colors['blue'][0],
                'HEAD': colors['yellow'][0],
                'PUT': colors['green'][0],
                'REPLICATE': colors['brown'][0],
                'DELETE': colors['red'][0]
            }
        }],
        [{
            'stat': ['upper', 'upper_90']
        }, {
            'http_method': {
                'GET': colors['blue'][1],
                'HEAD': colors['yellow'][1],
                'PUT': colors['green'][1],
                'REPLICATE': colors['brown'][1],
                'DELETE': colors['red'][1]
            }
        }],
    ]

    # color targets based on tags, except when, due to grouping, metrics with the
    # same tags would show up on the same graph
    rules_unique_tags = [
        [
            {
                'what': 'cpu_usage'
            },
            {
                'type': {
                    'idle': colors['green'][0],
                    'user': colors['blue'][0],
                    'system': colors['blue'][1],
                    'nice': colors['purple'][0],
                    'softirq': colors['red'][0],
                    'irq': colors['red'][1],
                    'iowait': colors['orange'][0],
                    'guest': colors['white'],
                    'guest_nice': colors['white'],
                    'steal': '#FFA791'  # brighter red
                }
            }
        ],
        [{}, {
            'mountpoint': {
                '_var': colors['red'][0],
                '_lib': colors['orange'][1],
                '_boot': colors['blue'][0],
                '_tmp': colors['purple'][0],
                'root': colors['green'][0]
            }
        }],
        [
            {
                'plugin': 'load'
            },
            {
                'type': {
                    '01': colors['red'][1],
                    '05': colors['red'][0],
                    '15': '#FFA791'  # brighter red
                }
            }
        ],
        [{
            'unit': 'ms'
        }, {
            'type': {
                'update_time': colors['turq'][0]
            }
        }],
        [{
            'unit': 'freq_abs'
        }, bin_set_color]
    ]

    for target in graph['targets']:
        tags = dict(graph['constants'].items() +
                    graph['promoted_constants'].items() +
                    target['variables'].items())

        for action in get_action_on_rules_match(rules_unique_tags, tags):
            if callable(action):  # hasattr(action, '__call__'):
                action(graph, target)
            else:
                for (tag_key, matches) in action.items():
                    t = get_unique_tag_value(graph, target, tag_key)
                    if t is not None and t in matches:
                        target['color'] = matches[t]

        for action in get_action_on_rules_match(rules_tags, target):
            for (tag_key, matches) in action.items():
                t = get_tag_value(graph, target, tag_key)
                if t is not None and t in matches:
                    target['color'] = matches[t]

    return graph
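Two lookup helpers drive these rules. Judging from the tags dict built at the top of the loop, get_tag_value simply resolves a tag key across the graph constants, the promoted constants and the target's own variables, while get_unique_tag_value is the restricted variant that, per the docstring, should avoid handing the same value (and hence the same color) to every target on a grouped graph; its exact rule is not shown in this listing. A minimal sketch of the former, under that assumption:

def get_tag_value(graph, target, tag_key):
    # sketch (assumption): same precedence as the tags dict built in the
    # coloring loop -- constants, then promoted constants, then variables
    tags = {}
    tags.update(graph.get('constants', {}))
    tags.update(graph.get('promoted_constants', {}))
    tags.update(target.get('variables', {}))
    return tags.get(tag_key)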
Code example #10
0
def apply_colors(graph):
    '''
    update target colors in a clever, dynamic way. basically it's about defining
    colors for certain metrics (such as cpu idle metric = green), but since you
    can group by arbitrary things, you might have a graph comparing the idle
    values for different servers, in which case they should not be all green.

    # the graph will look something like:
        {
            'promoted_constants': {'type': 'update_time', 'plugin': 'carbon'},
            'from': '-24hours',
            'until': 'now',
            'constants': {'what': 'ms', 'target_type': 'gauge'},
            'targets': [
                {
                    'id': u'carbon.agents.dfvimeographite2-a.avgUpdateTime',
                    'variables': {'agent': u'dfvimeographite2-a'},
                    'target': u'carbon.agents.dfvimeographite2-a.avgUpdateTime'
                },
                (...)
            ]
        }
    '''

    color_assign_cpu = {
        'idle': colors['green'][0],
        'user': colors['blue'][0],
        'system': colors['blue'][1],
        'nice': colors['purple'][0],
        'softirq': colors['red'][0],
        'irq': colors['red'][1],
        'iowait': colors['orange'][0],
        'guest': colors['white'],
        'guest_nice': colors['white'],
        'steal': '#FFA791'  # brighter red
    }

    color_assign_mountpoint = {
        '_var': colors['red'][0],
        '_lib': colors['orange'][1],
        '_boot': colors['blue'][0],
        '_tmp': colors['purple'][0],
        'root': colors['green'][0]
    }

    color_assign_load = {
        '01': colors['red'][1],
        '05': colors['red'][0],
        '15': '#FFA791'  # brighter red
    }

    color_assign_timing = {
        'update_time': colors['turq'][0]
    }

    # http stuff, for swift and others
    color_assign_http = [
        ({'http_method': 'GET'},       colors['blue']),
        ({'http_method': 'HEAD'},      colors['yellow']),
        ({'http_method': 'PUT'},       colors['green']),
        ({'http_method': 'REPLICATE'}, colors['brown']),
        ({'http_method': 'DELETE'},    colors['red']),
    ]

    for (i, target) in enumerate(graph['targets']):
        if get_tag_value(graph, target, 'what') == 'cpu_state':
            t = get_unique_tag_value(graph, target, 'type')
            if t is not None:
                graph['targets'][i]['color'] = color_assign_cpu[t]

        if get_tag_value(graph, target, 'what') == 'ms':
            t = get_unique_tag_value(graph, target, 'type')
            if t in color_assign_timing:
                graph['targets'][i]['color'] = color_assign_timing[t]

        t = get_unique_tag_value(graph, target, 'mountpoint')
        if t is not None and t in color_assign_mountpoint:
            graph['targets'][i]['color'] = color_assign_mountpoint[t]

        if get_tag_value(graph, target, 'plugin') == 'load':
            t = get_unique_tag_value(graph, target, 'type')
            if t is not None and t in color_assign_load:
                graph['targets'][i]['color'] = color_assign_load[t]

        # swift
        w = get_tag_value(graph, target, 'what')
        if w == 'async_pendings':
            graph['targets'][i]['color'] = colors['turq'][0]

        # http.
        # note this doesn't prevent targets possibly having the same color
        m = get_tag_value(graph, target, 'http_method')
        if m is not None:
            t = {'http_method': m}
            for color in backend.get_action_on_rules_match(color_assign_http, t):
                # in some cases we want the color extra strong:
                if w in ('upper_90', 'errors'):
                    color = color[1]
                else:
                    color = color[0]
                graph['targets'][i]['color'] = color

    return graph
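The http branch above treats each palette entry as a pair: index 0 is the normal shade, index 1 a stronger one used for 'upper_90' and 'errors' series. A tiny self-contained illustration of that selection; the hex values, rule list and helper name here are made up for the example:

# toy palette (assumption): each color is a (normal, strong) pair
toy_colors = {'blue': ('#6699ff', '#0000ff'), 'red': ('#ff9999', '#ff0000')}
toy_rules = [
    ({'http_method': 'GET'}, toy_colors['blue']),
    ({'http_method': 'DELETE'}, toy_colors['red']),
]


def pick_http_color(http_method, what):
    # hypothetical helper: normal shade by default, strong shade for
    # percentile/error series, mirroring the branch above
    for (match, pair) in toy_rules:
        if match['http_method'] == http_method:
            return pair[1] if what in ('upper_90', 'errors') else pair[0]
    return None


print(pick_http_color('GET', 'upper_90'))  # '#0000ff' (strong blue)
print(pick_http_color('DELETE', 'avg'))    # '#ff9999' (normal red)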
Code example #11
0
def apply_colors(graph):
    '''
    update target colors in a clever, dynamic way. basically it's about defining
    colors for certain metrics (such as cpu idle metric = green), but since you
    can group by arbitrary things, you might have a graph comparing the idle
    values for different servers, in which case they should not be all green.

    # the graph will look something like:
        {
            'promoted_constants': {'type': 'update_time', 'plugin': 'carbon'},
            'from': '-24hours',
            'until': 'now',
            'constants': {'what': 'ms', 'target_type': 'gauge'},
            'targets': [
                {
                    'id': u'carbon.agents.dfvimeographite2-a.avgUpdateTime',
                    'variables': {'agent': u'dfvimeographite2-a'},
                    'target': u'carbon.agents.dfvimeographite2-a.avgUpdateTime'
                },
                (...)
            ]
        }
    '''

    color_assign_cpu = {
        'idle': colors['green'][0],
        'user': colors['blue'][0],
        'system': colors['blue'][1],
        'nice': colors['purple'][0],
        'softirq': colors['red'][0],
        'irq': colors['red'][1],
        'iowait': colors['orange'][0],
        'guest': colors['white'],
        'guest_nice': colors['white'],
        'steal': '#FFA791'  # brighter red
    }

    color_assign_mountpoint = {
        '_var': colors['red'][0],
        '_lib': colors['orange'][1],
        '_boot': colors['blue'][0],
        '_tmp': colors['purple'][0],
        'root': colors['green'][0]
    }

    color_assign_load = {
        '01': colors['red'][1],
        '05': colors['red'][0],
        '15': '#FFA791'  # brighter red
    }

    color_assign_timing = {'update_time': colors['turq'][0]}

    # http stuff, for swift and others
    color_assign_http = [
        ({
            'http_method': 'GET'
        }, colors['blue']),
        ({
            'http_method': 'HEAD'
        }, colors['yellow']),
        ({
            'http_method': 'PUT'
        }, colors['green']),
        ({
            'http_method': 'REPLICATE'
        }, colors['brown']),
        ({
            'http_method': 'DELETE'
        }, colors['red']),
    ]

    for (i, target) in enumerate(graph['targets']):
        if get_tag_value(graph, target, 'what') == 'cpu_state':
            t = get_unique_tag_value(graph, target, 'type')
            if t is not None:
                graph['targets'][i]['color'] = color_assign_cpu[t]

        if get_tag_value(graph, target, 'what') == 'ms':
            t = get_unique_tag_value(graph, target, 'type')
            if t in color_assign_timing:
                graph['targets'][i]['color'] = color_assign_timing[t]

        t = get_unique_tag_value(graph, target, 'mountpoint')
        if t is not None and t in color_assign_mountpoint:
            graph['targets'][i]['color'] = color_assign_mountpoint[t]

        if get_tag_value(graph, target, 'plugin') == 'load':
            t = get_unique_tag_value(graph, target, 'type')
            if t is not None and t in color_assign_load:
                graph['targets'][i]['color'] = color_assign_load[t]

        # swift
        w = get_tag_value(graph, target, 'what')
        if w == 'async_pendings':
            graph['targets'][i]['color'] = colors['turq'][0]

        # http.
        # note this doesn't prevent targets possibly having the same color
        m = get_tag_value(graph, target, 'http_method')
        if m is not None:
            t = {'http_method': m}
            for color in backend.get_action_on_rules_match(
                    color_assign_http, t):
                # in some cases we want the color extra strong:
                if w in ('upper_90', 'errors'):
                    color = color[1]
                else:
                    color = color[0]
                graph['targets'][i]['color'] = color

    return graph
Code example #12
0
def build_graphs_from_targets(targets, query={}):
    # merge default options..
    defaults = {
        'group_by': [],
        'sum_by': [],
        'from': '-24hours',
        'to': 'now',
        'statement': 'graph',
        'limit_targets': 500
    }
    query = dict(defaults.items() + query.items())
    graphs = {}
    if not targets:
        return (graphs, query)
    group_by = query['group_by']
    sum_by = query['sum_by']
    # for each combination of values of tags from group_by, make 1 graph with
    # all targets that have these values. so for each graph, we have:
    # the "constants": tags in the group_by
    # the "variables": tags not in the group_by, which can have arbitrary values
    # go through all targets and group them into graphs:
    for (i, target_id) in enumerate(sorted(targets.iterkeys())):
        constants = {}
        variables = {}
        target_data = targets[target_id]
        for (tag_name, tag_value) in target_data['tags'].items():
            if tag_name in group_by or '%s=' % tag_name in group_by:
                constants[tag_name] = tag_value
            else:
                variables[tag_name] = tag_value
        graph_key = '__'.join(
            [target_data['tags'][tag_name] for tag_name in constants])
        if graph_key not in graphs:
            graph = {'from': query['from'], 'until': query['to']}
            graph.update({'constants': constants, 'targets': []})
            graphs[graph_key] = graph
        # set all options needed for timeserieswidget/flot:
        t = {
            'variables': variables,
            'graphite_metric': target_data['graphite_metric'],
            'target': target_data['target']
        }
        if 'color' in target_data:
            t['color'] = target_data['color']
        graphs[graph_key]['targets'].append(t)

    # sum targets together if appropriate
    if len(query['sum_by']):
        for (graph_key, graph_config) in graphs.items():
            graph_config['targets_sum_candidates'] = {}
            graph_config['normal_targets'] = []
            for target in graph_config['targets']:
                # targets that can get summed together with other targets must
                # have at least 1 'sum_by' tag in the variables list.
                # targets that can get summed together must have:
                # * the same 'sum_by' tags
                # * the same variables (key and val), except those vals that
                # are being summed by.
                # so for every group of sum_by tags and variables we build a
                # list of targets that can be summed together
                sum_constants = set(query['sum_by']).intersection(
                    set(target['variables'].keys()))
                if (sum_constants):
                    sum_constants_str = '_'.join(sorted(sum_constants))
                    variables_str = '_'.join([
                        '%s_%s' % (k, target['variables'][k])
                        for k in sorted(target['variables'].keys())
                        if k not in sum_constants
                    ])
                    sum_id = '%s__%s' % (sum_constants_str, variables_str)
                    if sum_id not in graphs[graph_key][
                            'targets_sum_candidates']:
                        graphs[graph_key]['targets_sum_candidates'][
                            sum_id] = []
                    graphs[graph_key]['targets_sum_candidates'][sum_id].append(
                        target)
                else:
                    graph_config['normal_targets'].append(target)
            graph_config['targets'] = graph_config['normal_targets']
            for (sum_id, targets
                 ) in graphs[graph_key]['targets_sum_candidates'].items():
                if (len(targets) == 1):
                    graph_config['targets'].append(targets[0])
                else:
                    t = {
                        'target': 'sumSeries(%s)' % ','.join(
                            [tt['graphite_metric'] for tt in targets]),
                        'graphite_metric': [tt['graphite_metric'] for tt in targets],
                        'variables': targets[0]['variables']
                    }
                    for s_b in sum_by:
                        t['variables'][s_b] = 'multi (%s values)' % len(targets)

                    graph_config['targets'].append(t)

    # remove targets/graphs over the limit
    graphs = graphs_limit_targets(graphs, query['limit_targets'])

    # if in a graph all targets have a tag with the same value, they are
    # effectively constants, so promote them.  this makes the display of the
    # graphs less redundant and paves the way
    # for later configuration on a per-graph basis.
    for (graph_key, graph_config) in graphs.items():
        # get all variable tags throughout all targets in this graph
        tags_seen = set()
        for target in graph_config['targets']:
            for tag_name in target['variables'].keys():
                tags_seen.add(tag_name)

        # find effective constants from those variables,
        # and effective variables. (unset tag is a value too)
        first_values_seen = {}
        effective_variables = set()  # tags for which we've seen >1 values
        for target in graph_config['targets']:
            for tag_name in tags_seen:
                # already known that we can't promote, continue
                if tag_name in effective_variables:
                    continue
                tag_value = target['variables'].get(tag_name, None)
                if tag_name not in first_values_seen:
                    first_values_seen[tag_name] = tag_value
                elif tag_value != first_values_seen[tag_name]:
                    effective_variables.add(tag_name)
        effective_constants = tags_seen - effective_variables

        # promote the effective_constants by adjusting graph and targets:
        graphs[graph_key]['promoted_constants'] = {}
        for tag_name in effective_constants:
            graphs[graph_key]['promoted_constants'][
                tag_name] = first_values_seen[tag_name]
            for (i, target) in enumerate(graph_config['targets']):
                if tag_name in graphs[graph_key]['targets'][i]['variables']:
                    del graphs[graph_key]['targets'][i]['variables'][tag_name]

        # now that graph config is "rich", merge in settings from preferences
        constants = dict(graphs[graph_key]['constants'].items() +
                         graphs[graph_key]['promoted_constants'].items())
        for graph_option in get_action_on_rules_match(
                preferences.graph_options, constants):
            if isinstance(graph_option, dict):
                graphs[graph_key].update(graph_option)
            else:
                graphs[graph_key] = graph_option(graphs[graph_key])
    return (graphs, query)
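The sum-candidate grouping in this variant decides which targets may be summed together: they must share at least one sum_by tag, and all their remaining variables (key and value) must match; the sum_id string built from those two parts is the bucket key. A small worked run with assumed toy input:

# assumed toy input: the first two targets differ only in 'server', which we
# sum by, so they share a sum_id; the third differs in 'type' and stands alone
sum_by = ['server']
targets = [
    {'variables': {'server': 'web1', 'type': 'idle'}},
    {'variables': {'server': 'web2', 'type': 'idle'}},
    {'variables': {'server': 'web1', 'type': 'user'}},
]

candidates = {}
for target in targets:
    sum_constants = set(sum_by).intersection(set(target['variables'].keys()))
    if sum_constants:
        sum_constants_str = '_'.join(sorted(sum_constants))
        variables_str = '_'.join(
            '%s_%s' % (k, target['variables'][k])
            for k in sorted(target['variables'].keys())
            if k not in sum_constants)
        sum_id = '%s__%s' % (sum_constants_str, variables_str)
        candidates.setdefault(sum_id, []).append(target)

for (sum_id, group) in sorted(candidates.items()):
    print('%s -> %d target(s)' % (sum_id, len(group)))
# server__type_idle -> 2 target(s)   (these two would become one sumSeries)
# server__type_user -> 1 target(s)   (kept as-is)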
Code example #13
0
File: __init__.py  Project: kgeis/graph-explorer
def build_from_targets(targets, query, preferences):
    graphs = {}
    if not targets:
        return (graphs, query)
    group_by = query['group_by']
    sum_by = query['sum_by']
    avg_by = query['avg_by']
    avg_over = query['avg_over']
    # I'm going to assume you never use seconds and that your datapoints are
    # stored at minutely resolution. later on we can use config options for
    # this (or better: somehow query graphite about it)
    # note, the day/week/month numbers are not technically accurate, but
    # since we're doing movingAvg that's ok
    averaging = {
        'M': 1,
        'h': 60,
        'd': 60 * 24,
        'w': 60 * 24 * 7,
        'mo': 60 * 24 * 30
    }
    if avg_over is not None:
        avg_over_amount = avg_over[0]
        avg_over_unit = avg_over[1]
        if avg_over_unit in averaging.keys():
            multiplier = averaging[avg_over_unit]
            query['target_modifiers'].append(
                Query.graphite_function_applier('movingAverage',
                                                avg_over_amount * multiplier))

    # for each group_by bucket, make 1 graph.
    # so for each graph, we have:
    # the "constants": tags in the group_by
    # the "variables": tags not in the group_by, which can have arbitrary
    # values, or different values from a group_by tag that match the same
    # bucket pattern
    # go through all targets and group them into graphs:
    for _target_id, target_data in sorted(targets.items()):
        # FWIW, target_data has an 'id' which timeserieswidget doesn't care about
        target = Target(target_data)
        target['target'] = target['id']

        (graph_key, constants) = target.get_graph_info(group_by)
        if graph_key not in graphs:
            graph = {'from': query['from'], 'until': query['to']}
            graph.update({'constants': constants, 'targets': []})
            graphs[graph_key] = graph
        graphs[graph_key]['targets'].append(target)

    # ok so now we have a graphs dictionary with a graph for every appropriate
    # combination of group_by tags, and each graph contains all targets that
    # should be shown on it.  but the user may have asked to aggregate certain
    # targets together, by summing and/or averaging across different values of
    # (a) certain tag(s). let's process the aggregations now.
    if (sum_by or avg_by):
        for (graph_key, graph_config) in graphs.items():
            graph_config['targets_sum_candidates'] = {}
            graph_config['targets_avg_candidates'] = {}
            graph_config['normal_targets'] = []

            # process equivalence rules, see further down.
            filter_candidates = {}
            for tag, buckets in sum_by.items():

                # first separate the individuals from the _sum_

                filter_candidates[tag] = {}
                for target in graph_config['targets']:
                    # we can use agg_key to find out if they all have the same values
                    # other than this one particular key
                    key = target.get_agg_key({tag: buckets})
                    if key not in filter_candidates[tag]:
                        filter_candidates[tag][key] = {'individuals': []}
                    if target['tags'].get(tag, '') == '_sum_':
                        filter_candidates[tag][key]['_sum_'] = target
                    else:
                        filter_candidates[tag][key]['individuals'].append(
                            target)

                # for all agg keys that only have the '' bucket,
                # if targets are identical except that some have tag
                # foo={bar,baz,0,quux, ...} and one of them has foo=_sum_ and we're
                # summing by that tag, and we didn't filter on foo,
                # remove all the ones except the sum one

                if len(buckets) == 1 and buckets[0] == '':
                    if not Query.filtered_on(query, tag):
                        for key in filter_candidates[tag].keys():
                            if '_sum_' in filter_candidates[tag][key]:
                                for i in filter_candidates[tag][key][
                                        'individuals']:
                                    graph_config['targets'].remove(i)

                # if we are summing, and we have a filter, and we have individual ones and a _sum_, remove the _sum_
                # irrespective of buckets.  note that this removes the _sum_ target without the user needing to filter it out explicitly
                # this is the only place we do that, but it makes sense.  we wouldn't want users to specify the _sum_ removal explicitly
                # all the time, esp for multiple tag keys
                if Query.filtered_on(query, tag):
                    for key in filter_candidates[tag].keys():
                        if '_sum_' in filter_candidates[tag][key]:
                            graph_config['targets'].remove(
                                filter_candidates[tag][key]['_sum_'])

            for target in graph_config['targets']:
                sum_id = target.get_agg_key(sum_by)
                if sum_id:
                    if sum_id not in graph_config['targets_sum_candidates']:
                        graphs[graph_key]['targets_sum_candidates'][
                            sum_id] = []
                    graph_config['targets_sum_candidates'][sum_id].append(
                        target)

            for (sum_id,
                 targets) in graph_config['targets_sum_candidates'].items():
                if len(targets) > 1:
                    for candidate in targets:
                        graph_config['targets'].remove(candidate)
                    graph_config['targets'].append(
                        t.graphite_func_aggregate(targets, sum_by,
                                                  "sumSeries"))

            for target in graph_config['targets']:
                # Now that any summing is done, we look at aggregating by
                # averaging because avg(foo+bar+baz) is more efficient
                # than avg(foo)+avg(bar)+avg(baz)
                # this aggregates targets (whether those are sums or regular ones)
                avg_id = target.get_agg_key(avg_by)
                if avg_id:
                    if avg_id not in graph_config['targets_avg_candidates']:
                        graph_config['targets_avg_candidates'][avg_id] = []
                    graph_config['targets_avg_candidates'][avg_id].append(
                        target)

            for (avg_id,
                 targets) in graph_config['targets_avg_candidates'].items():
                if len(targets) > 1:
                    for candidate in targets:
                        graph_config['targets'].remove(candidate)
                    graph_config['targets'].append(
                        t.graphite_func_aggregate(targets, avg_by,
                                                  "averageSeries"))

    # remove targets/graphs over the limit
    graphs = limit_targets(graphs, query['limit_targets'])

    # Apply target modifiers (like movingAverage, summarize, ...)
    for (graph_key, graph_config) in graphs.items():
        for target in graph_config['targets']:
            for target_modifier in query['target_modifiers']:
                target_modifier(target, graph_config)

    # if in a graph all targets have a tag with the same value, they are
    # effectively constants, so promote them.  this makes the display of the
    # graphs less redundant and makes it easier to do config/preferences
    # on a per-graph basis.
    for (graph_key, graph_config) in graphs.items():
        # get all variable tags throughout all targets in this graph
        tags_seen = set()
        for target in graph_config['targets']:
            for tag_name in target['variables'].keys():
                tags_seen.add(tag_name)

        # find effective constants from those variables,
        # and effective variables. (unset tag is a value too)
        first_values_seen = {}
        effective_variables = set()  # tags for which we've seen >1 values
        for target in graph_config['targets']:
            for tag_name in tags_seen:
                # already known that we can't promote, continue
                if tag_name in effective_variables:
                    continue
                tag_value = target['variables'].get(tag_name, None)
                if tag_name not in first_values_seen:
                    first_values_seen[tag_name] = tag_value
                elif tag_value != first_values_seen[tag_name]:
                    effective_variables.add(tag_name)
        effective_constants = tags_seen - effective_variables

        # promote the effective_constants by adjusting graph and targets:
        graph_config['promoted_constants'] = {}
        for tag_name in effective_constants:
            graph_config['promoted_constants'][tag_name] = first_values_seen[
                tag_name]
            for target in graph_config['targets']:
                target['variables'].pop(tag_name, None)

        # now that graph config is "rich", merge in settings from preferences
        constants = dict(graph_config['constants'].items() +
                         graph_config['promoted_constants'].items())
        for graph_option in get_action_on_rules_match(
                preferences.graph_options, constants):
            if isinstance(graph_option, dict):
                graph_config.update(graph_option)
            else:
                graph_config = graphs[graph_key] = graph_option(graph_config)

        # but, the query may override some preferences:
        override = {}
        if query['statement'] == 'lines':
            override['state'] = 'lines'
        if query['statement'] == 'stack':
            override['state'] = 'stacked'
        if query['min'] is not None:
            override['yaxis'] = override.get('yaxis', {})
            override['yaxis'].update({'min': convert.parse_str(query['min'])})
        if query['max'] is not None:
            override['yaxis'] = override.get('yaxis', {})
            override['yaxis'].update({'max': convert.parse_str(query['max'])})

        graphs[graph_key].update(override)

    # now that some constants are promoted, we can give the graph more
    # unique keys based on all (original + promoted) constants. this is in
    # line with the meaning of the graph ("all targets with those constant
    # tags"), but more importantly: this fixes cases where some graphs
    # would otherwise have the same key even though they have a different
    # set of constants; this can manifest itself on dashboard pages where
    # graphs for different queries are shown.
    # note that we can't just compose the key from constants + promoted_constants:
    # part of the original graph key is also set by the group_by (which, by
    # means of the bucket patterns, doesn't always translate into constants),
    # so we solve this by also including the old key.
    new_graphs = {}
    for (graph_key, graph_config) in graphs.items():
        new_key = ','.join('%s=%s' % i
                           for i in graph_config['promoted_constants'].items())
        new_key = '%s__%s' % (graph_key, new_key)
        new_graphs[new_key] = graph_config
    graphs = new_graphs

    return (graphs, query)
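The constant-promotion pass that closes this variant (and the earlier ones) is self-contained enough to exercise on a toy graph: any tag that carries the same value in every target becomes a promoted constant and is removed from the per-target variables. A minimal sketch under assumed input:

# assumed toy graph: 'unit' has the same value everywhere, 'server' does not
graph = {'targets': [
    {'variables': {'unit': 'ms', 'server': 'web1'}},
    {'variables': {'unit': 'ms', 'server': 'web2'}},
]}

tags_seen = set()
for target in graph['targets']:
    tags_seen.update(target['variables'].keys())

first_values_seen = {}
effective_variables = set()
for target in graph['targets']:
    for tag_name in tags_seen:
        if tag_name in effective_variables:
            continue
        tag_value = target['variables'].get(tag_name, None)
        if tag_name not in first_values_seen:
            first_values_seen[tag_name] = tag_value
        elif tag_value != first_values_seen[tag_name]:
            effective_variables.add(tag_name)

graph['promoted_constants'] = dict(
    (t, first_values_seen[t]) for t in tags_seen - effective_variables)
for target in graph['targets']:
    for tag_name in graph['promoted_constants']:
        target['variables'].pop(tag_name, None)

print(graph['promoted_constants'])  # {'unit': 'ms'}; 'server' stays per-target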
Code example #14
0
def apply_colors(graph):
    """
    update target colors in a clever, dynamic way. basically it's about defining
    colors for certain metrics (such as cpu idle metric = green), but since you
    can group by arbitrary things, you might have a graph comparing the idle
    values for different servers, in which case they should not be all green.

    # the graph will look something like:
        {
            'promoted_constants': {'type': 'update_time', 'plugin': 'carbon'},
            'from': '-24hours',
            'until': 'now',
            'constants': {'what': 'ms', 'target_type': 'gauge'},
            'targets': [
                {
                    'id': u'carbon.agents.dfvimeographite2-a.avgUpdateTime',
                    'variables': {'agent': u'dfvimeographite2-a'},
                    'target': u'carbon.agents.dfvimeographite2-a.avgUpdateTime'
                },
                (...)
            ]
        }
    """

    color_assign_cpu = {
        "idle": colors["green"][0],
        "user": colors["blue"][0],
        "system": colors["blue"][1],
        "nice": colors["purple"][0],
        "softirq": colors["red"][0],
        "irq": colors["red"][1],
        "iowait": colors["orange"][0],
        "guest": colors["white"],
        "guest_nice": colors["white"],
        "steal": "#FFA791",  # brighter red
    }

    color_assign_mountpoint = {
        "_var": colors["red"][0],
        "_lib": colors["orange"][1],
        "_boot": colors["blue"][0],
        "_tmp": colors["purple"][0],
        "root": colors["green"][0],
    }

    color_assign_load = {"01": colors["red"][1], "05": colors["red"][0], "15": "#FFA791"}  # brighter red

    color_assign_timing = {"update_time": colors["turq"][0]}

    # http stuff, for swift and others
    color_assign_http = [
        ({"http_method": "GET"}, colors["blue"]),
        ({"http_method": "HEAD"}, colors["yellow"]),
        ({"http_method": "PUT"}, colors["green"]),
        ({"http_method": "REPLICATE"}, colors["brown"]),
        ({"http_method": "DELETE"}, colors["red"]),
    ]

    for (i, target) in enumerate(graph["targets"]):
        if get_tag_value(graph, target, "what") == "cpu_state":
            t = get_unique_tag_value(graph, target, "type")
            if t is not None:
                graph["targets"][i]["color"] = color_assign_cpu[t]

        if get_tag_value(graph, target, "what") == "ms":
            t = get_unique_tag_value(graph, target, "type")
            if t in color_assign_timing:
                graph["targets"][i]["color"] = color_assign_timing[t]

        t = get_unique_tag_value(graph, target, "mountpoint")
        if t is not None and t in color_assign_mountpoint:
            graph["targets"][i]["color"] = color_assign_mountpoint[t]

        if get_tag_value(graph, target, "plugin") == "load":
            t = get_unique_tag_value(graph, target, "type")
            if t is not None and t in color_assign_load:
                graph["targets"][i]["color"] = color_assign_load[t]

        # swift
        w = get_tag_value(graph, target, "what")
        if w == "async_pendings":
            graph["targets"][i]["color"] = colors["turq"][0]

        # http.
        # note this doesn't prevent targets possibly having the same color
        m = get_tag_value(graph, target, "http_method")
        if m is not None:
            t = {"http_method": m}
            for color in backend.get_action_on_rules_match(color_assign_http, t):
                # in some cases we want the color extra strong:
                if w in ("upper_90", "errors"):
                    color = color[1]
                else:
                    color = color[0]
                graph["targets"][i]["color"] = color

    return graph
Code example #15
0
File: app.py  Project: rlodge/graph-explorer
def build_graphs_from_targets(targets, query={}, target_modifiers=None):
    # note: a mutable default for target_modifiers would be shared across calls
    # and keep accumulating movingAverage modifiers, so default to None instead
    if target_modifiers is None:
        target_modifiers = []
    # merge default options..
    defaults = {
        "group_by": [],
        "sum_by": [],
        "avg_over": None,
        "avg_by": [],
        "from": "-24hours",
        "to": "now",
        "statement": "graph",
        "limit_targets": 500,
    }
    query = dict(defaults.items() + query.items())
    graphs = {}
    if not targets:
        return (graphs, query)
    group_by = query["group_by"]
    sum_by = query["sum_by"]
    avg_by = query["avg_by"]
    avg_over = query["avg_over"]
    # I'm going to assume you never use seconds and that your datapoints are
    # stored at minutely resolution. later on we can use config options for
    # this (or better: somehow query graphite about it)
    # note, the day/week/month numbers are not technically accurate, but
    # since we're doing movingAvg that's ok
    averaging = {"M": 1, "h": 60, "d": 60 * 24, "w": 60 * 24 * 7, "mo": 60 * 24 * 30}
    if avg_over is not None:
        avg_over_amount = avg_over[0]
        avg_over_unit = avg_over[1]
        if avg_over_unit in averaging.keys():
            multiplier = averaging[avg_over_unit]
            target_modifier = ["movingAverage", str(avg_over_amount * multiplier)]
            target_modifiers.append(target_modifier)

    # for each combination of values of tags from group_by, make 1 graph with
    # all targets that have these values. so for each graph, we have:
    # the "constants": tags in the group_by
    # the "variables": tags not in the group_by, which can have arbitrary values
    # go through all targets and group them into graphs:
    for (i, target_id) in enumerate(sorted(targets.iterkeys())):
        constants = {}
        variables = {}
        target_data = targets[target_id]
        for (tag_name, tag_value) in target_data["tags"].items():
            if tag_name in group_by or "%s=" % tag_name in group_by:
                constants[tag_name] = tag_value
            else:
                variables[tag_name] = tag_value
        graph_key = "__".join([target_data["tags"][tag_name] for tag_name in constants])
        if graph_key not in graphs:
            graph = {"from": query["from"], "until": query["to"]}
            graph.update({"constants": constants, "targets": []})
            graphs[graph_key] = graph
        target = target_data["id"]
        # set all options needed for timeserieswidget/flot:
        t = {
            "variables": variables,
            "id": target_data["id"],  # timeserieswidget doesn't care about this
            "target": target,
        }
        if "color" in target_data:
            t["color"] = target_data["color"]
        graphs[graph_key]["targets"].append(t)

    # ok so now we have a graphs dictionary with a graph for every appropriate
    # combination of group_by tags, and each graph contains all targets that
    # should be shown on it.  but the user may have asked to aggregate certain
    # targets together, by summing and/or averaging across different values of
    # (a) certain tag(s). let's process the aggregations now.
    if sum_by or avg_by:
        for (graph_key, graph_config) in graphs.items():
            graph_config["targets_sum_candidates"] = {}
            graph_config["targets_avg_candidates"] = {}
            graph_config["normal_targets"] = []
            all_targets = graph_config["targets"][:]  # Get a copy.

            for target in all_targets:
                # targets that can get summed together with other targets must
                # have at least 1 'sum_by' tag in the variables list.
                # targets that can get summed together must have:
                # * the same 'sum_by' tag keys (not values, because we
                # aggregate across different values for these tags)
                # * the same variables (key and val), except those vals that
                # are being summed by.
                # so for every group of sum_by tags and variables we build a
                # list of targets that can be summed together

                # of course it only makes sense to sum by tags that the target
                # actually has, and that are not already constants (meaning
                # every target in the graph has the same value)
                variables = target["variables"].keys()
                sum_constants = set(sum_by).intersection(set(variables))
                if sum_constants:
                    sum_constants_str = "_".join(sorted(sum_constants))
                    variables_str = "_".join(
                        ["%s_%s" % (k, target["variables"][k]) for k in sorted(variables) if k not in sum_constants]
                    )
                    sum_id = "%s__%s" % (sum_constants_str, variables_str)
                    if sum_id not in graph_config["targets_sum_candidates"]:
                        graphs[graph_key]["targets_sum_candidates"][sum_id] = []
                    graph_config["targets_sum_candidates"][sum_id].append(target)

            for (sum_id, targets) in graph_config["targets_sum_candidates"].items():
                if len(targets) > 1:
                    for t in targets:
                        all_targets.remove(t)
                    all_targets.append(graphite_func_aggregate(targets, sum_by, "sumSeries"))

            for target in all_targets:
                # Now that any summing is done, we look at aggregating by
                # averaging because avg(foo+bar+baz) is more efficient
                # than avg(foo)+avg(bar)+avg(baz)
                # It's pretty similar to what happened above and aggregates
                # targets (whether those are sums or regular ones)
                variables = target["variables"].keys()
                avg_constants = set(avg_by).intersection(set(variables))
                if avg_constants:
                    avg_constants_str = "_".join(sorted(avg_constants))
                    variables_str = "_".join(
                        ["%s_%s" % (k, target["variables"][k]) for k in sorted(variables) if k not in avg_constants]
                    )
                    # some values can be like 'sumSeries (8 values)' due to an
                    # earlier aggregation. if targets now have a different number of
                    # values matched, that doesn't matter and they should still
                    # be aggregated together if the rest of the conditions are met
                    variables_str = re.sub(r"\([0-9]+ values\)", "(Xvalues)", variables_str)
                    avg_id = "%s__%s" % (avg_constants_str, variables_str)
                    if avg_id not in graph_config["targets_avg_candidates"]:
                        graph_config["targets_avg_candidates"][avg_id] = []
                    graph_config["targets_avg_candidates"][avg_id].append(target)

            for (avg_id, targets) in graph_config["targets_avg_candidates"].items():
                if len(targets) > 1:
                    for t in targets:
                        all_targets.remove(t)
                    all_targets.append(graphite_func_aggregate(targets, avg_by, "averageSeries"))

            graph_config["targets"] = all_targets

    # remove targets/graphs over the limit
    graphs = graphs_limit_targets(graphs, query["limit_targets"])

    # Apply target modifiers (like movingAverage, summarize, ...)
    for (graph_key, graph_config) in graphs.items():
        for target in graph_config["targets"]:
            for target_modifier in target_modifiers:
                target["target"] = "%s(%s,%s)" % (target_modifier[0], target["target"], ",".join(target_modifier[1:]))
    # if in a graph all targets have a tag with the same value, they are
    # effectively constants, so promote them.  this makes the display of the
    # graphs less redundant and makes it easier to do config/preferences
    # on a per-graph basis.
    for (graph_key, graph_config) in graphs.items():
        # get all variable tags throughout all targets in this graph
        tags_seen = set()
        for target in graph_config["targets"]:
            for tag_name in target["variables"].keys():
                tags_seen.add(tag_name)

        # find effective constants from those variables,
        # and effective variables. (unset tag is a value too)
        first_values_seen = {}
        effective_variables = set()  # tags for which we've seen >1 values
        for target in graph_config["targets"]:
            for tag_name in tags_seen:
                # already known that we can't promote, continue
                if tag_name in effective_variables:
                    continue
                tag_value = target["variables"].get(tag_name, None)
                if tag_name not in first_values_seen:
                    first_values_seen[tag_name] = tag_value
                elif tag_value != first_values_seen[tag_name]:
                    effective_variables.add(tag_name)
        effective_constants = tags_seen - effective_variables

        # promote the effective_constants by adjusting graph and targets:
        graphs[graph_key]["promoted_constants"] = {}
        for tag_name in effective_constants:
            graphs[graph_key]["promoted_constants"][tag_name] = first_values_seen[tag_name]
            for (i, target) in enumerate(graph_config["targets"]):
                if tag_name in graphs[graph_key]["targets"][i]["variables"]:
                    del graphs[graph_key]["targets"][i]["variables"][tag_name]

        # now that graph config is "rich", merge in settings from preferences
        constants = dict(graphs[graph_key]["constants"].items() + graphs[graph_key]["promoted_constants"].items())
        for graph_option in get_action_on_rules_match(preferences.graph_options, constants):
            if isinstance(graph_option, dict):
                graphs[graph_key].update(graph_option)
            else:
                graphs[graph_key] = graph_option(graphs[graph_key])

        # but, the query may override some preferences:
        override = {}
        if query["statement"] == "lines":
            override["state"] = "lines"
        if query["statement"] == "stack":
            override["state"] = "stacked"

        graphs[graph_key].update(override)

    # now that some constants are promoted, we can give the graph more
    # unique keys based on all (original + promoted) constants. this is in
    # line with the meaning of the graph ("all targets with those constant
    # tags"), but more importantly: this fixes cases where some graphs
    # would otherwise have the same key even though they have a different
    # set of constants; this can manifest itself on dashboard pages where
    # graphs for different queries are shown.
    new_graphs = {}
    for (graph_key, graph_config) in graphs.items():
        better_graph_key_1 = "__".join("%s_%s" % i for i in graph_config["constants"].items())
        better_graph_key_2 = "__".join("%s_%s" % i for i in graph_config["promoted_constants"].items())
        better_graph_key = "%s___%s" % (better_graph_key_1, better_graph_key_2)
        new_graphs[better_graph_key] = graph_config
    graphs = new_graphs

    return (graphs, query)
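The target-modifier loop in this last variant rewrites the Graphite target string itself: an avg_over of (1, 'd') becomes the modifier ['movingAverage', '1440'], which is then wrapped around each target. A short illustration of that string construction, with a made-up metric name:

# assumed input: one target and the modifier built for avg_over = (1, 'd')
target = {'target': 'carbon.agents.host-a.avgUpdateTime'}
target_modifier = ['movingAverage', str(1 * 60 * 24)]

target['target'] = '%s(%s,%s)' % (
    target_modifier[0], target['target'], ','.join(target_modifier[1:]))
print(target['target'])
# movingAverage(carbon.agents.host-a.avgUpdateTime,1440)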