Example #1
    def check_values(self, config, s_metrics, preferences):
        worst = 0
        results = []
        if " " in self.expr:  # looks like a GEQL query
            query = Query(self.expr)
            (query, targets_matching) = s_metrics.matching(query)
            graphs_targets_matching = g.build_from_targets(targets_matching, query, preferences)[0]
            for graph_id, graph in graphs_targets_matching.items():
                for target in graph['targets']:
                    target = target['target']
                    value = check_graphite(target, config)
                    status = self.check(value)
                    results.append((target, value, status))
                    # if worst so far is ok and we have an unknown, that takes precedence
                    if status == 3 and worst == 0:
                        worst = 3
                    # if the current status is not unknown and it's worse than whatever we have, update worst
                    if status != 3 and status > worst:
                        worst = status

        else:
            target = self.expr
            value = check_graphite(target, config)
            status = self.check(value)
            results.append((target, value, status))
            if status > worst:
                worst = status
        self.results = results
        return results, worst
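
A minimal sketch of how the returned (results, worst) pair might be consumed, assuming the Nagios-style status numbering (0 = OK, 1 = WARNING, 2 = CRITICAL, 3 = UNKNOWN) that the special-casing of status 3 above implies; the helper below is illustrative and not part of the code above.

# Hypothetical helper: render check_values() output as human-readable lines.
STATUS_LABELS = {0: 'OK', 1: 'WARNING', 2: 'CRITICAL', 3: 'UNKNOWN'}

def summarize_results(results, worst):
    lines = ['%s = %s -> %s' % (target, value, STATUS_LABELS.get(status, status))
             for (target, value, status) in results]
    return STATUS_LABELS.get(worst, worst), lines
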
Example #2
    tags = set()
    for target in targets_matching.values():
        for tag_name in target['tags'].keys():
            tags.add(tag_name)
    graphs_matching = filter_matching(query['ast'], graphs_all)
    graphs_matching = g.build(graphs_matching, query)
    stats = {'len_targets_all': s_metrics.count_metrics(),
             'len_graphs_all': len(graphs_all),
             'len_targets_matching': len(targets_matching),
             'len_graphs_matching': len(graphs_matching),
             }
    graphs = []
    targets_list = {}
    if query['statement'] in ('graph', 'lines', 'stack'):
        graphs_targets_matching = g.build_from_targets(targets_matching, query, preferences)[0]
        stats['len_graphs_targets_matching'] = len(graphs_targets_matching)
        graphs_matching.update(graphs_targets_matching)
        stats['len_graphs_matching_all'] = len(graphs_matching)
        for key in sorted(graphs_matching.iterkeys()):
            graphs.append((key, graphs_matching[key]))
    elif query['statement'] == 'list':
        # for now, only supports targets, not graphs
        targets_list = targets_matching
        stats['len_graphs_targets_matching'] = 0
        stats['len_graphs_matching_all'] = 0

    del query['target_modifiers']  # callback functions that are not serializable
    args = {'errors': errors,
            'query': query,
            'graphs': graphs,
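
The del query['target_modifiers'] step above is needed because the modifiers are callback functions, and function objects cannot be serialized. A standalone sketch of the failure mode, assuming JSON is the serialization in play (the excerpt does not show what the caller actually does with args):

import json

query = {'statement': 'graph', 'target_modifiers': [lambda target: target]}
try:
    json.dumps(query)  # raises TypeError: a function is not JSON serializable
except TypeError:
    del query['target_modifiers']
json.dumps(query)  # succeeds once the callbacks are dropped
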
Example #3
def test_equivalence():
    preferences = DummyPrefs()
    query = Query("")
    query['sum_by'] = {'core': ['']}
    targets = {
        'servers.host.cpu.cpu0.irq': {
            'id': 'servers.host.cpu.cpu0.irq',
            'tags': {
                'core': 'cpu0',
                'plugin': 'cpu',
                'server': 'host',
                'target_type': 'gauge_pct',
                'type': 'irq',
                'unit': 'cpu_state'
            }
        },
        'servers.host.cpu.cpu0.softirq': {
            'id': 'servers.host.cpu.cpu0.softirq',
            'tags': {
                'core': 'cpu0',
                'plugin': 'cpu',
                'server': 'host',
                'target_type': 'gauge_pct',
                'type': 'softirq',
                'unit': 'cpu_state'
            }
        },
        'servers.host.cpu.cpu2.irq': {
            'id': 'servers.host.cpu.cpu2.irq',
            'tags': {
                'core': 'cpu2',
                'plugin': 'cpu',
                'server': 'host',
                'target_type': 'gauge_pct',
                'type': 'irq',
                'unit': 'cpu_state'
            }
        },
        'servers.host.cpu.cpu2.softirq': {
            'id': 'servers.host.cpu.cpu2.softirq',
            'tags': {
                'core': 'cpu2',
                'plugin': 'cpu',
                'server': 'host',
                'target_type': 'gauge_pct',
                'type': 'softirq',
                'unit': 'cpu_state'
            }
        },
        'servers.host.cpu.total.irq': {
            'id': 'servers.host.cpu.total.irq',
            'tags': {
                'core': '_sum_',
                'plugin': 'cpu',
                'server': 'host',
                'target_type': 'gauge_pct',
                'type': 'irq',
                'unit': 'cpu_state'
            }
        },
        'servers.host.cpu.total.softirq': {
            'id': 'servers.host.cpu.total.softirq',
            'tags': {
                'core': '_sum_',
                'plugin': 'cpu',
                'server': 'host',
                'target_type': 'gauge_pct',
                'type': 'softirq',
                'unit': 'cpu_state'
            }
        }
    }

    graphs, _query = g.build_from_targets(targets, query, preferences)
    assert len(graphs) == 1
    _, graph = graphs.popitem()
    assert len(graph['targets']) == 2
    ids = [t['id'] for t in graph['targets']]
    assert ids == [
        'servers.host.cpu.total.irq', 'servers.host.cpu.total.softirq'
    ]

    # if there's a filter, equivalence doesn't hold and we should get 2 targets,
    # each the sum of two non-sums
    # and the _sum_ metrics should be removed
    query = Query("core:(_sum_|cpu0|cpu2) sum by core")
    #query['sum_by'] = {'core': ['']}
    #query['patterns'].append('core:(_sum_|cpu0|cpu2)')
    graphs, _query = g.build_from_targets(targets, query, preferences)
    assert len(graphs) == 1
    _, graph = graphs.popitem()
    assert len(graph['targets']) == 2
    ids = [t['id'] for t in graph['targets']]
    assert ids == [[
        'servers.host.cpu.cpu0.softirq', 'servers.host.cpu.cpu2.softirq'
    ], ['servers.host.cpu.cpu0.irq', 'servers.host.cpu.cpu2.irq']]
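
The grouping that the final assertion expects can be reproduced with a few lines of plain Python: drop the precomputed '_sum_' series (the filter removes them) and bucket the remaining ids by every tag except the one being summed over. This is a hypothetical sketch of the expected outcome, not the implementation under test:

from collections import defaultdict

def expected_sum_groups(targets, sum_tag='core', sum_marker='_sum_'):
    groups = defaultdict(list)
    for t in targets.values():
        tags = t['tags']
        if tags.get(sum_tag) == sum_marker:
            continue  # precomputed sums are filtered out
        # group by all remaining tags, i.e. sum across the 'core' dimension
        key = tuple(sorted((k, v) for (k, v) in tags.items() if k != sum_tag))
        groups[key].append(t['id'])
    return [sorted(ids) for ids in groups.values()]

# expected_sum_groups(targets) yields one id list per 'type' (irq and
# softirq), matching the assertion above up to ordering.
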
Example #4
def test_aggregation():
    preferences = DummyPrefs()
    # note: uneven aggregation: we only want 1 resulting metric
    query = Query("")
    query['avg_by'] = {'server': ['']}
    query['sum_by'] = {'type': ['']}

    targets = {
        'web1.db': {
            'id': 'web1.db',
            'tags': {
                'server': 'web1',
                'type': 'db',
                'n3': 'foo'
            }
        },
        'web1.php': {
            'id': 'web1.php',
            'tags': {
                'server': 'web1',
                'type': 'php',
                'n3': 'foo'
            }
        },
        'web2.db': {
            'id': 'web2.db',
            'tags': {
                'server': 'web2',
                'type': 'db',
                'n3': 'foo'
            }
        },
        'web2.php': {
            'id': 'web2.php',
            'tags': {
                'server': 'web2',
                'type': 'php',
                'n3': 'foo'
            }
        },
        'web2.memcache': {
            'id': 'web2.memcache',
            'tags': {
                'server': 'web2',
                'type': 'memcache',
                'n3': 'foo'
            }
        }
    }
    from pprint import pprint
    for (k, v) in targets.items():
        v = Target(v)
        v.get_graph_info(group_by={})
        targets[k] = v
    graphs, _query = g.build_from_targets(targets, query, preferences)
    # TODO: there should be only 1 graph, containing all 5 items
    print "Graphs:"
    for (k, v) in graphs.items():
        print "graph key"
        pprint(k)
        print "val:"
        pprint(v)
    assert {} == graphs
Example #5
def test_nontrivial_implicit_aggregation():
    preferences = DummyPrefs()
    # we ultimately want 1 graph with 1 line for each server,
    # irrespective of the values of the other tags (n1 and n2)
    # and even whether or not the metrics have those tags at all.
    query = Query("")
    query['group_by'] = {}
    query['sum_by'] = {'n1': [''], 'n2': ['']}

    targets = {
        # web1 : one with and without n2
        'web1.a.a': {
            'id': 'web1.a.a',
            'tags': {
                'server': 'web1',
                'n1': 'a',
                'n2': 'a'
            }
        },
        'web1.a': {
            'id': 'web1.a',
            'tags': {
                'server': 'web1',
                'n1': 'a',
            }
        },
        # web 2: 2 different values of n2
        'web2.a.a': {
            'id': 'web2.a.a',
            'tags': {
                'server': 'web2',
                'n1': 'a',
                'n2': 'a'
            }
        },
        'web2.a.b': {
            'id': 'web2.a.b',
            'tags': {
                'server': 'web2',
                'n1': 'a',
                'n2': 'b'
            }
        },
        # web3: with and without n2, diff value for n1
        'web3.a.a': {
            'id': 'web3.a.a',
            'tags': {
                'server': 'web3',
                'n1': 'a',
                'n2': 'a'
            }
        },
        'web3.b': {
            'id': 'web3.b',
            'tags': {
                'server': 'web3',
                'n1': 'b'
            }
        }
    }
    from pprint import pprint
    for (k, v) in targets.items():
        v = Target(v)
        v.get_graph_info(group_by={})
        targets[k] = v
    graphs, _query = g.build_from_targets(targets, query, preferences)
    # TODO: there should be only 1 graph, containing 3 lines, with each 2 targets per server
    # i.e. something like this:
    expected = {
        'targets': {
            'web1.a.a__web1.a': {
                'id': ['web1.a.a', 'web1.a']
            },
            'web2.a.a__web2.a.b': {
                'id': ['web2.a.a', 'web2.a.b']
            },
            'web3.a.a__web3.b': {
                'id': ['web3.a.a', 'web3.b']
            }
        }
    }

    print "Graphs:"
    for (k, v) in graphs.items():
        print "graph key"
        pprint(k)
        print "val:"
        pprint(v)
    assert expected == graphs