def test_classify_single_metric(self):
    """Each fixture metric classifies into its expected severity bucket."""
    mc = MetricClassifier(goodmetricdefs)
    # Data-driven: (fixture list, expected classification) pairs.
    expectations = (
        (okmetrics, 'OK'),
        (warnmetrics, 'WARNING'),
        (criticalmetrics, 'CRITICAL'),
    )
    for metrics, expected in expectations:
        for m in metrics:
            self.assertEqual(mc.classify(m[0], m[1]), expected)
def test_classify_process(self):
    """Exercise the aggregate helpers end-to-end on the sample metrics."""
    mc = MetricClassifier(goodmetricdefs)
    classified = mc.classify_metrics(samplemetrics)
    self.assertEqual(classified, samplemetricresults)
    # Worst metric drives the overall return code.
    self.assertEqual(mc.worst_metric(samplemetrics), ('d', 3))
    self.assertEqual(mc.return_code(samplemetrics), 3)
    # With unknown_as_critical the code collapses to CRITICAL (2).
    self.assertEqual(
        mc.return_code(samplemetrics, unknown_as_critical=True), 2
    )
def test_classify_multiple_metrics(self):
    """classify_metrics maps the whole sample set to the expected results."""
    classifier = MetricClassifier(goodmetricdefs)
    result = classifier.classify_metrics(samplemetrics)
    self.assertEqual(result, samplemetricresults)
def test_create_good_metricclassifier(self):
    """Constructing a MetricClassifier from valid definitions succeeds.

    Uses assertIsNotNone rather than assertNotEqual(mc, None): it tests
    identity (the object exists) instead of equality, and produces a
    clearer failure message.
    """
    mc = MetricClassifier(goodmetricdefs)
    self.assertIsNotNone(mc)
def __init__(self, checks, objs):
    """Keep the supplied checks and objects, and build the shared
    metric classifier from the module-level definitions."""
    # Assignments are independent; classifier built first for clarity.
    self.mc = MetricClassifier(_metricdefs)
    self.checks = checks
    self.objs = objs
def __init__(self, checks):
    """Store the checks, start with empty metric/object stores, and
    build the metric classifier from the module-level definitions."""
    self.checks = checks
    # Fresh per-instance containers (populated later by the caller).
    self.metrics = {}
    self.objs = {}
    self.mc = MetricClassifier(_metricdefs)