Example #1
0
 def test_one_anomaly(self):
     """Exactly one metric exceeds its tolerance and is flagged."""
     self.job.tolerances = dict.fromkeys(('a', 'b', 'c'), 1.0)
     current = {'a': 4, 'b': 2, 'c': 3}
     found = analyze(current, self.job, self.history)
     # Only 'a' is out of band: value 4 against an allowed [0.0, 3.0].
     self.assertCountEqual([('a', 4, 0.0, 3.0)], found)
Example #2
0
 def test_nested_names(self):
     """Tolerances supplied as nested dicts are flattened to dotted keys."""
     # Job constructor uses Metrics#flatten for tolerances as well
     nested = {'a': {'b': 1}}
     self.job.tolerances = Metrics.flatten(nested)
     self.history.load_historical_results.return_value = [
         metrics({'a.b': 1}),
     ]
     result = analyze({'a.b': 4}, self.job, self.history)
     # Flattened name 'a.b' should be matched against its flattened tolerance.
     self.assertCountEqual([('a.b', 4, 0.0, 2.0)], result)
Example #3
0
File: run.py  Project: santakd/fbkutils
    def run(self, args, jobs):
        """Run the selected benchmark jobs, report anomalies, save results.

        args: parsed CLI namespace; reads .jobs (optional name filter),
              .results (history location), and .clowntown (override flag).
        jobs: mapping of job name -> job object.

        Exits the process with status 1 on an unknown job name and 3 on an
        inconsistent prior configuration (unless --clowntown is given).
        """
        reporter = ReporterFactory.create('default')

        # Restrict to explicitly requested jobs, failing fast on typos
        # before anything runs.
        if args.jobs:
            for name in args.jobs:
                if name not in jobs:
                    logger.error('No job "{}" found'.format(name))
                    exit(1)
            jobs = {name: jobs[name] for name in args.jobs}

        jobs = jobs.values()
        print('Will run {} job(s)'.format(len(jobs)))

        history = History(args.results)
        # Single timestamp for the whole batch so all results group together.
        now = datetime.now(timezone.utc)

        for job in jobs:
            print('Running "{}": {}'.format(job.name, job.description))

            # Refuse to mix results from a differing prior config unless
            # the user explicitly overrides with --clowntown.
            if not args.clowntown and not history.is_job_config_consistent(
                    job):
                logger.error('There was a previous run of "{}" that had a'
                             ' different configuration, this is likely to make'
                             ' your results confusing.'.format(job.name))
                logger.error('You can proceed anyway using --clowntown')
                exit(3)

            metrics = job.run()

            anomalies = analyze(metrics, job, history)

            reporter.report(job, metrics, anomalies)

            history.save_job_result(job, metrics, now)

            # if a correctness test failed, make it obvious
            # (call correctness_tests() once instead of twice)
            correctness = metrics.correctness_tests()
            if False in correctness.values():
                # find which one(s) failed
                for key, val in correctness.items():
                    if not val:
                        logger.error('Correctness test "%s" failed', key)

        reporter.close()
Example #4
0
 def test_no_anomalies(self):
     """Every metric stays within tolerance, so nothing is flagged."""
     self.job.tolerances = dict.fromkeys(('a', 'b', 'c'), 1.0)
     result = analyze({'a': 2, 'b': 2, 'c': 3}, self.job, self.history)
     self.assertEqual(0, len(result))
Example #5
0
 def test_no_thresholds(self):
     """With no configured tolerances, nothing can be anomalous."""
     self.job.tolerances = {}
     result = analyze({'a': 1, 'b': 2, 'c': 3}, self.job, self.history)
     self.assertEqual(0, len(result))