Example 1
    def test_hooks(self):
        """Job runs hooks before/after in stack order"""
        self.mock_benchmark['path'] = 'true'
        self.job_config['hooks'] = [
            {'hook': 'first', 'options': {'a': 1}},
            {'hook': 'second', 'options': {'b': 1}},
        ]
        mock = MagicMock()
        first = mock.first
        second = mock.second

        def get_mock_hook(name):
            if name == 'first':
                return first
            else:
                return second

        HookFactory.create.side_effect = get_mock_hook

        job = Job(self.job_config, self.mock_benchmark)
        job.run()

        self.assertListEqual([
            call.first.before_job({'a': 1}, job),
            call.second.before_job({'b': 1}, job),
            # post hooks run in reverse order
            call.second.after_job({'b': 1}, job),
            call.first.after_job({'a': 1}, job),
        ], mock.method_calls)
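The expected call list pins down the ordering contract: before_job hooks fire in declaration order, after_job hooks in reverse. A minimal sketch of that stack discipline, with the (hook, options) pairing and function names assumed purely for illustration, not taken from the project:

def run_with_hooks(job, hooks, run_binary):
    """hooks: list of (hook, options) pairs in declaration order."""
    for hook, options in hooks:
        hook.before_job(options, job)
    try:
        return run_binary()
    finally:
        # after_job hooks run in reverse (stack) order, as asserted above
        for hook, options in reversed(hooks):
            hook.after_job(options, job)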
Example 2
    def test_run_no_binary(self):
        """Nonexistent binary raises an error"""
        self.mock_benchmark['path'] = 'somethingthatdoesntexist'
        self.mock_benchmark['metrics'] = []

        job = Job(self.job_config, self.mock_benchmark)

        with self.assertRaises(OSError):
            job.run()
Example 3
    def test_run_no_binary(self):
        """Nonexistent binary raises an error"""
        self.mock_benchmark["path"] = "somethingthatdoesntexist"
        self.mock_benchmark["metrics"] = []

        job = Job(self.job_config, self.mock_benchmark)

        with self.assertRaises(OSError):
            job.run()
Example 4
    def test_run_timeout(self):
        """Binary running past timeout raises an error"""
        self.job_config['timeout'] = 0.1
        self.mock_benchmark['path'] = 'sleep'
        self.job_config['args'] = ['1']

        job = Job(self.job_config, self.mock_benchmark)

        with self.assertRaises(subprocess.TimeoutExpired):
            job.run()
Example 5
    def test_run_timeout(self):
        """Binary running past timeout raises an error"""
        self.job_config["timeout"] = 0.1
        self.mock_benchmark["path"] = "sleep"
        self.job_config["args"] = ["1"]

        job = Job(self.job_config, self.mock_benchmark)

        with self.assertRaises(subprocess.TimeoutExpired):
            job.run()
Example 6
    def test_run_parser_error(self):
        """A crashed parser raises an error"""
        self.mock_benchmark["path"] = "true"
        self.mock_benchmark["metrics"] = []
        self.mock_parser.parse.side_effect = ValueError("")

        job = Job(self.job_config, self.mock_benchmark)

        with self.assertRaises(ValueError):
            job.run()
Example 7
    def test_run_parser_error(self):
        """A crashed parser raises an error"""
        self.mock_benchmark['path'] = 'true'
        self.mock_benchmark['metrics'] = []
        self.mock_parser.parse.side_effect = ValueError('')

        job = Job(self.job_config, self.mock_benchmark)

        with self.assertRaises(ValueError):
            job.run()
Example 8
    def test_run_fail_no_check_returncode(self):
        """Bad return code doesn't fail when check_returncode is False"""
        self.job_config["args"] = ["-c", 'echo "error" >&2; exit 1']

        self.mock_benchmark["path"] = "sh"
        self.mock_benchmark["check_returncode"] = False

        job = Job(self.job_config, self.mock_benchmark)

        # job.run won't raise an exception
        job.run()
Example 9
    def test_run_fail_no_check_returncode(self):
        """Bad return code doesn't fail when check_returncode is False"""
        self.job_config['args'] = ['-c', 'echo "error" >&2; exit 1']

        self.mock_benchmark['path'] = 'sh'
        self.mock_benchmark['check_returncode'] = False

        job = Job(self.job_config, self.mock_benchmark)

        # job.run won't raise an exception
        job.run()
Example 10
    def test_arg_list(self):
        """Argument list is formatted correctly with lists or dicts"""
        self.assertListEqual(['--output-format=json', 'a'],
                             Job.arg_list(['--output-format=json', 'a']))

        expected = ['--output-format', 'json', '--file']
        actual = Job.arg_list({'output-format': 'json', 'file': None})
        # items are the same regardless of order
        self.assertCountEqual(expected, actual)
        # '--output-format' comes immediately before 'json'
        self.assertEqual(
            actual.index('--output-format') + 1, actual.index('json'))
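The assertions constrain Job.arg_list to pass lists through untouched and to flatten a dict into '--key value' flags, dropping the value when it is None. A hedged sketch of a function with that behavior (an assumption, not the project's actual implementation):

def arg_list(args):
    """Pass a list through unchanged; flatten a dict into CLI flags."""
    if isinstance(args, list):
        return list(args)
    result = []
    for key, value in args.items():
        result.append('--' + key)
        if value is not None:
            result.append(str(value))
    return result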
Example 11
    def test_run_fail(self):
        """Exit 1 raises an exception"""
        self.job_config["args"] = ["-c", 'echo "error" >&2; exit 1']

        self.mock_benchmark["path"] = "sh"

        job = Job(self.job_config, self.mock_benchmark)

        with self.assertRaises(subprocess.CalledProcessError) as e:
            job.run()
        e = e.exception
        self.assertEqual("stdout:\n\nstderr:\nerror", e.output.rstrip())
Example 12
    def test_run_fail(self):
        """Exit 1 raises an exception"""
        self.job_config['args'] = ['-c', 'echo "error" >&2; exit 1']

        self.mock_benchmark['path'] = 'sh'

        job = Job(self.job_config, self.mock_benchmark)

        with self.assertRaises(subprocess.CalledProcessError) as e:
            job.run()
        e = e.exception
        self.assertEqual('stdout:\n\nstderr:\nerror', e.output.rstrip())
Example 13
    def test_arg_list(self):
        """Argument list is formatted correctly with lists or dicts"""
        self.assertListEqual(["--output-format=json", "a"],
                             Job.arg_list(["--output-format=json", "a"]))

        expected = ["--output-format", "json", "--file"]
        actual = Job.arg_list({"output-format": "json", "file": None})
        # items are the same regardless of order
        self.assertCountEqual(expected, actual)
        # '--output-format' comes immediately before 'json'
        self.assertEqual(
            actual.index("--output-format") + 1, actual.index("json"))
Example 14
    def test_arg_list(self):
        """Argument list is formatted correctly with lists or dicts"""
        self.assertListEqual(
            ['--output-format=json', 'a'],
            Job.arg_list(['--output-format=json', 'a']))

        expected = ['--output-format', 'json', '--file']
        actual = Job.arg_list({'output-format': 'json', 'file': None})
        # items are the same regardless of order
        self.assertCountEqual(expected, actual)
        # '--output-format' comes immediately before 'json'
        self.assertEqual(actual.index('--output-format') + 1,
                         actual.index('json'))
Example 15
    def test_tee_stdouterr(self):
        """tee_output option works correctly

        With tee_output=True, the job should print the subprocess stdout lines
        starting with 'stdout:' and stderr starting with 'stderr:'"""
        mock_data = 'line 1 from echo\nthis is the second line'
        self.job_config['args'] = [mock_data]
        self.job_config['metrics'] = ['key']
        self.job_config['tee_output'] = True

        self.mock_benchmark['path'] = 'echo'
        self.mock_parser.parse.return_value = {'key': 'hello'}

        job = Job(self.job_config, self.mock_benchmark)
        job.run()

        expected = 'stdout: line 1 from echo\nstdout: this is the second line\n'
        self.assertEqual(sys.stdout.getvalue(), expected)

        # test with stderr and stdout
        # first reset stdout string
        sys.stdout.truncate(0)
        sys.stdout.seek(0)

        self.mock_benchmark['path'] = 'sh'
        self.job_config['args'] = [
            '-c', 'echo "error" >&2 && echo "from stdout"'
        ]
        self.job_config['tee_output'] = True

        job = Job(self.job_config, self.mock_benchmark)
        job.run()

        expected = 'stdout: from stdout\nstderr: error\n'
        self.assertEqual(sys.stdout.getvalue(), expected)
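The expected strings imply that tee_output simply echoes each captured line with a stream prefix. A minimal sketch of that behavior, assuming the decoded output lines are already available:

import sys


def tee_lines(stdout_lines, stderr_lines, out=sys.stdout):
    # prefix every line with its stream name, as the assertions expect
    for line in stdout_lines:
        out.write('stdout: {}\n'.format(line))
    for line in stderr_lines:
        out.write('stderr: {}\n'.format(line))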
Example 16
def main(args=sys.argv[1:]):
    # register reporter plugins before setting up the parser
    ReporterFactory.register("stdout", StdoutReporter)

    parser = setup_parser()
    args = parser.parse_args(args)

    # warn is 30, should default to 30 when verbose=0
    # each level below warning is 10 less than the previous
    log_level = args.verbose * (-10) + 30
    logging.basicConfig(format="%(levelname)s:%(name)s: %(message)s",
                        level=log_level)
    logger = logging.getLogger(__name__)

    logger.info('Loading benchmarks from "{}"'.format(args.benchmarks))
    with open(args.benchmarks) as tests_file:
        benchmarks = yaml.safe_load(tests_file)

    logger.info('Loading jobs from "{}"'.format(args.jobs_file))
    with open(args.jobs_file) as jobs_file:
        job_configs = yaml.safe_load(jobs_file)

    jobs = [
        Job(j, benchmarks[j["benchmark"]]) for j in job_configs
        if "tests" not in j
    ]
    jobs = {j.name: j for j in jobs}

    logger.info("Loaded {} benchmarks and {} jobs".format(
        len(benchmarks), len(jobs)))

    args.command.run(args, jobs)
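For reference, the verbosity arithmetic above maps repeated -v flags onto the standard logging levels (assuming setup_parser counts -v occurrences into args.verbose); a quick check:

import logging

# verbose=0 -> 30 (WARNING), verbose=1 -> 20 (INFO), verbose=2 -> 10 (DEBUG)
for verbose in (0, 1, 2):
    print(verbose, logging.getLevelName(verbose * (-10) + 30))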
Example 17
    def test_save(self):
        """A json file is created in the right directory with the right name
           when saving a job result."""
        history = History('/history')
        job = Job(
            {
                'args': ['somearg'],
                'benchmark': 'bench',
                'description': 'cool description',
                'hooks': [],
                'metrics': ['mysupercoolmetric'],
                'name': 'job name',
            }, {
                'path': 'true',
                'parser': 'parser',
            })

        now = datetime.now(timezone.utc)

        expected_path = os.path.join(
            '/history', 'job_name',
            now.strftime('%Y-%m-%dT%H:%M:%SZ') + '.json')

        # make sure file doesn't already exist
        self.assertFalse(self.fs.Exists(expected_path))

        history.save_job_result(job, Metrics({'mysupercoolmetric': 1}), now)

        # make sure it exists now
        self.assertTrue(self.fs.Exists(expected_path))
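The expected path implies that spaces in the job name become underscores and the UTC timestamp becomes the file name. A sketch of that construction under those assumptions (result_path is a made-up helper, not the History API):

import os
from datetime import datetime, timezone


def result_path(history_dir, job_name, now):
    # 'job name' -> 'job_name'; the timestamp is the file name
    safe_name = job_name.replace(' ', '_')
    filename = now.strftime('%Y-%m-%dT%H:%M:%SZ') + '.json'
    return os.path.join(history_dir, safe_name, filename)


# e.g. result_path('/history', 'job name', datetime.now(timezone.utc))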
Example 18
    def test_run_succeed(self):
        """Echo is able to run and be parsed correctly

        Run a job to echo some json and make sure it can be parsed and is
        exported correctly."""
        mock_data = '{"key": "hello"}'
        self.job_config['args'] = [mock_data]
        self.job_config['metrics'] = ['key']

        self.mock_benchmark['path'] = 'echo'
        self.mock_parser.parse.return_value = {'key': 'hello'}

        job = Job(self.job_config, self.mock_benchmark)

        metrics = job.run()
        self.mock_parser.parse.assert_called_with([mock_data, ''], [''], 0)
        self.assertDictEqual({'key': 'hello'}, metrics.metrics())
Example 19
    def test_run_timeout_is_pass(self):
        """Binary running past timeout raises an error"""
        self.job_config["timeout"] = 0.1
        self.job_config["timeout_is_pass"] = True
        self.mock_benchmark["path"] = "/bin/sh"
        self.job_config["args"] = [
            "-c",
            'echo "wow" && echo "err" > /dev/stderr && sleep 2',
        ]

        self.mock_parser.parse.return_value = {"success": "True"}

        job = Job(self.job_config, self.mock_benchmark)

        job.run()

        self.mock_parser.parse.assert_called_with(["wow"], ["err"], 0)
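The final assertion shows that on a tolerated timeout the parser still receives whatever output was captured, with a return code of 0. A hedged sketch of how the subprocess call might swallow the timeout (names and structure assumed, not the project's code):

import subprocess


def run_binary(cmd, timeout, timeout_is_pass):
    """Return (stdout_lines, stderr_lines, returncode)."""
    try:
        proc = subprocess.run(cmd, capture_output=True, timeout=timeout)
        out, err, code = proc.stdout, proc.stderr, proc.returncode
    except subprocess.TimeoutExpired as exc:
        if not timeout_is_pass:
            raise
        # treat the timeout as a pass, keeping the partial output
        out, err, code = exc.stdout or b'', exc.stderr or b'', 0
    return out.decode().splitlines(), err.decode().splitlines(), code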
Example 20
    def test_run_succeed(self):
        """Echo is able to run and be parsed correctly

        Run a job to echo some json and make sure it can be parsed and is
        exported correctly."""
        mock_data = '{"key": "hello"}'
        self.job_config["args"] = [mock_data]
        self.job_config["metrics"] = ["key"]

        self.mock_benchmark["path"] = "echo"
        self.mock_parser.parse.return_value = {"key": "hello"}

        job = Job(self.job_config, self.mock_benchmark)

        metrics = job.run()
        self.mock_parser.parse.assert_called_with([mock_data], [], 0)
        self.assertDictEqual({"key": "hello"}, metrics)
Example 21
    def test_validate_metrics(self):
        """Metrics with keys that don't match definition raise an error"""
        self.job_config['metrics'] = ['rps']
        job = Job(self.job_config, self.mock_benchmark)
        with self.assertRaises(AssertionError):
            job.validate_metrics(Metrics({}))
        with self.assertRaises(AssertionError):
            job.validate_metrics(Metrics({'latency': {'p50': 1}}))

        self.assertTrue(job.validate_metrics(Metrics({'rps': 1})))

        self.job_config['metrics'] = {'latency': ['p50', 'p95']}
        job = Job(self.job_config, self.mock_benchmark)
        self.assertTrue(
            job.validate_metrics(Metrics({'latency': {
                'p50': 1,
                'p95': 2
            }})))
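The cases above suggest validation recurses over the configured metric definition and asserts that every listed key is present. A rough sketch of such a check, assumed rather than taken from the project:

def validate_metrics(definition, metrics):
    # definition: a list of names, or a dict of name -> nested definition
    if isinstance(definition, dict):
        for key, sub in definition.items():
            assert key in metrics
            validate_metrics(sub, metrics[key])
    else:
        for key in definition:
            assert key in metrics
    return True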
Example 22
    def test_job_suite_job_fail(self):
        """JobSuite with a failed job raises an error"""
        self.mock_benchmark['path'] = 'abinaryhasnopath'

        fail_job = Job(self.job_config, self.mock_benchmark)

        suite = JobSuite({'name': 'suite', 'description': 'test'}, [fail_job])
        with self.assertRaises(OSError):
            suite.run()
Example 23
    def test_strip_metrics(self):
        """Metrics with keys that aren't in definition are removed"""
        config = defaultdict(str)
        config['args'] = {}

        self.job_config['metrics'] = ['rps']
        job = Job(self.job_config, self.mock_benchmark)

        # an empty set of metrics should stay empty
        stripped = job.strip_metrics(Metrics({}))
        self.assertEqual(len(stripped.metrics_list()), 0)

        # only passing the desired metric should stay the same
        stripped = job.strip_metrics(Metrics({'rps': 1}))
        self.assertEqual(len(stripped.metrics_list()), 1)

        # passing in more metrics should give just the requested ones
        stripped = job.strip_metrics(Metrics({'rps': 1, 'extra': 2}))
        self.assertEqual(len(stripped.metrics_list()), 1)
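By contrast with validation, stripping is a pure filter: anything not named in the job's metric definition is dropped. A one-line sketch of that behavior for flat definitions (an assumption; the project may also handle nested definitions):

def strip_metrics(definition, metrics):
    # keep only the metrics the job's config asks for
    return {key: value for key, value in metrics.items() if key in definition}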
Example 24
    def test_hooks(self):
        """Job runs hooks before/after in stack order"""
        self.mock_benchmark["path"] = "true"
        self.job_config["hooks"] = [
            {
                "hook": "first",
                "options": {
                    "a": 1
                }
            },
            {
                "hook": "second",
                "options": {
                    "b": 1
                }
            },
        ]
        mock = MagicMock()
        first = mock.first
        second = mock.second

        def get_mock_hook(name):
            if name == "first":
                return first
            else:
                return second

        HookFactory.create.side_effect = get_mock_hook

        job = Job(self.job_config, self.mock_benchmark)
        job.run()

        self.assertListEqual(
            [
                call.first.before_job({"a": 1}, job),
                call.second.before_job({"b": 1}, job),
                # post hooks run in reverse order
                call.second.after_job({"b": 1}, job),
                call.first.after_job({"a": 1}, job),
            ],
            mock.method_calls,
        )
Example 25
    def test_tee_output_file(self):
        """tee_output can write to file."""
        mock_data = 'line 1 from echo\nthis is the second line'
        self.job_config['args'] = [mock_data]
        self.job_config['metrics'] = ['key']

        fd, teefile = tempfile.mkstemp()
        os.close(fd)

        self.mock_benchmark['path'] = 'sh'
        self.job_config['args'] = ['-c',
                                   'echo "error" >&2 && echo "from stdout"']
        self.job_config['tee_output'] = teefile

        job = Job(self.job_config, self.mock_benchmark)
        job.run()

        expected = 'stdout: from stdout\nstderr: error\n'
        with open(teefile, 'r') as tmp:
            self.assertEqual(tmp.read(), expected)
        os.remove(teefile)
Example 26
    def test_tee_output_file(self):
        """tee_output can write to file."""
        mock_data = "line 1 from echo\nthis is the second line"
        self.job_config["args"] = [mock_data]
        self.job_config["metrics"] = ["key"]

        fd, teefile = tempfile.mkstemp()
        os.close(fd)

        self.mock_benchmark["path"] = "sh"
        self.job_config["args"] = [
            "-c", 'echo "error" >&2 && echo "from stdout"'
        ]
        self.job_config["tee_output"] = teefile

        job = Job(self.job_config, self.mock_benchmark)
        job.run()

        expected = "stdout: from stdout\nstderr: error\n"
        with open(teefile, "r") as tmp:
            self.assertEqual(tmp.read(), expected)
        os.remove(teefile)
Example 27
    def test_validate_metrics(self):
        """Metrics with keys that don't match definition raise an error"""
        self.job_config['metrics'] = ['rps']
        job = Job(self.job_config, self.mock_benchmark)
        with self.assertRaises(AssertionError):
            job.validate_metrics(Metrics({}))
        with self.assertRaises(AssertionError):
            job.validate_metrics(Metrics({'latency': {'p50': 1}}))

        self.assertTrue(job.validate_metrics(Metrics({'rps': 1})))

        self.job_config['metrics'] = {'latency': ['p50', 'p95']}
        job = Job(self.job_config, self.mock_benchmark)
        self.assertTrue(job.validate_metrics(
            Metrics({'latency': {'p50': 1, 'p95': 2}})))
Example 28
    def test_no_validate_metrics(self):
        """When validation is disabled, job leaves metrics as-is"""
        config = defaultdict(str)
        config['args'] = {}
        self.mock_benchmark['path'] = 'true'

        self.job_config['metrics'] = ['_no_validate', 'something']

        job = Job(self.job_config, self.mock_benchmark)

        # an empty set of metrics should stay empty
        self.mock_parser.parse.return_value = {}
        metrics = job.run()
        self.assertEqual(len(metrics.metrics_list()), 0)

        # metric defined in config should remain
        self.mock_parser.parse.return_value = {'something': 1}
        metrics = job.run()
        self.assertEqual(len(metrics.metrics_list()), 1)

        # more metrics besides defined should keep all
        self.mock_parser.parse.return_value = {'something': 1, 'extra': 2}
        metrics = job.run()
        self.assertEqual(len(metrics.metrics_list()), 2)
Example 29
    def test_tee_stdouterr(self):
        """tee_output option works correctly

        With tee_output=True, the job should print the subprocess stdout lines
        starting with 'stdout:' and stderr starting with 'stderr:'"""
        mock_data = "line 1 from echo\nthis is the second line"
        self.job_config["args"] = [mock_data]
        self.job_config["metrics"] = ["key"]
        self.job_config["tee_output"] = True

        self.mock_benchmark["path"] = "echo"
        self.mock_parser.parse.return_value = {"key": "hello"}

        job = Job(self.job_config, self.mock_benchmark)
        # capture stdout/err
        orig_stdout, orig_stderr = sys.stdout, sys.stderr
        sys.stdout = io.StringIO()
        sys.stderr = io.StringIO()

        job.run()

        expected = "stdout: line 1 from echo\nstdout: this is the second line\n"
        self.assertEqual(sys.stdout.getvalue(), expected)

        # test with stderr and stdout
        # first reset stdout string
        sys.stdout.truncate(0)
        sys.stdout.seek(0)

        self.mock_benchmark["path"] = "sh"
        self.job_config["args"] = [
            "-c", 'echo "error" >&2 && echo "from stdout"'
        ]
        self.job_config["tee_output"] = True

        job = Job(self.job_config, self.mock_benchmark)
        job.run()

        expected = "stdout: from stdout\nstderr: error\n"
        self.assertEqual(sys.stdout.getvalue(), expected)

        sys.stdout = orig_stdout
        sys.stderr = orig_stderr
Example 30
def main(args=sys.argv[1:]):
    # register reporter plugins before setting up the parser
    ReporterFactory.register('stdout', StdoutReporter)

    parser = setup_parser()
    args = parser.parse_args(args)

    # warn is 30, should default to 30 when verbose=0
    # each level below warning is 10 less than the previous
    log_level = args.verbose * (-10) + 30
    logging.basicConfig(format='%(levelname)s:%(name)s: %(message)s',
                        level=log_level)
    logger = logging.getLogger(__name__)

    logger.info('Loading benchmarks from "{}"'.format(args.benchmarks))
    with open(args.benchmarks) as tests_file:
        benchmarks = yaml.safe_load(tests_file)

    logger.info('Loading jobs from "{}"'.format(args.jobs_file))
    with open(args.jobs_file) as jobs_file:
        job_configs = yaml.safe_load(jobs_file)

    # on the first pass, construct all normal jobs, then make job suites
    jobs = [Job(j, benchmarks[j['benchmark']]) for j in job_configs
            if 'tests' not in j]
    jobs = {j.name: j for j in jobs}
    # after all the regular jobs are created, create the job suites
    for config in job_configs:
        if 'tests' in config:
            suite_jobs = []
            for name in config['tests']:
                try:
                    suite_jobs.append(jobs[name])
                except KeyError:
                    logger.error('Couldn\'t find job "%s" in suite "%s"',
                                 name, config['name'])
                    exit(1)

            suite = JobSuite(config, suite_jobs)
            jobs[suite.name] = suite

    logger.info('Loaded {} benchmarks and {} jobs'
                .format(len(benchmarks), len(jobs)))

    args.command.run(args, jobs)
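For orientation, a hypothetical job_configs value that exercises both passes of the loop above: two regular jobs plus a suite whose 'tests' list refers to them by name (all field values invented):

job_configs = [
    {'name': 'job one', 'benchmark': 'bench_a', 'args': [], 'metrics': []},
    {'name': 'job two', 'benchmark': 'bench_b', 'args': [], 'metrics': []},
    # entries with a 'tests' key are turned into JobSuites on the second pass
    {'name': 'smoke suite', 'description': 'both jobs',
     'tests': ['job one', 'job two']},
]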
Example 31
    def test_consistency(self):
        """History is able to detect when a job configuration has changed."""
        history = History('/history')
        consistent_job = Job(
            {
                'args': ['somearg'],
                'benchmark': 'bench',
                'description': 'cool description',
                'hooks': [],
                'metrics': ['mysupercoolmetric'],
                'name': 'job name',
            }, {
                'path': 'true',
                'parser': 'parser',
            })

        self.fs.CreateFile('/history/job_name/1.json',
                           contents='''
                           {
                             "config": {
                               "args": ["somearg"],
                               "benchmark": "bench",
                               "description": "cool description",
                               "hooks": [],
                               "metrics": ["mysupercoolmetric"],
                               "name": "job name",
                               "path": "true",
                               "parser": "parser"
                             },
                             "job": "job name",
                             "metrics": {
                               "mysupercoolmetric": 1
                             },
                             "timestamp": "2017-06-26T21:41:04"
                           }
                           ''')

        self.assertTrue(history.is_job_config_consistent(consistent_job))

        inconsistent_job = consistent_job
        inconsistent_job.config['args'] = ['some different arg']

        self.assertFalse(history.is_job_config_consistent(inconsistent_job))
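The fixture stores the job's config merged with the benchmark fields under 'config', so the consistency check presumably compares that stored dict with the current one. A rough sketch under that assumption (not the History implementation):

def is_job_config_consistent(saved_record, job_config, benchmark_config):
    # the historical 'config' is the job config merged with its benchmark
    current = dict(job_config)
    current.update(benchmark_config)
    return saved_record['config'] == current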
Example 32
    def test_invalid_format(self):
        """History complains when a historical record is in an invalid format
           (missing key(s))."""
        history = History('/history')
        job = Job(
            {
                'args': ['somearg'],
                'benchmark': 'bench',
                'description': 'cool description',
                'metrics': ['mysupercoolmetric'],
                'name': 'broken job',
            }, {
                'path': 'true',
                'parser': 'parser',
            })

        self.fs.CreateFile('/history/broken_job/1.json',
                           contents='''
                           {
                             "config": {
                               "args": ["somearg"],
                               "benchmark": "bench",
                               "description": "cool description",
                               "hooks": [],
                               "metrics": ["mysupercoolmetric"],
                               "name": "job name",
                               "path": "true",
                               "parser": "parser"
                             },
                             "job": "broken_job",
                             "metrics": {
                                 "mysupercoolmetric": 1
                             }
                           }''')

        with self.assertRaises(KeyError):
            history.load_historical_results(job)