def test_filename_format():
    """Result filenames combine machine dir, commit hash and environment name."""
    # Short environment names appear verbatim in the filename.
    res = results.Results({'machine': 'foo'}, [], "commit", 0, "", "env", {})
    assert res._filename == join("foo", "commit-env.json")

    # Overlong environment names get a hashed suffix instead
    # (presumably to keep the path length bounded — see results module).
    res = results.Results({'machine': 'foo'}, [], "hash", 0, "", "a" * 128, {})
    expected = join("foo", "hash-env-e510683b3f5ffe4093d021808bc6ff70.json")
    assert res._filename == expected
def test_json_timestamp(tmpdir):
    """Per-benchmark timestamps are saved as JS timestamps in the result file."""
    tmpdir = six.text_type(tmpdir)

    epoch = datetime.datetime(1970, 1, 1)
    begin = datetime.datetime(1971, 1, 1)
    finish = datetime.datetime.utcnow()

    r = results.Results({'machine': 'mach'}, {}, 'aaaa',
                        util.datetime_to_timestamp(epoch), 'py', 'env')
    value = {
        'result': [42],
        'params': [],
        'stats': None,
        'samples': None,
        'number': None,
        'started_at': begin,
        'ended_at': finish,
    }
    r.add_result('some_benchmark', value)
    r.save(tmpdir)

    # Reload the raw JSON and check both timestamps were converted.
    saved = util.load_json(join(tmpdir, 'mach', 'aaaa-env.json'))
    assert saved['started_at']['some_benchmark'] == \
        util.datetime_to_js_timestamp(begin)
    assert saved['ended_at']['some_benchmark'] == \
        util.datetime_to_js_timestamp(finish)
def test_json_timestamp(tmpdir):
    """Benchmark start time and duration are persisted in the result file.

    The start time is stored as a JS (millisecond) timestamp; the duration
    is stored as-is.
    """
    tmpdir = six.text_type(tmpdir)

    epoch = datetime.datetime(1970, 1, 1)
    start = datetime.datetime(1971, 1, 1)
    elapsed = 1.5

    r = results.Results({'machine': 'mach'}, {}, 'aaaa',
                        util.datetime_to_timestamp(epoch), 'py', 'env', {})
    bench_result = runner.BenchmarkResult(result=[42],
                                          samples=[None],
                                          number=[None],
                                          profile=None,
                                          errcode=0,
                                          stderr='')
    benchmark = {'name': 'some_benchmark',
                 'version': 'some version',
                 'params': []}
    r.add_result(benchmark, bench_result, started_at=start, duration=elapsed)
    r.save(tmpdir)

    data = util.load_json(join(tmpdir, 'mach', 'aaaa-env.json'))
    # Result rows are stored positionally; pair them with the column names.
    row = dict(zip(data['result_columns'], data['results']['some_benchmark']))
    assert row['started_at'] == util.datetime_to_js_timestamp(start)
    assert row['duration'] == elapsed
def test_results(tmpdir):
    """Saving then reloading a Results file round-trips the stored timings."""
    tmpdir = six.text_type(tmpdir)
    resultsdir = join(tmpdir, "results")

    for i in six.moves.xrange(10):
        r = results.Results({'machine': 'foo', 'arch': 'x86_64'}, {},
                            hex(i), i * 1000000, '2.7',
                            'some-environment-name')

        # A few distinct timing values per commit.
        timings = {
            'suite1.benchmark1': float(i * 0.001),
            'suite1.benchmark2': float(i * i * 0.001),
            'suite2.benchmark1': float((i + 1) ** -1),
        }
        for name, seconds in timings.items():
            r.add_time(name, seconds)

        r.save(resultsdir)

        reloaded = results.Results.load(join(resultsdir, r._filename))
        assert reloaded._results == r._results
        assert reloaded.date == r.date
        assert reloaded.commit_hash == r.commit_hash
        assert reloaded._filename == r._filename
def test_json_timestamp(tmpdir):
    """Per-benchmark timestamps are saved as JS timestamps in the result file."""
    tmpdir = six.text_type(tmpdir)

    epoch = datetime.datetime(1970, 1, 1)
    begin = datetime.datetime(1971, 1, 1)
    finish = datetime.datetime.utcnow()

    r = results.Results({'machine': 'mach'}, {}, 'aaaa',
                        util.datetime_to_timestamp(epoch), 'py', 'env')
    value = runner.BenchmarkResult(result=[42],
                                   params=[],
                                   stats=None,
                                   samples=None,
                                   started_at=begin,
                                   ended_at=finish,
                                   profile=None,
                                   errcode=0,
                                   stderr='')
    r.add_result('some_benchmark', value, "some version")
    r.save(tmpdir)

    # Reload the raw JSON and check both timestamps were converted.
    data = util.load_json(join(tmpdir, 'mach', 'aaaa-env.json'))
    assert data['started_at']['some_benchmark'] == \
        util.datetime_to_js_timestamp(begin)
    assert data['ended_at']['some_benchmark'] == \
        util.datetime_to_js_timestamp(finish)
def test_results(tmpdir):
    """Round-trip a Results file built against a real environment object."""
    envdir = six.text_type(tmpdir.join("env"))
    version = "{0[0]}.{0[1]}".format(sys.version_info)
    env = environment.get_environment(envdir, version, {})
    resultsdir = six.text_type(tmpdir.join("results"))

    for i in six.moves.xrange(10):
        r = results.Results({'machine': 'foo', 'arch': 'x86_64'},
                            env, hex(i), i * 1000000)

        # A few distinct timing values per commit.
        timings = {
            'suite1.benchmark1': float(i * 0.001),
            'suite1.benchmark2': float(i * i * 0.001),
            'suite2.benchmark1': float((i + 1) ** -1),
        }
        for name, seconds in timings.items():
            r.add_time(name, seconds)

        r.save(resultsdir)

        reloaded = results.Results.load(os.path.join(resultsdir, r._filename))
        assert reloaded._results == r._results
        assert reloaded.date == r.date
        assert reloaded.commit_hash == r.commit_hash
def test_results(tmpdir):
    """Round-trip Results through save/load and load_data, and check getters.

    Stores three benchmarks (parameterized and not, with and without
    samples/stats), saves to disk, reloads two different ways, and verifies
    the stored data and all ``get_*`` accessors.
    """
    tmpdir = six.text_type(tmpdir)

    timestamp1 = datetime.datetime.utcnow()
    duration = 1.5

    resultsdir = join(tmpdir, "results")
    for i in six.moves.xrange(10):
        r = results.Results({'machine': 'foo', 'arch': 'x86_64'}, {},
                            hex(i), i * 1000000, '2.7',
                            'some-environment-name', {})

        x1 = float(i * 0.001)
        # Fixed copy-paste slip: was float(i * 0.001), making benchmark2
        # identical to benchmark1; siblings use i * i * 0.001 here.
        x2 = float(i * i * 0.001)
        x3 = float((i + 1)**-1)

        values = {
            'suite1.benchmark1': {'result': [x1], 'number': [1],
                                  'samples': [[x1, x1]], 'params': [['a']],
                                  'version': "1", 'profile': b'\x00\xff'},
            'suite1.benchmark2': {'result': [x2], 'number': [1],
                                  'samples': [[x2, x2, x2]], 'params': [],
                                  'version': "1", 'profile': b'\x00\xff'},
            'suite2.benchmark1': {'result': [x3], 'number': [None],
                                  'samples': [None], 'params': [['c']],
                                  'version': None, 'profile': b'\x00\xff'}
        }

        for key, val in values.items():
            v = runner.BenchmarkResult(result=val['result'],
                                       samples=val['samples'],
                                       number=val['number'],
                                       profile=val['profile'],
                                       errcode=0,
                                       stderr='')
            benchmark = {'name': key,
                         'version': val['version'],
                         'params': val['params']}
            r.add_result(benchmark, v, record_samples=True,
                         started_at=timestamp1, duration=duration)

        # Save / add_existing_results roundtrip
        r.save(resultsdir)

        r2 = results.Results.load(join(resultsdir, r._filename))
        assert r2.date == r.date
        assert r2.commit_hash == r.commit_hash
        assert r2._filename == r._filename

        # Second reload path: a fresh Results object pulling from disk.
        r3 = results.Results(r.params, r._requirements, r.commit_hash,
                             r.date, r._python, r.env_name, {})
        r3.load_data(resultsdir)

        for rr in [r2, r3]:
            assert rr._results == r._results
            # Floats are truncated on save, so compare against truncated data.
            assert rr._stats == _truncate_floats(r._stats)
            assert rr._samples == r._samples
            assert rr._profiles == r._profiles
            assert rr.started_at == r._started_at
            assert rr.duration == _truncate_floats(r._duration)
            assert rr.benchmark_version == r._benchmark_version

        # Check the get_* methods
        assert sorted(r2.get_all_result_keys()) == sorted(values.keys())
        for bench in r2.get_all_result_keys():
            # Get with same parameters as stored
            params = r2.get_result_params(bench)
            assert params == values[bench]['params']
            assert r2.get_result_value(bench, params) == values[bench]['result']
            assert r2.get_result_samples(bench, params) == values[bench]['samples']
            stats = r2.get_result_stats(bench, params)
            if values[bench]['number'][0] is None:
                assert stats == [None]
            else:
                assert stats[0]['number'] == values[bench]['number'][0]

            # Get with different parameters than stored (should return n/a)
            bad_params = [['foo', 'bar']]
            assert r2.get_result_value(bench, bad_params) == [None, None]
            assert r2.get_result_stats(bench, bad_params) == [None, None]
            assert r2.get_result_samples(bench, bad_params) == [None, None]

            # Get profile
            assert r2.get_profile(bench) == b'\x00\xff'

        # Check get_result_keys: only benchmarks whose stored version
        # matches (or is None) are reported.
        mock_benchmarks = {
            'suite1.benchmark1': {'version': '1'},
            'suite1.benchmark2': {'version': '2'},
            'suite2.benchmark1': {'version': '2'},
        }
        assert sorted(r2.get_result_keys(mock_benchmarks)) == [
            'suite1.benchmark1', 'suite2.benchmark1'
        ]
def test_results(tmpdir):
    """Round-trip Results via save/load and add_existing_results.

    Stores three benchmarks with stats, samples and timestamps, then checks
    that both a reloaded and a merged Results object carry identical data,
    and that the ``get_*`` accessors behave for good and bad parameters.
    """
    tmpdir = six.text_type(tmpdir)

    timestamp1 = datetime.datetime.utcnow()
    timestamp2 = datetime.datetime.utcnow()

    resultsdir = join(tmpdir, "results")
    for i in six.moves.xrange(10):
        r = results.Results({'machine': 'foo', 'arch': 'x86_64'}, {},
                            hex(i), i * 1000000, '2.7',
                            'some-environment-name')

        values = {
            'suite1.benchmark1': {'result': [float(i * 0.001)],
                                  'stats': [{'foo': 1}],
                                  'samples': [[1, 2]],
                                  'number': [6],
                                  'params': [['a']]},
            'suite1.benchmark2': {'result': [float(i * i * 0.001)],
                                  'stats': [{'foo': 2}],
                                  'samples': [[3, 4]],
                                  'number': [7],
                                  'params': []},
            'suite2.benchmark1': {'result': [float((i + 1)**-1)],
                                  'stats': [{'foo': 3}],
                                  'samples': [[5, 6]],
                                  'number': [8],
                                  'params': [['c']]}
        }

        for name, payload in values.items():
            # Timestamps are added to each payload before it is stored.
            payload['started_at'] = timestamp1
            payload['ended_at'] = timestamp2
            r.add_result(name, payload)

        # Save / add_existing_results roundtrip
        r.save(resultsdir)

        loaded = results.Results.load(join(resultsdir, r._filename))
        assert loaded.date == r.date
        assert loaded.commit_hash == r.commit_hash
        assert loaded._filename == r._filename

        merged = results.Results({'machine': 'bar'}, {}, 'a' * 8, 123, '3.5',
                                 'something')
        merged.add_existing_results(r)

        for copy in [loaded, merged]:
            assert copy._results == r._results
            assert copy._stats == r._stats
            assert copy._number == r._number
            assert copy._samples == r._samples
            assert copy.started_at == r._started_at
            assert copy.ended_at == r._ended_at

        # Check the get_* methods
        assert sorted(loaded.result_keys) == sorted(values.keys())
        for bench in loaded.result_keys:
            # Get with same parameters as stored
            params = loaded.get_result_params(bench)
            assert params == values[bench]['params']
            assert loaded.get_result_value(bench, params) == values[bench]['result']
            assert loaded.get_result_stats(bench, params) == values[bench]['stats']
            assert loaded.get_result_samples(bench, params) == (values[bench]['samples'],
                                                                values[bench]['number'])

            # Get with different parameters than stored (should return n/a)
            bad_params = [['foo', 'bar']]
            assert loaded.get_result_value(bench, bad_params) == [None, None]
            assert loaded.get_result_stats(bench, bad_params) == [None, None]
            assert loaded.get_result_samples(bench, bad_params) == ([None, None],
                                                                    [None, None])
def test_results(tmpdir):
    """Round-trip Results via save/load and add_existing_results.

    Uses the BenchmarkResult-based add_result API, stores profiles and
    per-benchmark versions, and verifies the reloaded/merged copies plus
    all ``get_*`` accessors.
    """
    tmpdir = six.text_type(tmpdir)

    timestamp1 = datetime.datetime.utcnow()
    timestamp2 = datetime.datetime.utcnow()

    resultsdir = join(tmpdir, "results")
    for i in six.moves.xrange(10):
        r = results.Results({'machine': 'foo', 'arch': 'x86_64'}, {},
                            hex(i), i * 1000000, '2.7',
                            'some-environment-name')

        values = {
            'suite1.benchmark1': {'result': [float(i * 0.001)],
                                  'stats': [{'foo': 1}],
                                  'samples': [[1, 2]],
                                  'params': [['a']],
                                  'version': "1",
                                  'profile': b'\x00\xff'},
            'suite1.benchmark2': {'result': [float(i * i * 0.001)],
                                  'stats': [{'foo': 2}],
                                  'samples': [[3, 4]],
                                  'params': [],
                                  'version': "1",
                                  'profile': b'\x00\xff'},
            'suite2.benchmark1': {'result': [float((i + 1)**-1)],
                                  'stats': [{'foo': 3}],
                                  'samples': [[5, 6]],
                                  'params': [['c']],
                                  'version': None,
                                  'profile': b'\x00\xff'}
        }

        for name, spec in values.items():
            # Copy so the version key can be popped without mutating `values`,
            # which is compared against the getters below.
            payload = dict(spec)
            version = payload.pop('version')
            bench_result = runner.BenchmarkResult(started_at=timestamp1,
                                                  ended_at=timestamp2,
                                                  errcode=0,
                                                  stderr='',
                                                  **payload)
            r.add_result(name, bench_result, version, record_samples=True)

        # Save / add_existing_results roundtrip
        r.save(resultsdir)

        loaded = results.Results.load(join(resultsdir, r._filename))
        assert loaded.date == r.date
        assert loaded.commit_hash == r.commit_hash
        assert loaded._filename == r._filename

        merged = results.Results({'machine': 'bar'}, {}, 'a' * 8, 123, '3.5',
                                 'something')
        merged.add_existing_results(r)

        for copy in [loaded, merged]:
            assert copy._results == r._results
            assert copy._stats == r._stats
            assert copy._samples == r._samples
            assert copy._profiles == r._profiles
            assert copy.started_at == r._started_at
            assert copy.ended_at == r._ended_at
            assert copy.benchmark_version == r._benchmark_version

        # Check the get_* methods
        assert sorted(loaded.get_all_result_keys()) == sorted(values.keys())
        for bench in loaded.get_all_result_keys():
            # Get with same parameters as stored
            params = loaded.get_result_params(bench)
            assert params == values[bench]['params']
            assert loaded.get_result_value(bench, params) == values[bench]['result']
            assert loaded.get_result_stats(bench, params) == values[bench]['stats']
            assert loaded.get_result_samples(bench, params) == values[bench]['samples']

            # Get with different parameters than stored (should return n/a)
            bad_params = [['foo', 'bar']]
            assert loaded.get_result_value(bench, bad_params) == [None, None]
            assert loaded.get_result_stats(bench, bad_params) == [None, None]
            assert loaded.get_result_samples(bench, bad_params) == [None, None]

            # Get profile
            assert loaded.get_profile(bench) == b'\x00\xff'

        # Check get_result_keys: only benchmarks whose stored version
        # matches (or is None) are reported.
        mock_benchmarks = {
            'suite1.benchmark1': {'version': '1'},
            'suite1.benchmark2': {'version': '2'},
            'suite2.benchmark1': {'version': '2'},
        }
        assert sorted(loaded.get_result_keys(mock_benchmarks)) == [
            'suite1.benchmark1', 'suite2.benchmark1'
        ]