def test_JSON():
    """format.JSON: type validation, resource round-trip, and save()/append semantics."""
    # Constructing from a non-Stats object must be rejected.
    with pytest.raises(TypeError):
        format.JSON(0)
    # dtypes.Stats
    json_stat = format.JSON(dtypes.Stats([0.0, 1.0, 2.0, 3.0], lambda x: x, 0))
    _test_json(json_stat, append=True)
    _test_json(json_stat, append=False)
    # Renamed from `dir` to avoid shadowing the builtin.
    with tempfile.TemporaryDirectory() as tmp_dir:
        # append requested but file does not exist yet -> behaves like a fresh write
        json_file = os.path.join(tmp_dir, "json_file")
        json_stat.save(json_file)
        assert os.path.exists(json_file)
        with open(json_file, "r", newline="") as read_f:
            read_json = read_f.read()
        assert read_json == json.dumps(json_stat.resource(append=False))
        # append and the file exists -> a second run is added
        json_stat.save(json_file, append=True)
        with open(json_file, "r", newline="") as read_f:
            read_json = json.load(read_f)
        assert len(read_json["runs"]) == 2
        # append == False and the file already exists -> refuse to overwrite
        with pytest.raises(FileExistsError) as exc:
            json_stat.save(json_file, append=False)
        # Use exc.value for the actual exception message (consistent with the
        # other tests in this module).
        assert "already exists but 'append'" in str(exc.value)
def test_CSV():
    """format.CSV: type validation, resource round-trip, and save()/append semantics."""
    # Constructing from a non-Stats object must be rejected.
    with pytest.raises(TypeError):
        format.CSV(0)
    # dtypes.Stats
    # Renamed from `csv` to avoid shadowing the stdlib module name.
    csv_fmt = format.CSV(dtypes.Stats([0.0, 1.0, 2.0, 3.0], lambda x: x, 0))
    _test_csv(csv_fmt, append=True)
    _test_csv(csv_fmt, append=False)
    # Renamed from `dir` to avoid shadowing the builtin.
    with tempfile.TemporaryDirectory() as tmp_dir:
        # append requested but file does not exist yet -> behaves like a fresh write
        csv_file = os.path.join(tmp_dir, "csv_file")
        csv_fmt.save(csv_file)
        assert os.path.exists(csv_file)
        with open(csv_file, "r", newline="") as read_f:
            read_csv = read_f.read()
        assert read_csv == csv_fmt.resource()
        # append and the file exists -> the data row is duplicated
        csv_fmt.save(csv_file, append=True)
        with open(csv_file, "r", newline="") as read_f:
            read_csv = read_f.read()
        rows = read_csv.split(os.linesep)
        assert len(rows) == 4
        assert rows[1] == rows[2]
        # append == False and the file already exists -> refuse to overwrite
        with pytest.raises(FileExistsError) as exc:
            csv_fmt.save(csv_file, append=False)
        assert "already exists but 'append'" in str(exc.value)
def test_Markdown():
    """format.Markdown: type validation, resource round-trip, and save()/append semantics."""
    # Constructing from a non-Stats object must be rejected.
    with pytest.raises(TypeError):
        format.Markdown(0)
    # dtypes.Stats
    md = format.Markdown(dtypes.Stats([0.0, 1.0, 2.0, 3.0], lambda x: x, 0))
    _test_markdown(md, append=True)
    _test_markdown(md, append=False)
    # Renamed from `dir` to avoid shadowing the builtin.
    with tempfile.TemporaryDirectory() as tmp_dir:
        # append requested but file does not exist yet -> behaves like a fresh write
        md_file = os.path.join(tmp_dir, "md_file")
        md.save(md_file)
        assert os.path.exists(md_file)
        with open(md_file, "r", newline="") as read_f:
            read_md = read_f.read()
        assert read_md == md.resource()
        # append and the file exists -> the data row is duplicated
        # (markdown has a header + separator row, hence 5 lines and rows[2]/rows[3])
        md.save(md_file, append=True)
        with open(md_file, "r", newline="") as read_f:
            read_md = read_f.read()
        rows = read_md.split(os.linesep)
        assert len(rows) == 5
        assert rows[2] == rows[3]
        # append == False and the file already exists -> refuse to overwrite
        with pytest.raises(FileExistsError) as exc:
            md.save(md_file, append=False)
        assert "already exists but 'append'" in str(exc.value)
def test_stats_metadata():
    """dtypes.Stats metadata extraction for the various callable kinds.

    Covers: defaults, a regular function, a builtin, a source string,
    a runtime-compiled function, explicit args/kwargs, and the
    no-timing-data error path.
    """
    # defaults
    start = time.time()
    t = dtypes.Stats([10, 20], None, 0)
    assert t.setup == "pass"
    assert start <= t.timestamp <= time.time()
    # `is None` instead of `== None` (PEP 8 identity comparison).
    assert t.function_args is None
    assert t.function_kwargs is None
    assert t.file is None
    assert t.line is None
    assert t.warmups == 0
    assert t.unit == dtypes.TimeUnit.S
    # regular function: source file and line are discoverable
    t = dtypes.Stats([10, 20], _inner, 1)
    assert t.function_name == _inner.__name__
    assert t.file == inspect.getsourcefile(_inner)
    assert isinstance(t.line, int)
    assert t.warmups == 1
    t = dtypes.Stats([10, 20], _inner, 0, timestamp=start)
    assert t.timestamp == start
    # builtin function: has a name but no source file/line
    t = dtypes.Stats([10, 20], max, 0)
    assert t.function_name == max.__name__
    assert t.file is None
    assert t.line is None
    assert t.function == max
    # function given as a source string: no metadata can be derived
    fn = "lambda: 10"
    t = dtypes.Stats([10], fn, 0)
    assert t.file is None
    assert t.function_name is None
    assert t.line is None
    assert t.function == fn
    # runtime-compiled function: file is "<string>", no line number
    fn = _make_runtime_function()
    t = dtypes.Stats([10], fn, 0)
    assert t.file == "<string>"
    assert t.function_name == fn.__name__
    assert t.line is None
    assert t.function == fn
    # args and kwargs are stored verbatim
    args = (1, 2)
    kwargs = {1: 2, 3: 4}
    t = dtypes.Stats([10], _inner, 0, fn_args=args, fn_kwargs=kwargs)
    assert t.function_args == args
    assert t.function_kwargs == kwargs
    # warmups consuming every sample leaves no timing data
    with pytest.raises(ValueError) as exc:
        dtypes.Stats([1, 2, 3], None, 3)
    assert "No timing data" in str(exc.value)
def test_stats_as_markdown():
    """format.stats_as_markdown produces a valid Markdown view and rejects non-Stats input."""
    samples = [random.random() for _ in range(20)]
    stats = dtypes.Stats(samples, lambda x: x, 0)
    md = format.stats_as_markdown(stats)
    _test_markdown(md, append=False)
    _test_markdown(md, append=True)
    with pytest.raises(TypeError) as exc:
        format.stats_as_markdown([1.0])
    assert "stats has to be of type dtypes.Stats" in str(exc.value)
def test_save():
    """decorators.save: writes each supported format to disk, honors disable switches."""
    stats = dtypes.Stats([i for i in range(20)], lambda x: x, 0)
    # Renamed from `dir` to avoid shadowing the builtin.
    with tempfile.TemporaryDirectory() as tmp_dir:
        # list(...) instead of a pass-through comprehension.
        available_formats = list(format.Format)[:-1]  # TODO remove [:-1] when Plot is implemented
        for f in available_formats:
            formatted_stats = format.stats_as(stats, f)

            def _inner():
                return formatted_stats

            file = os.path.join(tmp_dir, f.value)
            result = decorators.save(file)(_inner)()
            assert result == formatted_stats
            assert os.path.exists(file)

    _inner = lambda: 10
    # Disabled by argument: decorator is a no-op pass-through.
    result = decorators.save("csv", enabled=False)(_inner)()
    assert result == _inner()
    # Disabled by environment variable.
    os.environ["ENABLE_BENCHMARKING"] = "0"
    result = decorators.save("csv")(_inner)()
    assert result == _inner()
    del os.environ["ENABLE_BENCHMARKING"]
    # Used with @decorator syntax.
    with tempfile.TemporaryDirectory() as tmp_dir:
        file = os.path.join(tmp_dir, "decorator")
        formatted_stats = format.stats_as(stats, "csv")

        @decorators.save(file)
        def _inner():
            return formatted_stats

        assert _inner() == formatted_stats
        assert os.path.exists(file)
def test_stats_as():
    """format.stats_as accepts both string names and Format enum members equivalently."""
    stats = dtypes.Stats([random.random() for _ in range(20)], lambda x: x, 0)

    # Each (string, enum) pair must yield an identical resource.
    # CSV
    by_name = format.stats_as(stats, "csv")
    _test_csv(by_name, append=True)
    by_enum = format.stats_as(stats, format.Format.CSV)
    assert by_name.resource() == by_enum.resource()
    # Markdown
    by_name = format.stats_as(stats, "markdown")
    _test_markdown(by_name, append=True)
    by_enum = format.stats_as(stats, format.Format.MARKDOWN)
    assert by_name.resource() == by_enum.resource()
    # JSON
    by_name = format.stats_as(stats, "json")
    _test_json(by_name, append=True)
    by_enum = format.stats_as(stats, format.Format.JSON)
    assert by_name.resource() == by_enum.resource()

    # Unknown format names are rejected.
    with pytest.raises(ValueError) as exc:
        format.stats_as(stats, "hello")
    assert "Unsupported format" in str(exc.value)

    # Plot (only when matplotlib is installed)
    try:
        import matplotlib  # noqa: F401

        # TODO Test plot
        plot = format.stats_as(stats, "plot")
        assert isinstance(plot, format.Plot)
    except ImportError:
        pass
def test_stats_cache():
    """cached_property values appear on access and are dropped by a unit conversion."""
    cached_names = (
        "mean",
        "std",
        "std_outliers",
        "repetitions",
        "total",
        "throughput",
        "throughput_min",
        "minimum",
        "percentile_5th",
        "percentile_25th",
        "median",
        "percentile_75th",
        "percentile_95th",
        "maximum",
    )
    stats = dtypes.Stats([1.0], lambda x: x, 0)
    # Fresh instance: no property has been materialized yet.
    assert all(name not in stats.__dict__ for name in cached_names)
    # Touch every property so each one gets cached on the instance.
    for name in cached_names:
        getattr(stats, name)
    assert all(name in stats.__dict__ for name in cached_names)
    # A unit conversion must invalidate every cached value.
    stats.to_nanoseconds()
    assert all(name not in stats.__dict__ for name in cached_names)
def test_stats_statistics():
    """dtypes.Stats statistics: degenerate single sample, five samples, and warmup trimming.

    Fixes vs. previous version: removed a duplicated `minimum` assertion, and
    the second `_percentile` error check now captures its own ExceptionInfo
    (it previously re-checked the stale one from the first call).
    """
    # Single zero-valued sample: everything degenerates to 0, throughput is NaN.
    times = [0.0]
    t = dtypes.Stats(times, _inner, 0)
    assert t.raw_times == times
    assert t.times == times
    assert t.minimum == 0.0
    assert t.maximum == 0.0
    assert t.mean == 0.0
    assert t.median == 0.0
    assert t.repetitions == 1
    assert t.total == 0.0
    assert t.std == 0.0
    assert t.std_outliers == 0
    assert math.isnan(t.throughput)
    assert math.isnan(t.throughput_min)
    assert t.percentile_0th == 0.0
    assert t.percentile_0th == t.minimum
    assert t.percentile_5th == 0.0
    assert t.percentile_25th == 0.0
    assert t.percentile_50th == 0.0
    assert t.percentile_50th == t.median
    assert t.percentile_75th == 0.0
    assert t.percentile_95th == 0.0
    assert t.percentile_100th == 0.0
    assert t.percentile_100th == t.maximum
    # Five evenly spaced samples.
    times = [0.1, 0.2, 0.3, 0.4, 0.5]
    t = dtypes.Stats(times, _inner, 0)
    assert t.raw_times == times
    assert t.times == times
    assert t.minimum == 0.1
    assert t.maximum == 0.5
    assert t.mean == 0.3
    assert t.std == 0.15811388300841897
    assert t.median == 0.3
    assert t.std_outliers == 2
    assert isinstance(t.std_outliers, int)
    assert t.repetitions == 5
    assert t.total == 1.5
    assert t.throughput == 3 + 1 / 3
    assert t.throughput_min == 10.0
    assert t.percentile_0th == 0.1
    assert t.percentile_0th == t.minimum
    assert math.isclose(t.percentile_5th, 0.12)
    assert t.percentile_25th == 0.2
    assert t.percentile_50th == 0.3
    assert t.percentile_50th == t.median
    assert t.percentile_75th == 0.4
    assert t.percentile_95th == 0.48
    assert t.percentile_100th == 0.5
    assert t.percentile_100th == t.maximum
    # times is read-only.
    with pytest.raises(AttributeError):
        t.times = [10]
    # _percentile rejects non-float and out-of-range arguments.
    with pytest.raises(ValueError) as exc:
        t._percentile(2)
    assert "must be a float" in str(exc.value)
    with pytest.raises(ValueError) as exc:
        t._percentile(1.2)
    assert "must be a float" in str(exc.value)
    # With warmups: the first `warmups` samples are excluded from statistics
    # but still present in raw_times.
    times = [0.0, 0.0, 0.1, 0.2, 0.3, 0.4, 0.5]
    warmups = 2
    t = dtypes.Stats(times, _inner, warmups)
    assert t.raw_times == times
    assert t.times == times[warmups:]
    assert t.minimum == 0.1
    assert t.maximum == 0.5
    assert t.mean == 0.3
    assert t.std == 0.15811388300841897
    assert t.median == 0.3
    assert t.std_outliers == 2
    assert t.repetitions == 5
    assert t.total == 1.5
    assert t.throughput == 3 + 1 / 3
    assert t.throughput_min == 10.0
    assert t.percentile_0th == 0.1
    assert t.percentile_0th == t.minimum
    assert math.isclose(t.percentile_5th, 0.12)
    assert t.percentile_25th == 0.2
    assert t.percentile_50th == 0.3
    assert t.percentile_50th == t.median
    assert t.percentile_75th == 0.4
    assert t.percentile_95th == 0.48
    assert t.percentile_100th == 0.5
    assert t.percentile_100th == t.maximum
def test_stats_time_conversion():
    """Sanity-check unit conversions on Stats.

    Note: This checks for general sanity and does not run all combinations
    of time conversions right now.
    """
    seconds = [60, 120]
    t = dtypes.Stats(seconds, lambda x: x, 0)

    # nanoseconds
    t.to_nanoseconds()
    assert t.unit == dtypes.TimeUnit.NS
    assert t.times[0] == seconds[0] * 1e9
    assert t.times[1] == seconds[1] * 1e9
    t.to_seconds()
    assert t.times == seconds

    # microseconds
    t.to_microseconds()
    assert t.unit == dtypes.TimeUnit.US
    assert t.times[0] == seconds[0] * 1e6
    assert t.times[1] == seconds[1] * 1e6
    t.to_seconds()
    assert t.times == seconds

    # milliseconds
    t.to_milliseconds()
    assert t.unit == dtypes.TimeUnit.MS
    assert t.times[0] == seconds[0] * 1e3
    assert t.times[1] == seconds[1] * 1e3
    t.to_seconds()
    assert t.times == seconds

    # seconds -> seconds is a no-op
    t.to_seconds()
    assert t.times == seconds

    # minutes
    t.to_minutes()
    assert t.unit == dtypes.TimeUnit.M
    assert t.times[0] == seconds[0] / 60
    assert t.times[1] == seconds[1] / 60
    t.to_seconds()
    assert t.times == seconds

    # hours
    t.to_hours()
    assert t.unit == dtypes.TimeUnit.H
    assert t.times[0] == seconds[0] / (60 * 60)
    assert t.times[1] == seconds[1] / (60 * 60)
    t.to_seconds()
    assert t.times == seconds

    # days
    t.to_days()
    assert t.unit == dtypes.TimeUnit.D
    assert t.times[0] == seconds[0] / (24 * 60 * 60)
    assert t.times[1] == seconds[1] / (24 * 60 * 60)
    t.to_seconds()
    assert t.times == seconds

    # chained conversion: nanoseconds -> hours
    t.to_nanoseconds()
    t.to_hours()
    assert t.times[0] == seconds[0] * 1e9 / (60 * 60 * 1e9)
    assert t.times[1] == seconds[1] * 1e9 / (60 * 60 * 1e9)

    # With a warmup sample: raw_times keeps the warmup, times drops it —
    # both must be converted.
    t = dtypes.Stats(seconds, lambda x: x, 1)
    t.to_nanoseconds()
    assert t.unit == dtypes.TimeUnit.NS
    assert t.raw_times[0] == seconds[0] * 1e9
    assert t.times[0] == seconds[1] * 1e9
def test_stats_as_decorator():
    """decorators.stats_as converts a Stats-returning callable's result to a format.

    Renamed from ``test_stats_as``: the original name duplicated an earlier
    module-level test, so only this later definition was ever collected by
    pytest and the earlier one silently never ran.
    """
    stats = dtypes.Stats([i for i in range(20)], lambda x: x, 0)

    def _inner():
        return stats

    # CSV: string name and enum member are equivalent
    formatted = decorators.stats_as("csv")(_inner)()
    assert isinstance(formatted, format.CSV)
    formatted = decorators.stats_as(format.Format.CSV)(_inner)()
    assert isinstance(formatted, format.CSV)
    # Markdown
    formatted = decorators.stats_as("markdown")(_inner)()
    assert isinstance(formatted, format.Markdown)
    formatted = decorators.stats_as(format.Format.MARKDOWN)(_inner)()
    assert isinstance(formatted, format.Markdown)
    # JSON
    formatted = decorators.stats_as("json")(_inner)()
    assert isinstance(formatted, format.JSON)
    formatted = decorators.stats_as(format.Format.JSON)(_inner)()
    assert isinstance(formatted, format.JSON)
    # Plot: works when matplotlib is installed, otherwise raises a helpful ImportError
    try:
        import matplotlib  # noqa: F401

        formatted = decorators.stats_as("plot")(_inner)()
        assert isinstance(formatted, format.Plot)
        formatted = decorators.stats_as(format.Format.PLOT)(_inner)()
        assert isinstance(formatted, format.Plot)
    except ImportError:
        with pytest.raises(ImportError) as exc:
            decorators.stats_as("plot")(_inner)()
        assert "pip install matplotlib" in str(exc.value)
    # Only Stats-returning callables are accepted.
    with pytest.raises(TypeError) as exc:
        decorators.stats_as("csv")(lambda: 10)()
    assert "can only be used on" in str(exc.value)
    # Disabled by argument: decorator is a no-op pass-through.
    result = decorators.stats_as("csv", enabled=False)(_inner)()
    assert result == _inner()
    # Disabled by environment variable.
    os.environ["ENABLE_BENCHMARKING"] = "0"
    result = decorators.stats_as("csv")(_inner)()
    assert result == _inner()
    del os.environ["ENABLE_BENCHMARKING"]
    # Used with @decorator syntax.
    @decorators.stats_as("csv")
    def _inner():
        return stats

    assert isinstance(_inner(), format.CSV)