def test_untaged_sequence():
    """Deliverables without usable tags must raise an informative ValueError.

    NOTE(review): "untaged" looks like a typo for "untagged"; the name is
    kept unchanged to preserve the test's public identifier.
    """
    # No tags at all: timeit cannot label the rows.
    with pytest.raises(ValueError) as exc:
        timeit(delayed(sleep)(0.1) for _ in range(2))
    assert "please provide the tag parameter" in str(exc.value)

    # Duplicate tags: two deliverables but a single unique tag set.
    duplicated = [
        delayed(sleep, tags={'a': 1})(0.1),
        delayed(sleep, tags={'a': 1})(0.1),
    ]
    with pytest.raises(ValueError) as exc:
        timeit(duplicated)
    assert "but only 1 unique tags were found" in str(exc.value)
def test_progress_bar(capsys):
    """Nothing is printed by default; progress output appears when enabled."""
    # Default run: no progress bar, so stdout/stderr stay empty.
    timeit((delayed(sleep, tags={'N': idx})(0.1) for idx in range(2)),
           repeat=1)
    captured = capsys.readouterr()
    combined = captured.out + captured.err
    assert len(combined) == 0

    # With a tiny progress_bar threshold the bar is always displayed.
    timeit((delayed(sleep, tags={'N': idx})(0.1) for idx in range(2)),
           progress_bar=1e-3, repeat=1)
    captured = capsys.readouterr()
    combined = captured.out + captured.err
    assert len(combined) > 0
    assert '100%' in combined
    assert '2/2' in combined
def test_timeit_sequence(repeat):
    """With to_dataframe=False, timeit returns a list of measurement dicts."""
    measurements = timeit(
        (delayed(sleep, tags={'idx': idx})(0.1) for idx in range(2)),
        repeat=repeat, to_dataframe=False)

    assert isinstance(measurements, list)
    # Each measurement carries a strictly positive wall-clock timing.
    for measurement in measurements:
        assert 'wall_time' in measurement
        assert measurement['wall_time'] > 0
def test_repeat():
    """Aggregation over repeats yields one row per aggregate function."""
    aggregates = ('mean',)
    result = timeit(delayed(sleep)(0), repeat=2, aggregate=aggregates)

    pandas = import_or_none('pandas')
    if pandas is None:
        # Without pandas the raw per-repeat measurements are returned.
        assert len(result) == 2
    else:
        # With pandas, rows are indexed by the aggregate names.
        assert list(result.columns) == ['wall_time']
        assert list(result.index) == list(aggregates)
def test_timeit_overhead():
    """Measured wall time stays within platform timer precision of the sleep."""
    delay = 0.2

    # Absolute tolerance depends on how accurate the platform timer/sleep is.
    if sys.platform == 'win32':
        # time.time resolution on Windows is ~16 ms.
        tolerance = 25e-3
    elif sys.platform == 'darwin':
        # time.sleep is noticeably inaccurate on OS X.
        tolerance = 80e-3
    else:
        tolerance = 5e-3

    result = timeit(delayed(sleep)(delay))
    assert result['wall_time'] == approx(delay, abs=tolerance)
def test_wall_user_time():
    """The cpu_time timer is reported when the resource module is available."""
    # Skip entirely on platforms without the resource module (e.g. Windows).
    pytest.importorskip('resource')
    result = timeit(delayed(sleep)(0), timer='cpu_time')
    assert 'cpu_time' in result
def test_benchmark_env():
    """Environment variables passed via env= show up in the result row."""
    result = timeit(delayed(sleep, env={'NEURTU_TEST': 'true'})(0))
    assert 'NEURTU_TEST' in result
    assert result['NEURTU_TEST'] == 'true'
Time complexity of numpy.sort ============================= In this example we will look into the time complexity of :func:`numpy.sort` """ import numpy as np from neurtu import timeit, delayed rng = np.random.RandomState(42) df = timeit(delayed(np.sort, tags={'N': N, 'kind': kind})(rng.rand(N), kind=kind) for N in np.logspace(2, 5, num=5).astype('int') for kind in ["quicksort", "mergesort", "heapsort"]) print(df.to_string()) ############################################################################## # # we can use the pandas plotting API (that requires matplotlib) ax = df.wall_time.unstack().plot(marker='o') ax.set_xscale('log') ax.set_yscale('log') ax.set_ylabel('Wall time (s)') ax.set_title('Time complexity of numpy.sort')
'n_components': n_components, 'n_samples': n_samples, 'n_features': n_features, 'nnz': X.nnz, 'density': density, 'preconditioner': str(preconditioner), } yield neurtu.delayed(randomized_svd, tags=params)( X, n_components=n_components, preconditioner=preconditioner) # %% with threadpool_limits(limits=16): df = neurtu.timeit(benchmark_sparse(), repeat=3).wall_time # %% def highlight_best(s): is_max = s == s.min() return ['background-color: #206b3c80' if v else '' for v in is_max] df['mean'].unstack().round(2).style.apply(highlight_best, axis=1) # %% def benchmark_dense(): for ratio in [10, 100, 1000, 2500, 5000, 7500, 10000]: for n_features in [50, 500, 1000]: