def _worker(self, sample_func):
    loops = self.args.loops
    if loops < 1:
        # FIXME: move this check in argument parsing
        raise ValueError("--loops must be >= 1")

    run_result = perf.RunResult(loops=loops,
                                inner_loops=self.inner_loops,
                                metadata=self.metadata)

    # only import metadata submodule in worker processes
    from perf import metadata as perf_metadata
    perf_metadata.collect_metadata(run_result.metadata)

    for is_warmup, run in self._range():
        dt = sample_func(loops)
        dt = float(dt) / loops
        if self.inner_loops is not None:
            dt /= self.inner_loops
        self._add(run_result, is_warmup, run, dt)

    self._display_run_result_avg(run_result)

    result = perf.Benchmark(name=self.name)
    result.runs.append(run_result)
    return result
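# Minimal sketch (an assumption, not part of the original source) of the
# callable contract _worker expects: sample_func receives the loop count and
# returns the *total* elapsed time for all iterations, which _worker then
# divides by loops (and inner_loops) to get the time per iteration.
# time.perf_counter() is used here for illustration; the actual clock the
# library uses may differ.
import time

def sample_func(loops):
    t0 = time.perf_counter()
    for _ in range(loops):
        pass  # statement being benchmarked goes here
    return time.perf_counter() - t0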
def create_runs(self, samples, metadata=None):
    runs = []
    for sample in samples:
        run = perf.RunResult([sample])
        if metadata:
            run.metadata.update(metadata)
        runs.append(run)
    return runs
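# Hypothetical usage of create_runs() (the 'suite' name and metadata values
# below are illustrative, not from the original source): each sample becomes
# its own single-sample RunResult, and the optional metadata dict is copied
# into every run.
#
#     runs = suite.create_runs([1.0, 1.5, 2.0], metadata={'cpu_count': 4})
#     assert all(run.metadata == {'cpu_count': 4} for run in runs)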
def test_run_result_json(self):
    run = perf.RunResult(samples=[1.0, 1.5, 2.0], warmups=[5.0],
                         loops=10, inner_loops=3)
    run.metadata = {'key': 'value'}

    run = perf.RunResult.json_load(run.json())

    self.assertEqual(run.samples, [1.0, 1.5, 2.0])
    self.assertEqual(run.warmups, [5.0])
    self.assertEqual(run.metadata, {'key': 'value'})
    self.assertEqual(run.loops, 10)
    self.assertEqual(run.inner_loops, 3)
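# Hedged sketch of persisting a run with the json()/json_load() round-trip
# the test above exercises. Only run.json() and perf.RunResult.json_load()
# are taken from the source; the file plumbing and function names here are
# assumptions for illustration.
import perf

def save_run(run, filename):
    with open(filename, 'w') as fp:
        fp.write(run.json())

def load_run(filename):
    with open(filename) as fp:
        return perf.RunResult.json_load(fp.read())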
def test_results(self):
    runs = []
    for sample in (1.0, 1.5, 2.0):
        run = perf.RunResult([sample])
        run.metadata['key'] = 'value'
        runs.append(run)
    results = perf.Benchmark(runs, "name")

    self.assertEqual(results.runs, runs)
    self.assertEqual(results.name, "name")
    self.assertEqual(results.get_metadata(), {'key': 'value'})
    self.assertEqual(str(results), 'name: 1.50 sec +- 0.50 sec')
    self.assertEqual(results.format(0), '1.50 sec +- 0.50 sec')
    self.assertEqual(results.format(1),
                     '1.50 sec +- 0.50 sec (3 runs x 1 sample)')
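# Why '1.50 sec +- 0.50 sec' is the expected string above: the formatted
# value appears to be the mean of the samples plus/minus their sample
# standard deviation (this estimator choice is an assumption; the expected
# strings in the tests are consistent with it).
import statistics

samples = [1.0, 1.5, 2.0]
assert statistics.mean(samples) == 1.5
assert statistics.stdev(samples) == 0.5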
def test_run_result(self):
    run = perf.RunResult(samples=[1.0, 1.5, 2.0])
    self.assertEqual(run.samples, [1.0, 1.5, 2.0])
    self.assertEqual(str(run), '1.50 sec +- 0.50 sec')