def benchmark_only_pipeline():
    """Fixture that temporarily swaps the registered "benchmark-only" pipeline for a mocked one.

    The pipeline that was registered beforehand is put back on teardown so
    other tests see the original registration again.
    """
    name = "benchmark-only"
    # remember whatever is currently registered before we overwrite it
    previous = racecontrol.pipelines[name]
    mocked = racecontrol.Pipeline(name, "Pipeline intended for unit-testing", mock.Mock())
    yield mocked
    # restore prior pipeline!
    racecontrol.pipelines[name] = previous
def test_conflicting_pipeline_and_distribution_version(self):
    """Using --distribution-version with any pipeline other than "from-distribution" must raise a SystemSetupError."""
    mock_pipeline = mock.Mock()
    test_pipeline_name = "unit-test-pipeline"
    # pick any registered pipeline except "from-distribution" so the combination conflicts
    rnd_pipeline_name = False
    while not rnd_pipeline_name or rnd_pipeline_name == "from-distribution":
        rnd_pipeline_name = random.choice(racecontrol.available_pipelines())[0]
    racecontrol.Pipeline(test_pipeline_name, "Pipeline intended for unit-testing", mock_pipeline)
    # try/finally: the original deleted the pipeline only after the assertions, so a failing
    # assertion leaked the test pipeline into the registry and affected other tests
    try:
        cfg = config.Config()
        cfg.add(config.Scope.benchmark, "race", "pipeline", rnd_pipeline_name)
        cfg.add(config.Scope.benchmark, "mechanic", "distribution.version", "6.5.3")
        with self.assertRaises(exceptions.SystemSetupError) as ctx:
            racecontrol.run(cfg)
        self.assertRegex(
            ctx.exception.args[0],
            r"--distribution-version can only be used together with pipeline from-distribution, "
            "but you specified {}.\n"
            "If you intend to benchmark an externally provisioned cluster, don't specify --distribution-version otherwise\n"
            "please read the docs for from-distribution pipeline at "
            "{}/pipelines.html#from-distribution".format(
                rnd_pipeline_name, DOC_LINK))
    finally:
        # ensure we remove it again from the list of registered pipelines to avoid unwanted side effects
        del racecontrol.pipelines[test_pipeline_name]
def test_passes_benchmark_only_pipeline_in_docker(self):
    """The "benchmark-only" pipeline must be accepted and invoked when running inside the Rally Docker image."""
    mock_pipeline = mock.Mock()
    test_pipeline_name = "benchmark-only"
    # The original code deleted the "benchmark-only" registration outright on cleanup,
    # permanently removing the pipeline registered at module import time. Save it here
    # and restore it afterwards so other tests are unaffected.
    original_pipeline = racecontrol.pipelines.get(test_pipeline_name)
    racecontrol.Pipeline(test_pipeline_name, "Mocked benchmark-only pipeline for unittest", mock_pipeline)
    try:
        cfg = config.Config()
        cfg.add(config.Scope.benchmark, "race", "pipeline", "benchmark-only")
        racecontrol.run(cfg)
        mock_pipeline.assert_called_once_with(cfg)
    finally:
        # restore (or remove) the registration even if an assertion above failed
        if original_pipeline is not None:
            racecontrol.pipelines[test_pipeline_name] = original_pipeline
        else:
            del racecontrol.pipelines[test_pipeline_name]
def test_runs_a_known_pipeline(self):
    """A pipeline registered under the requested name must be looked up and executed with the config."""
    mock_pipeline = mock.Mock()
    test_pipeline_name = "unit-test-pipeline"
    racecontrol.Pipeline(test_pipeline_name, "Pipeline intended for unit-testing", mock_pipeline)
    try:
        cfg = config.Config()
        cfg.add(config.Scope.benchmark, "race", "pipeline", "unit-test-pipeline")
        cfg.add(config.Scope.benchmark, "mechanic", "distribution.version", "")
        racecontrol.run(cfg)
        mock_pipeline.assert_called_once_with(cfg)
    finally:
        # Bug fix: the original ended with `del p`, which only dropped the local reference
        # and left the pipeline registered, despite the comment claiming otherwise.
        # Actually unregister it to avoid unwanted side effects on other tests.
        del racecontrol.pipelines[test_pipeline_name]
def test_fails_without_benchmark_only_pipeline_in_docker(self):
    """Any pipeline other than "benchmark-only" must be rejected when running inside the Rally Docker image."""
    mock_pipeline = mock.Mock()
    test_pipeline_name = "unit-test-pipeline"
    racecontrol.Pipeline(test_pipeline_name, "Pipeline intended for unit-testing", mock_pipeline)
    # try/finally: the original only removed the test pipeline after the assertions,
    # so a failing assertion leaked it into the registry for subsequent tests
    try:
        cfg = config.Config()
        cfg.add(config.Scope.benchmark, "race", "pipeline", "unit-test-pipeline")
        with self.assertRaises(exceptions.SystemSetupError) as ctx:
            racecontrol.run(cfg)
        self.assertEqual(
            "Only the [benchmark-only] pipeline is supported by the Rally Docker image.\n"
            "Add --pipeline=benchmark-only in your Rally arguments and try again.\n"
            "For more details read the docs for the benchmark-only pipeline in https://esrally.readthedocs.io/en/latest/pipelines.html#benchmark-only\n",
            ctx.exception.args[0])
    finally:
        del racecontrol.pipelines[test_pipeline_name]
def unittest_pipeline():
    """Yield a freshly registered "unit-test-pipeline" and unregister it on teardown."""
    name = "unit-test-pipeline"
    fixture = racecontrol.Pipeline(name, "Pipeline intended for unit-testing", mock.Mock())
    yield fixture
    # drop the registration again so the registry is left untouched
    del racecontrol.pipelines[fixture.name]