def test_does_not_find_unknown_challenge(self):
    default_challenge = track.Challenge("default", description="default challenge", default=True)
    another_challenge = track.Challenge("other", description="non-default challenge", default=False)
    self.assertIsNone(track.Track(name="unittest", description="unittest track",
                                  challenges=[another_challenge, default_challenge])
                      .find_challenge_or_default("unknown-name"))

def test_finds_default_challenge(self):
    default_challenge = track.Challenge("default", description="default challenge", default=True)
    another_challenge = track.Challenge("other", description="non-default challenge", default=False)
    self.assertEqual(default_challenge,
                     track.Track(name="unittest", description="unittest track",
                                 challenges=[another_challenge, default_challenge])
                     .default_challenge)

def test_uses_default_challenge_if_no_name_given(self):
    default_challenge = track.Challenge("default", description="default challenge", default=True)
    another_challenge = track.Challenge("other", description="non-default challenge", default=False)
    self.assertEqual(default_challenge,
                     track.Track(name="unittest", description="unittest track",
                                 challenges=[another_challenge, default_challenge])
                     .find_challenge_or_default(""))

def test_does_not_find_unknown_challenge(self):
    default_challenge = track.Challenge("default", description="default challenge", default=True)
    another_challenge = track.Challenge("other", description="non-default challenge", default=False)
    with self.assertRaises(exceptions.InvalidName) as ctx:
        track.Track(name="unittest", description="unittest track",
                    challenges=[another_challenge, default_challenge]).find_challenge_or_default("unknown-name")
    self.assertEqual("Unknown challenge [unknown-name] for track [unittest]", ctx.exception.args[0])

def test_finds_challenge_by_name(self):
    default_challenge = track.Challenge("default", description="default challenge", default=True)
    another_challenge = track.Challenge("other", description="non-default challenge", default=False)
    assert another_challenge == track.Track(
        name="unittest",
        description="unittest track",
        challenges=[another_challenge, default_challenge],
    ).find_challenge_or_default("other")

def _create_challenges(self, track_spec): ops = self.parse_operations(self._r(track_spec, "operations")) challenges = [] for challenge in self._r(track_spec, "challenges"): challenge_name = self._r(challenge, "name", error_ctx="challenges") challenge_description = self._r(challenge, "description", error_ctx=challenge_name) index_settings = self._r(challenge, "index-settings", error_ctx=challenge_name, mandatory=False) schedule = [] for op in self._r(challenge, "schedule", error_ctx=challenge_name): if "parallel" in op: task = self.parse_parallel(op["parallel"], ops, challenge_name) else: task = self.parse_task(op, ops, challenge_name) schedule.append(task) challenges.append( track.Challenge(name=challenge_name, description=challenge_description, index_settings=index_settings, schedule=schedule)) return challenges
def _create_challenges(self, track_spec): ops = self.parse_operations(self._r(track_spec, "operations", mandatory=False, default_value=[])) challenges = [] known_challenge_names = set() default_challenge = None challenge_specs = self._get_challenge_specs(track_spec) number_of_challenges = len(challenge_specs) for challenge_spec in challenge_specs: name = self._r(challenge_spec, "name", error_ctx="challenges") description = self._r(challenge_spec, "description", error_ctx=name, mandatory=False) user_info = self._r(challenge_spec, "user-info", error_ctx=name, mandatory=False) meta_data = self._r(challenge_spec, "meta", error_ctx=name, mandatory=False) # if we only have one challenge it is treated as default challenge, no matter what the user has specified default = number_of_challenges == 1 or self._r(challenge_spec, "default", error_ctx=name, mandatory=False) index_settings = self._r(challenge_spec, "index-settings", error_ctx=name, mandatory=False) cluster_settings = self._r(challenge_spec, "cluster-settings", error_ctx=name, mandatory=False) if default and default_challenge is not None: self._error("Both '%s' and '%s' are defined as default challenges. Please define only one of them as default." % (default_challenge.name, name)) if name in known_challenge_names: self._error("Duplicate challenge with name '%s'." % name) known_challenge_names.add(name) schedule = [] for op in self._r(challenge_spec, "schedule", error_ctx=name): if "parallel" in op: task = self.parse_parallel(op["parallel"], ops, name) else: task = self.parse_task(op, ops, name) schedule.append(task) # verify we don't have any duplicate task names (which can be confusing / misleading in reporting). known_task_names = set() for task in schedule: for sub_task in task: if sub_task.name in known_task_names: self._error("Challenge '%s' contains multiple tasks with the name '%s'. Please use the task's name property to " "assign a unique name for each task." % (name, sub_task.name)) else: known_task_names.add(sub_task.name) challenge = track.Challenge(name=name, meta_data=meta_data, description=description, user_info=user_info, index_settings=index_settings, cluster_settings=cluster_settings, default=default, schedule=schedule) if default: default_challenge = challenge challenges.append(challenge) if challenges and default_challenge is None: self._error("No default challenge specified. Please edit the track and add \"default\": true to one of the challenges %s." % ", ".join([c.name for c in challenges])) return challenges
def test_does_not_find_unknown_challenge(self):
    default_challenge = track.Challenge("default", description="default challenge", default=True)
    another_challenge = track.Challenge("other", description="non-default challenge", default=False)
    with pytest.raises(exceptions.InvalidName) as exc:
        track.Track(
            name="unittest",
            description="unittest track",
            challenges=[another_challenge, default_challenge],
        ).find_challenge_or_default("unknown-name")
    assert exc.value.args[0] == "Unknown challenge [unknown-name] for track [unittest]"

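# Minimal sketch only, inferred from the assertions in the tests above rather than copied from
# the actual esrally.track.track implementation: an empty name falls back to the default
# challenge, a known name returns that challenge, and an unknown name raises
# exceptions.InvalidName with the message the newer tests expect (the oldest test variant above
# instead expected None for an unknown name). SketchTrack is a hypothetical name.
from esrally import exceptions


class SketchTrack:
    def __init__(self, name, description=None, challenges=None):
        self.name = name
        self.description = description
        self.challenges = challenges or []

    @property
    def default_challenge(self):
        # first challenge flagged as default, or None if the track defines none
        return next((c for c in self.challenges if c.default), None)

    def find_challenge_or_default(self, name):
        # an empty name falls back to the default challenge
        if not name:
            return self.default_challenge
        for challenge in self.challenges:
            if challenge.name == name:
                return challenge
        # mirrors the error message asserted in the tests above
        raise exceptions.InvalidName("Unknown challenge [%s] for track [%s]" % (name, self.name))
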
def _create_challenges(self, track_spec): ops = self.parse_operations(self._r(track_spec, "operations")) challenges = [] known_challenge_names = set() default_challenge = None number_of_challenges = len(self._r(track_spec, "challenges")) for challenge in self._r(track_spec, "challenges"): name = self._r(challenge, "name", error_ctx="challenges") description = self._r(challenge, "description", error_ctx=name) meta_data = self._r(challenge, "meta", error_ctx=name, mandatory=False) # if we only have one challenge it is treated as default challenge, no matter what the user has specified default = number_of_challenges == 1 or self._r( challenge, "default", error_ctx=name, mandatory=False) index_settings = self._r(challenge, "index-settings", error_ctx=name, mandatory=False) cluster_settings = self._r(challenge, "cluster-settings", error_ctx=name, mandatory=False) if default and default_challenge is not None: self._error( "Both '%s' and '%s' are defined as default challenges. Please define only one of them as default." % (default_challenge.name, name)) if name in known_challenge_names: self._error("Duplicate challenge with name '%s'." % name) known_challenge_names.add(name) schedule = [] for op in self._r(challenge, "schedule", error_ctx=name): if "parallel" in op: task = self.parse_parallel(op["parallel"], ops, name) else: task = self.parse_task(op, ops, name) schedule.append(task) new_challenge = track.Challenge(name=name, meta_data=meta_data, description=description, index_settings=index_settings, cluster_settings=cluster_settings, default=default, schedule=schedule) if default: default_challenge = new_challenge challenges.append(new_challenge) if challenges and default_challenge is None: self._error( "No default challenge specified. Please edit the track and add \"default\": true to one of the challenges %s." % ", ".join([c.name for c in challenges])) return challenges
def test_sets_absolute_path(self):
    from esrally import config
    from esrally.track import track

    cfg = config.Config()
    cfg.add(config.Scope.application, "benchmarks", "local.dataset.cache", "/data")

    default_challenge = track.Challenge("default", description="default challenge", default=True, schedule=[
        track.Task(operation=track.Operation("index", operation_type=track.OperationType.Index), clients=4)
    ])
    another_challenge = track.Challenge("other", description="non-default challenge", default=False)
    t = track.Track(name="unittest", short_description="unittest track",
                    challenges=[another_challenge, default_challenge],
                    indices=[
                        track.Index(name="test", auto_managed=True, types=[
                            track.Type("docs", mapping={},
                                       document_file="docs/documents.json",
                                       document_archive="docs/documents.json.bz2")
                        ])
                    ])

    loader.set_absolute_data_path(cfg, t)

    self.assertEqual("/data/docs/documents.json", t.indices[0].types[0].document_file)
    self.assertEqual("/data/docs/documents.json.bz2", t.indices[0].types[0].document_archive)

def test_run_benchmark(self):
    cfg = config.Config()
    cfg.add(config.Scope.application, "system", "env.name", "unittest")
    cfg.add(config.Scope.application, "system", "time.start",
            datetime(year=2017, month=8, day=20, hour=1, minute=0, second=0))
    cfg.add(config.Scope.application, "system", "race.id", "6ebc6e53-ee20-4b0c-99b4-09697987e9f4")
    cfg.add(config.Scope.application, "system", "offline.mode", False)
    cfg.add(config.Scope.application, "driver", "on.error", "abort")
    cfg.add(config.Scope.application, "driver", "profiling", False)
    cfg.add(config.Scope.application, "reporting", "datastore.type", "in-memory")
    cfg.add(config.Scope.application, "track", "params", {})
    cfg.add(config.Scope.application, "track", "test.mode.enabled", True)
    cfg.add(config.Scope.application, "telemetry", "devices", [])
    cfg.add(config.Scope.application, "telemetry", "params", {})
    cfg.add(config.Scope.application, "mechanic", "car.names", ["external"])
    cfg.add(config.Scope.application, "mechanic", "skip.rest.api.check", True)
    cfg.add(config.Scope.application, "client", "hosts",
            AsyncDriverTests.Holder(all_hosts={"default": ["localhost:9200"]}))
    cfg.add(config.Scope.application, "client", "options",
            AsyncDriverTests.Holder(all_client_options={"default": {}}))

    params.register_param_source_for_name("bulk-param-source", AsyncDriverTestParamSource)

    task = track.Task(name="bulk-index",
                      operation=track.Operation(
                          "bulk-index",
                          track.OperationType.Bulk.name,
                          params={
                              "body": ["action_metadata_line", "index_line"],
                              "action-metadata-present": True,
                              "bulk-size": 1,
                              # we need this because the parameter source does not know that we only have one
                              # bulk and hence size() returns incorrect results
                              "size": 1
                          },
                          param_source="bulk-param-source"),
                      warmup_iterations=0,
                      iterations=1,
                      clients=1)

    current_challenge = track.Challenge(name="default", default=True, schedule=[task])
    current_track = track.Track(name="unit-test", challenges=[current_challenge])

    driver = async_driver.AsyncDriver(cfg, current_track, current_challenge,
                                      es_client_factory_class=StaticClientFactory)

    distribution_flavor, distribution_version, revision = driver.setup()
    self.assertEqual("oss", distribution_flavor)
    self.assertEqual("7.3.0", distribution_version)
    self.assertEqual("de777fa", revision)

    metrics_store_representation = driver.run()

    metric_store = metrics.metrics_store(cfg, read_only=True, track=current_track, challenge=current_challenge)
    metric_store.bulk_add(metrics_store_representation)

    self.assertIsNotNone(metric_store.get_one(name="latency", task="bulk-index", sample_type=metrics.SampleType.Normal))
    self.assertIsNotNone(metric_store.get_one(name="service_time", task="bulk-index", sample_type=metrics.SampleType.Normal))
    self.assertIsNotNone(metric_store.get_one(name="processing_time", task="bulk-index", sample_type=metrics.SampleType.Normal))
    self.assertIsNotNone(metric_store.get_one(name="throughput", task="bulk-index", sample_type=metrics.SampleType.Normal))
    self.assertIsNotNone(metric_store.get_one(name="node_total_young_gen_gc_time", sample_type=metrics.SampleType.Normal))
    self.assertIsNotNone(metric_store.get_one(name="node_total_old_gen_gc_time", sample_type=metrics.SampleType.Normal))

    # ensure that there are not more documents than we expect
    self.assertEqual(6, len(metric_store.docs), msg=json.dumps(metric_store.docs, indent=2))