def test_list_tracks(cfg):
    """Verify `list tracks` succeeds for the default repo, the eventdata repo, and a pinned track revision."""
    commands = [
        "list tracks",
        "list tracks --track-repository=eventdata",
        "list tracks --track-repository=default --track-revision=4080dc9850d07e23b6fc7cfcdc7cf57b14e5168d",
    ]
    for command in commands:
        assert it.esrally(cfg, command) == 0
def test_sources(cfg):
    """Benchmark from sources: first a full build with the analysis-icu plugin, then a skip-build run with a car mixin."""
    it.wait_until_port_is_free()
    full_build_args = (
        "--on-error=abort --revision=latest --track=geonames --test-mode "
        "--challenge=append-no-conflicts --car=4gheap --elasticsearch-plugins=analysis-icu"
    )
    assert it.esrally(cfg, full_build_args) == 0

    it.wait_until_port_is_free()
    skip_build_args = (
        "--on-error=abort --pipeline=from-sources-skip-build --track=geonames --test-mode "
        '--challenge=append-no-conflicts-index-only --car="4gheap,ea"'
    )
    assert it.esrally(cfg, skip_build_args) == 0
def test_create_track(cfg, tmp_path, test_cluster):
    """End-to-end: index some test data, create a track from it, check the generated files, then race that track."""
    # prepare some data
    ingest_args = (
        f"--test-mode --pipeline=benchmark-only --target-hosts=127.0.0.1:{test_cluster.http_port} "
        f" --track=geonames --challenge=append-no-conflicts-index-only --quiet"
    )
    assert it.race(cfg, ingest_args) == 0

    # create the track
    track_name = f"test-track-{uuid.uuid4()}"
    track_path = tmp_path / track_name
    create_args = (
        f"create-track --target-hosts=127.0.0.1:{test_cluster.http_port} --indices=geonames "
        f"--track={track_name} --output-path={tmp_path}"
    )
    assert it.esrally(cfg, create_args) == 0

    expected_files = [
        "track.json",
        "geonames.json",
        "geonames-documents-1k.json",
        "geonames-documents.json",
        "geonames-documents-1k.json.bz2",
        "geonames-documents.json.bz2",
    ]
    for expected in expected_files:
        expected_path = track_path / expected
        assert expected_path.exists(), f"Expected file to exist at path [{expected_path}]"

    # run a benchmark with the created track
    race_args = f"--test-mode --pipeline=benchmark-only --target-hosts=127.0.0.1:{test_cluster.http_port} --track-path={track_path}"
    assert it.race(cfg, race_args) == 0
def test_fails_when_spec_not_found(cfg, tmp_path):
    """`generate charts` must exit non-zero when the chart spec path does not exist."""
    chart_spec_path = "/non/existent/path"
    output_path = os.path.join(tmp_path, "nightly-charts.ndjson")
    args = f"generate charts --chart-spec-path={chart_spec_path} --chart-type=time-series --output-path={output_path}"
    assert it.esrally(cfg, args) != 0
def test_track_info_with_challenge(cfg, tmp_path):
    """`generate charts` with a valid sample race config must succeed.

    NOTE(review): the name suggests a track-info test but the body runs chart
    generation — possibly misnamed; confirm against the original test suite.
    """
    cwd = os.path.dirname(__file__)
    chart_spec_path = os.path.join(cwd, "resources", "sample-race-config.json")
    output_path = os.path.join(tmp_path, "nightly-charts.ndjson")
    args = f"generate charts --chart-spec-path={chart_spec_path} --chart-type=time-series --output-path={output_path}"
    assert it.esrally(cfg, args) == 0
def test_create_track(cfg, tmp_path, test_cluster):
    """Create a track from a freshly-indexed small corpus, verify the generated files, then race it twice."""
    # use 0.05% of geonames corpus to generate data. We need something small but >1000 docs to properly test
    # the -1k corpus too.
    ingest_args = (
        f"--pipeline=benchmark-only --target-hosts=127.0.0.1:{test_cluster.http_port} --track=geonames "
        f'--challenge=append-no-conflicts-index-only --track-params="ingest_percentage:0.05" --on-error=abort '
        f'--include-tasks="delete-index,create-index,check-cluster-health,index-append" --quiet'
    )
    assert it.race(cfg, ingest_args) == 0

    # create the track
    track_name = f"test-track-{uuid.uuid4()}"
    track_path = tmp_path / track_name
    create_args = (
        f"create-track --target-hosts=127.0.0.1:{test_cluster.http_port} --indices=geonames "
        f"--track={track_name} --output-path={tmp_path}"
    )
    assert it.esrally(cfg, create_args) == 0

    base_generated_corpora = "geonames-documents"
    expected_files = [
        "track.json",
        "geonames.json",
        f"{base_generated_corpora}-1k.json",
        f"{base_generated_corpora}.json",
        f"{base_generated_corpora}-1k.json.bz2",
        f"{base_generated_corpora}.json.bz2",
    ]
    for expected in expected_files:
        expected_path = track_path / expected
        assert expected_path.exists(), f"Expected file to exist at path [{expected_path}]"

    # the -1k corpus backs --test-mode, so it must contain exactly 1000 documents
    with open(track_path / f"{base_generated_corpora}-1k.json", "rt") as corpus_file:
        num_lines = sum(1 for _ in corpus_file)
    assert (
        num_lines == 1000
    ), f"Corpora [{base_generated_corpora}-1k.json] used by test-mode is [{num_lines}] lines but should be 1000 lines"

    # run a benchmark in test mode with the created track
    test_mode_args = f"--test-mode --pipeline=benchmark-only --target-hosts=127.0.0.1:{test_cluster.http_port} --track-path={track_path}"
    assert it.race(cfg, test_mode_args) == 0

    # and also run a normal (short) benchmark using the created track
    normal_args = f"--pipeline=benchmark-only --target-hosts=127.0.0.1:{test_cluster.http_port} --track-path={track_path}"
    assert it.race(cfg, normal_args) == 0
def test_list_telemetry(cfg):
    """`list telemetry` must exit successfully."""
    return_code = it.esrally(cfg, "list telemetry")
    assert return_code == 0
def test_list_elasticsearch_plugins(cfg):
    """`list elasticsearch-plugins` must exit successfully."""
    return_code = it.esrally(cfg, "list elasticsearch-plugins")
    assert return_code == 0
def test_list_cars(cfg):
    """`list cars` must exit successfully."""
    return_code = it.esrally(cfg, "list cars")
    assert return_code == 0
def test_list_races(cfg):
    """`list races` must exit successfully."""
    return_code = it.esrally(cfg, "list races")
    assert return_code == 0
def test_track_info_with_track_repo(cfg):
    """`info` must succeed when an explicit track repository is selected."""
    return_code = it.esrally(cfg, "info --track-repository=default --track=geonames")
    assert return_code == 0
def test_does_not_download_unsupported_distribution(cfg):
    """Downloading a distribution version that Rally does not support must fail."""
    return_code = it.esrally(cfg, 'download --distribution-version="1.7.6" --quiet')
    assert return_code != 0
def test_download_distribution(cfg):
    """Every supported distribution version must download successfully."""
    for version in it.DISTRIBUTIONS:
        assert it.esrally(cfg, f'download --distribution-version="{version}" --quiet') == 0
def test_track_info_with_task_filter(cfg):
    """`info` must succeed when `--include-tasks` restricts the challenge's tasks."""
    args = 'info --track=geonames --challenge=append-no-conflicts --include-tasks="type:search"'
    assert it.esrally(cfg, args) == 0
def test_run_with_direct_internet_connection(cfg, http_proxy, fresh_log_file):
    """A run without proxy configuration should succeed and log that it connects directly."""
    return_code = it.esrally(cfg, "list tracks")
    assert return_code == 0
    assert_log_line_present(fresh_log_file, "Connecting directly to the Internet")
def test_configure(self):
    """Exercise the configuration procedure with defaults.

    Runs only to verify that `configure` completes; the resulting
    configuration is intentionally not reused by other tests.
    """
    # just run to test the configuration procedure, don't use this configuration in other tests.
    # Fixed: the command was an f-string with no placeholders (ruff F541); a plain literal is equivalent.
    assert it.esrally(CFG_FILE, "configure --assume-defaults") == 0
def test_track_info_with_challenge(cfg):
    """`info` must succeed when a specific challenge is selected for the track."""
    args = "info --track=geonames --challenge=append-no-conflicts"
    assert it.esrally(cfg, args) == 0