def workflow_default(c: Composition, parser: WorkflowArgumentParser) -> None:
    c.silent = True

    parser.add_argument("--scenario", metavar="SCENARIO", type=str, help="Scenario to run.")
    parser.add_argument("--check", metavar="CHECK", type=str, help="Check to run.")
    args = parser.parse_args()

    c.up("testdrive", persistent=True)
    # c.start_and_wait_for_tcp(
    #     services=["zookeeper", "kafka", "schema-registry", "postgres"]
    # )

    scenarios = (
        [globals()[args.scenario]] if args.scenario else Scenario.__subclasses__()
    )
    checks = [globals()[args.check]] if args.check else Check.__subclasses__()

    for scenario_class in scenarios:
        print(f"Testing upgrade scenario {scenario_class}")
        scenario = scenario_class(checks=checks)
        scenario.run(c)

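# A hypothetical minimal Check, shown only to illustrate how the globals()
# lookup above turns `--check=NoopCheck` into a class defined in this module.
# The method names below are assumptions about the framework's interface, not
# its confirmed API.
class NoopCheck(Check):
    def manipulate(self) -> None:
        pass

    def validate(self) -> None:
        pass
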
def workflow_default(c: Composition, parser: WorkflowArgumentParser) -> None: parser.add_argument( "--scenario", metavar="SCENARIO", type=str, help="Scenario to run", required=True, ) parser.add_argument("--seed", metavar="N", type=int, help="Random seed", default=1) args = parser.parse_args() scenario_class = globals()[args.scenario] c.start_and_wait_for_tcp( services=["zookeeper", "kafka", "schema-registry"]) c.up("testdrive", persistent=True) random.seed(args.seed) print("Generating test...") test = Test(scenario=scenario_class(), max_actions=500) print("Running test...") test.run(c)
def run_one_failpoint(c: Composition, failpoint: str, action: str) -> None:
    print(f">>> Running failpoint test for failpoint {failpoint} with action {action}")

    seed = round(time.time())

    c.up("materialized")
    c.wait_for_materialized()

    c.run(
        "testdrive-svc",
        f"--seed={seed}",
        f"--var=failpoint={failpoint}",
        f"--var=action={action}",
        "failpoints/before.td",
    )

    time.sleep(2)
    # kill Mz if the failpoint has not killed it
    c.kill("materialized")
    c.up("materialized")
    c.wait_for_materialized()

    c.run("testdrive-svc", f"--seed={seed}", "failpoints/after.td")

    c.kill("materialized")
    c.rm("materialized", "testdrive-svc", destroy_volumes=True)
    c.rm_volumes("mzdata")

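# A sketch of a driver for run_one_failpoint, assuming a list of failpoint
# names is iterated; the failpoint names and actions below are illustrative
# placeholders, not a confirmed list of supported failpoints.
def workflow_all_failpoints(c: Composition) -> None:
    for failpoint in ["fileblob_set_sync", "fileblob_delete_before"]:  # assumed names
        for action in ["return", "panic"]:
            run_one_failpoint(c, failpoint, action)
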
def workflow_default(c: Composition, parser: WorkflowArgumentParser) -> None: """Run the proxy tests.""" parser.add_argument( "--redpanda", action="store_true", help="run against Redpanda instead of the Confluent Platform", ) parser.add_argument( "--aws-region", help="run against the specified AWS region instead of localstack", ) args = parser.parse_args() dependencies = ["squid"] if args.redpanda: dependencies += ["redpanda"] else: dependencies += ["zookeeper", "kafka", "schema-registry"] if not args.aws_region: dependencies += ["localstack"] c.start_and_wait_for_tcp(dependencies) aws_arg = (f"--aws-region={args.aws_region}" if args.aws_region else "--aws-endpoint=http://localstack:4566") for test_case in test_cases: print(f"Running test case {test_case.name!r}") with c.override(Materialized(environment_extra=test_case.env)): c.up("materialized") c.wait_for_materialized("materialized") c.run("testdrive-svc", aws_arg, *test_case.files)
def workflow_default(c: Composition, parser: WorkflowArgumentParser) -> None: parser.add_argument( "--seed", help="an alternate seed to use to avoid clashing with existing topics", type=int, default=1, ) args = parser.parse_args() c.start_and_wait_for_tcp( services=["zookeeper", "kafka", "schema-registry", "materialized"]) c.run( "testdrive", f"--seed={args.seed}", "--kafka-option=group.id=group1", "before-restart.td", ) c.kill("materialized") c.up("materialized") c.wait_for_materialized() c.run( "testdrive", f"--seed={args.seed}", "--no-reset", "--kafka-option=group.id=group2", "after-restart.td", )
def workflow_default(c: Composition, parser: WorkflowArgumentParser) -> None: """Runs the dbt adapter test suite against Materialize in various configurations.""" parser.add_argument( "filter", nargs="?", default="", help="limit to test cases matching filter" ) args = parser.parse_args() for test_case in test_cases: if args.filter in test_case.name: print(f"> Running test case {test_case.name}") materialized = Materialized( options=test_case.materialized_options, image=test_case.materialized_image, depends_on=["test-certs"], volumes=["secrets:/secrets"], ) with c.override(materialized): c.up("materialized") c.wait_for_tcp(host="materialized", port=6875) c.run( "dbt-test", "pytest", "dbt-materialize/test", env_extra=test_case.dbt_env, )
def test_proxy(c: Composition, aws: str) -> None:
    for test in tests:
        mz: Materialized = test["mz"]
        c.up(mz.name)
        c.wait_for_materialized(mz.name)
        c.run("testdrive-svc", aws, test["td"])
        c.kill(mz.name)

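# A sketch of the `tests` list this helper assumes: each entry pairs a
# proxy-configured Materialized service with the testdrive file to run against
# it. The service name, environment value, and file name are assumptions.
tests_example = [
    {
        "mz": Materialized(
            name="mz_proxy",
            environment_extra=["ALL_PROXY=http://squid:3128"],  # assumed
        ),
        "td": "proxy-success.td",  # assumed file name
    },
]
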
def workflow_start_two_mzs(c: Composition, parser: WorkflowArgumentParser) -> None:
    """Starts two Mz instances from different git tags for the purpose of
    manually running RQG comparison tests.
    """
    parser.add_argument(
        "--this-tag", help="Run Materialize with this git tag on port 6875"
    )
    parser.add_argument(
        "--other-tag", help="Run Materialize with this git tag on port 16875"
    )
    args = parser.parse_args()

    with c.override(
        Materialized(
            name="mz_this",
            image=f"materialize/materialized:{args.this_tag}" if args.this_tag else None,
            # Keep the mzdata, pgdata, etc. private to the container
            volumes=[],
            allow_host_ports=True,
            ports=["6875:6875"],
        ),
        Materialized(
            name="mz_other",
            image=f"materialize/materialized:{args.other_tag}" if args.other_tag else None,
            volumes=[],
            allow_host_ports=True,
            ports=["16875:6875"],
        ),
    ):
        for mz in ["mz_this", "mz_other"]:
            c.up(mz)
            c.wait_for_materialized(service=mz)

def test_github_12251(c: Composition) -> None:
    c.up("materialized")
    c.wait_for_materialized()
    c.up("computed_1")
    c.sql(
        """
        DROP CLUSTER IF EXISTS cluster1 CASCADE;
        CREATE CLUSTER cluster1 REPLICAS (replica1 (REMOTE ['computed_1:2100']));
        SET cluster = cluster1;
        """
    )
    start_time = time.process_time()
    try:
        c.sql(
            """
            SET statement_timeout = '1 s';
            CREATE TABLE IF NOT EXISTS log_table (f1 TEXT);
            CREATE TABLE IF NOT EXISTS panic_table (f1 TEXT);
            INSERT INTO panic_table VALUES ('panic!');
            -- Crash loop the cluster with the table's index
            INSERT INTO log_table SELECT mz_internal.mz_panic(f1) FROM panic_table;
            """
        )
    except ProgrammingError as e:
        # Ensure we received the correct error message
        assert "statement timeout" in e.args[0]["M"], e
        # Ensure the statement_timeout setting is approximately honored
        assert (
            time.process_time() - start_time < 2
        ), "statement_timeout not respected"
    else:
        assert False, "unexpected success in test_github_12251"

    # Ensure we can select from tables after cancellation.
    c.sql("SELECT * FROM log_table;")

def workflow_disable_user_indexes(c: Composition) -> None:
    seed = round(time.time())

    c.start_and_wait_for_tcp(services=prerequisites)

    c.up("materialized")
    c.wait_for_materialized()
    c.run("testdrive-svc", f"--seed={seed}", "disable-user-indexes/before.td")
    c.kill("materialized")

    with c.override(Materialized(options=f"{mz_options} --disable-user-indexes")):
        c.up("materialized")
        c.wait_for_materialized()
        c.run("testdrive-svc", f"--seed={seed}", "disable-user-indexes/after.td")
        c.kill("materialized")

    c.rm("materialized", "testdrive-svc", destroy_volumes=True)
    c.rm_volumes("mzdata")

def workflow_default(c: Composition) -> None: c.up("test-certs") c.start_and_wait_for_tcp( services=["zookeeper", "kafka", "schema-registry"]) c.up("materialized") c.wait_for_materialized() c.run("testdrive", "*.td")
def run_sqllogictest(c: Composition, command: str) -> None:
    c.up("postgres")
    c.wait_for_postgres(dbname="postgres")

    # Compute the report filename before the try block so it is always bound
    # when the finally clause runs.
    junit_report = ci_util.junit_report_filename(c.name)
    try:
        c.run("sqllogictest-svc", command, f"--junit-report={junit_report}")
    finally:
        ci_util.upload_junit_report(c.name, ROOT / junit_report)

def workflow_mz_with_options(c: Composition) -> None:
    c.up("mz_2_workers")
    c.wait_for_materialized("mz_2_workers")
    c.kill("mz_2_workers")

    c.up("mz_4_workers")
    c.wait_for_materialized("mz_4_workers")
    c.kill("mz_4_workers")

def workflow_test_drop_default_cluster(c: Composition) -> None:
    """Test that the default cluster can be dropped."""
    c.down(destroy_volumes=True)

    c.up("materialized")
    c.wait_for_materialized()

    c.sql("DROP CLUSTER default CASCADE")
    c.sql("CREATE CLUSTER default REPLICAS (default (SIZE '1'))")

def run_test(c: Composition, materialized: str, env: Dict[str, str]) -> None:
    c.up(materialized)
    c.wait_for_tcp(host=materialized, port=6875)
    c.run(
        "dbt-test",
        "pytest",
        "dbt-materialize/test",
        env=env,
    )

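# A hypothetical caller of the helper above; "materialized" matches the usual
# service name, but the environment passed here is an illustrative assumption,
# not a variable the dbt test suite is known to read.
def workflow_example(c: Composition) -> None:
    run_test(c, "materialized", {"DBT_TEST_FILTER": "basic"})  # assumed key
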
def workflow_load_test(c: Composition) -> None:
    """Run CH-benCHmark with a selected amount of load against Materialize."""
    c.up("prometheus-sql-exporter")
    c.workflow(
        "demo",
        "--peek-conns=1",
        "--mz-views=q01,q02,q05,q06,q08,q09,q12,q14,q17,q19",
        "--transactional-threads=2",
    )

def workflow_versioned_mz(c: Composition) -> None:
    for mz in versioned_mz:
        c.up(mz.name)
        c.wait_for_materialized(mz.name)
        c.run("testdrive-svc", "test*.td")
        c.kill(mz.name)

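# A sketch of the versioned_mz list this workflow assumes: one Materialized
# service per version under test. The tags are placeholders, not the versions
# actually pinned by the composition.
versioned_mz_example = [
    Materialized(name=f"materialized-{tag}", image=f"materialize/materialized:{tag}")
    for tag in ["v0.26.0", "latest"]  # assumed tags
]
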
def run_test(c: Composition, disruption: Disruption, id: int) -> None:
    print(f"+++ Running disruption scenario {disruption.name}")

    c.up("testdrive", persistent=True)

    nodes = [
        Computed(
            name="computed_1_1",
            peers=["computed_1_1", "computed_1_2"],
        ),
        Computed(
            name="computed_1_2",
            peers=["computed_1_1", "computed_1_2"],
        ),
        Computed(
            name="computed_2_1",
            peers=["computed_2_1", "computed_2_2"],
        ),
        Computed(
            name="computed_2_2",
            peers=["computed_2_1", "computed_2_2"],
        ),
    ]
    with c.override(*nodes):
        c.up("materialized", *[n.name for n in nodes])
        c.wait_for_materialized()

        c.sql(
            """
            CREATE CLUSTER cluster1 REPLICAS (
                replica1 (REMOTE ['computed_1_1:2100', 'computed_1_2:2100']),
                replica2 (REMOTE ['computed_2_1:2100', 'computed_2_2:2100'])
            )
            """
        )

        with c.override(
            Testdrive(
                validate_data_dir=False,
                no_reset=True,
                materialize_params={"cluster": "cluster1"},
                seed=id,
            )
        ):
            populate(c)

            # Disrupt replica1 by some means
            disruption.disruption(c)

            validate(c)

        cleanup_list = ["materialized", "testdrive", *[n.name for n in nodes]]
        c.kill(*cleanup_list)
        c.rm(*cleanup_list, destroy_volumes=True)

    c.rm_volumes("mzdata", "pgdata")

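# A sketch of the Disruption type the helper above assumes, inferred from the
# `.name` and `.disruption(c)` accesses; the example entry is illustrative
# (assumes dataclass and typing.Callable imports at the top of the file).
@dataclass
class Disruption:
    name: str
    disruption: Callable[[Composition], None]

disruptions_example = [
    Disruption(name="kill-one-computed", disruption=lambda c: c.kill("computed_1_1")),
]
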
def workflow_cluster(c: Composition, parser: WorkflowArgumentParser) -> None:
    """Run all the limits tests against a multi-node, multi-replica cluster."""
    parser.add_argument(
        "--scenario", metavar="SCENARIO", type=str, help="Scenario to run."
    )
    parser.add_argument(
        "--workers",
        type=int,
        metavar="N",
        default=2,
        help="set the default number of workers",
    )
    args = parser.parse_args()

    c.start_and_wait_for_tcp(services=["zookeeper", "kafka", "schema-registry"])
    c.up("materialized")
    c.wait_for_materialized()

    nodes = [
        Computed(
            name="computed_1_1",
            workers=args.workers,
            peers=["computed_1_1", "computed_1_2"],
        ),
        Computed(
            name="computed_1_2",
            workers=args.workers,
            peers=["computed_1_1", "computed_1_2"],
        ),
        Computed(
            name="computed_2_1",
            workers=args.workers,
            peers=["computed_2_1", "computed_2_2"],
        ),
        Computed(
            name="computed_2_2",
            workers=args.workers,
            peers=["computed_2_1", "computed_2_2"],
        ),
    ]
    with c.override(*nodes):
        c.up(*[n.name for n in nodes])
        c.sql(
            """
            CREATE CLUSTER cluster1 REPLICAS (
                replica1 (REMOTE ['computed_1_1:2100', 'computed_1_2:2100']),
                replica2 (REMOTE ['computed_2_1:2100', 'computed_2_2:2100'])
            )
            """
        )
        run_test(c, args)

def workflow_github_8021(c: Composition) -> None:
    c.up("materialized")
    c.wait_for_materialized("materialized")
    c.run("testdrive", "github-8021.td")

    # Ensure MZ can boot
    c.kill("materialized")
    c.up("materialized")
    c.wait_for_materialized("materialized")
    c.kill("materialized")

def initialize(c: Composition) -> None:
    c.up("materialized", "postgres", "toxiproxy")

    c.wait_for_materialized()
    c.wait_for_postgres()
    c.wait_for_tcp(host="toxiproxy", port=8474)

    # We run configure-postgres.td only once for all workflows, as it contains
    # a CREATE USER statement that is not idempotent.
    c.run("testdrive-svc", "configure-postgres.td")

def workflow_failpoints(c: Composition) -> None:
    seed = round(time.time())

    c.up("materialized")
    c.wait_for_materialized()

    c.run("testdrive-svc", f"--seed={seed}", f"failpoints/{td_test}.td")

    c.kill("materialized")
    c.rm("materialized", "testdrive-svc", destroy_volumes=True)
    c.rm_volumes("mzdata")

def workflow_compaction(c: Composition) -> None: with c.override(Materialized(options=f"--metrics-scraping-interval=1s", )): c.up("materialized") c.wait_for_materialized() c.run("testdrive", "compaction/compaction.td") c.kill("materialized") c.rm("materialized", "testdrive", destroy_volumes=True) c.rm_volumes("mzdata", "pgdata")
def run_test(c: Composition, args: argparse.Namespace) -> None:
    c.up("testdrive", persistent=True)

    scenarios = (
        [globals()[args.scenario]] if args.scenario else Generator.__subclasses__()
    )

    for scenario in scenarios:
        with tempfile.NamedTemporaryFile(mode="w", dir=c.path) as tmp:
            with contextlib.redirect_stdout(tmp):
                scenario.generate()
                sys.stdout.flush()
            c.exec("testdrive", os.path.basename(tmp.name))

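# A sketch of what a Generator subclass might look like: generate() is assumed
# to print a testdrive script to stdout, which the redirect_stdout above
# captures into the temp file. The class name and emitted statements are
# illustrative, not part of the suite's actual API.
class HypotheticalManyTables(Generator):
    @classmethod
    def generate(cls) -> None:
        for i in range(100):
            print(f"> CREATE TABLE t{i} (f1 INTEGER);")
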
def workflow_compaction(c: Composition) -> None:
    with c.override(mz_fast_metrics):
        c.up("materialized")
        c.wait_for_materialized()

        c.run("testdrive-svc", "compaction/compaction.td")

        c.kill("materialized")

    c.rm("materialized", "testdrive-svc", destroy_volumes=True)
    c.rm_volumes("mzdata")

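# mz_fast_metrics is assumed to be a module-level Materialized variant with a
# short metrics scraping interval, along the lines of this sketch (the flag
# value mirrors the sibling compaction workflow above).
mz_fast_metrics = Materialized(options="--metrics-scraping-interval=1s")
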
def workflow_default(c: Composition, parser: WorkflowArgumentParser) -> None: parser.add_argument( "filter", nargs="*", default=["*.td"], help="limit to only the files matching filter", ) args = parser.parse_args() c.up("materialized", "test-certs", "testdrive-svc", "postgres") c.wait_for_materialized() c.wait_for_postgres() c.run("testdrive-svc", *args.filter)
def workflow_test_resource_limits(c: Composition) -> None:
    """Test resource limits in Materialize."""
    c.down(destroy_volumes=True)

    with c.override(
        Testdrive(),
        Materialized(),
    ):
        c.up("materialized")
        c.wait_for_materialized()
        c.run("testdrive", "resources/resource-limits.td")

def workflow_kafka_sources(
    c: Composition, args_or_parser: Union[WorkflowArgumentParser, Namespace]
) -> None:
    start_deps(c, args_or_parser)

    seed = round(time.time())

    c.up("materialized")
    c.wait_for_materialized("materialized")

    c.run("testdrive", f"--seed={seed}", f"kafka-sources/*{td_test}*-before.td")

    c.kill("materialized")
    c.up("materialized")
    c.wait_for_materialized("materialized")

    # And restart again, for extra stress
    c.kill("materialized")
    c.up("materialized")
    c.wait_for_materialized("materialized")

    c.run("testdrive", f"--seed={seed}", f"kafka-sources/*{td_test}*-after.td")

    # Do one more restart, just in case, and confirm that Mz is able to come up
    c.kill("materialized")
    c.up("materialized")
    c.wait_for_materialized("materialized")

    c.kill("materialized")
    c.rm("materialized", "testdrive", destroy_volumes=True)
    c.rm_volumes("mzdata", "pgdata")

def workflow_kafka_sources(c: Composition) -> None:
    seed = round(time.time())

    c.start_and_wait_for_tcp(services=prerequisites)

    c.up("materialized")
    c.wait_for_materialized("materialized")

    c.run("testdrive-svc", f"--seed={seed}", f"kafka-sources/*{td_test}*-before.td")

    c.kill("materialized")
    c.up("materialized")
    c.wait_for_materialized("materialized")

    # And restart again, for extra stress
    c.kill("materialized")
    c.up("materialized")
    c.wait_for_materialized("materialized")

    c.run("testdrive-svc", f"--seed={seed}", f"kafka-sources/*{td_test}*-after.td")

    # Do one more restart, just in case, and confirm that Mz is able to come up
    c.kill("materialized")
    c.up("materialized")
    c.wait_for_materialized("materialized")

    c.kill("materialized")
    c.rm("materialized", "testdrive-svc", destroy_volumes=True)
    c.rm_volumes("mzdata")

def workflow_default(c: Composition, parser: WorkflowArgumentParser) -> None: parser.add_argument("--scenario", metavar="SCENARIO", type=str, help="Scenario to run.") args = parser.parse_args() c.start_and_wait_for_tcp( services=["zookeeper", "kafka", "schema-registry"]) c.up("materialized") c.wait_for_materialized() run_test(c, args)