Example #1
def workflow_default(c: Composition) -> None:
    c.start_and_wait_for_tcp(services=["localstack"])

    for version in CONFLUENT_PLATFORM_VERSIONS:
        print(f"==> Testing Confluent Platform {version}")
        confluent_platform_services = [
            Zookeeper(tag=version),
            Kafka(tag=version),
            SchemaRegistry(tag=version),
        ]
        with c.override(*confluent_platform_services):
            c.start_and_wait_for_tcp(services=[
                "zookeeper", "kafka", "schema-registry", "materialized"
            ])
            c.wait_for_materialized()
            c.run("testdrive", "kafka-matrix.td", "testdrive/kafka-*.td")
            c.kill(
                "zookeeper",
                "kafka",
                "schema-registry",
                "materialized",
            )
            c.rm(
                "zookeeper",
                "kafka",
                "schema-registry",
                "materialized",
                "testdrive",
                destroy_volumes=True,
            )
            c.rm_volumes("mzdata", "pgdata", force=True)
Example #2
def test_testdrive(c: Composition, mz: Materialized, aws: str, tests: str) -> None:
    c.start_and_wait_for_tcp(
        services=["zookeeper", "kafka", "schema-registry", mz.name]
    )
    c.wait_for_materialized(mz.name)
    c.run("testdrive-svc", aws, tests)
    c.kill(mz.name)
Example #3
def test_cluster(c: Composition, *glob: str) -> None:
    c.up("materialized")
    c.wait_for_materialized()

    # Create a remote cluster and verify that tests pass.
    c.up("computed_1")
    c.up("computed_2")
    c.sql("DROP CLUSTER IF EXISTS cluster1 CASCADE;")
    c.sql(
        "CREATE CLUSTER cluster1 REPLICAS (replica1 (REMOTE ['computed_1:2100', 'computed_2:2100']));"
    )
    c.run("testdrive", *glob)

    # Add a replica to that remote cluster and verify that tests still pass.
    c.up("computed_3")
    c.up("computed_4")
    c.sql(
        "CREATE CLUSTER REPLICA cluster1.replica2 REMOTE ['computed_3:2100', 'computed_4:2100']"
    )
    c.run("testdrive", *glob)

    # Kill one of the nodes in the first replica of the compute cluster and
    # verify that tests still pass.
    c.kill("computed_1")
    c.run("testdrive", *glob)

    # Leave only replica 2 up and verify that tests still pass.
    c.sql("DROP CLUSTER REPLICA cluster1.replica1")
    c.run("testdrive", *glob)
Example #4
def workflow_default(c: Composition, parser: WorkflowArgumentParser) -> None:
    c.start_and_wait_for_tcp(services=[
        "zookeeper", "kafka", "schema-registry", "materialized", "toxiproxy"
    ])
    c.wait_for_materialized()

    seed = random.getrandbits(16)
    for i, failure_mode in enumerate([
            "toxiproxy-close-connection.td",
            "toxiproxy-limit-connection.td",
            "toxiproxy-timeout.td",
            # TODO: Enable https://github.com/MaterializeInc/materialize/issues/11085
            # "toxiproxy-timeout-hold.td",
    ]):
        c.start_and_wait_for_tcp(["toxiproxy"])
        c.run(
            "testdrive-svc",
            "--no-reset",
            "--max-errors=1",
            f"--seed={seed}{i}",
            f"--temp-dir=/share/tmp/kafka-resumption-{seed}{i}",
            "setup.td",
            failure_mode,
            "during.td",
            "sleep.td",
            "toxiproxy-restore-connection.td",
            "verify-success.td",
            "cleanup.td",
        )
        c.kill("toxiproxy")
Example #5
def workflow_default(c: Composition, parser: WorkflowArgumentParser) -> None:
    parser.add_argument(
        "--seed",
        help="an alternate seed to use to avoid clashing with existing topics",
        type=int,
        default=1,
    )
    args = parser.parse_args()

    c.start_and_wait_for_tcp(
        services=["zookeeper", "kafka", "schema-registry", "materialized"])
    c.run(
        "testdrive",
        f"--seed={args.seed}",
        "--kafka-option=group.id=group1",
        "before-restart.td",
    )
    c.kill("materialized")
    c.up("materialized")
    c.wait_for_materialized()
    c.run(
        "testdrive",
        f"--seed={args.seed}",
        "--no-reset",
        "--kafka-option=group.id=group2",
        "after-restart.td",
    )
Example #6
def test_proxy(c: Composition, aws: str) -> None:
    for test in tests:
        mz: Materialized = test["mz"]
        c.up(mz.name)
        c.wait_for_materialized(mz.name)
        c.run("testdrive-svc", aws, test["td"])
        c.kill(mz.name)
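
The `tests` list iterated here is defined elsewhere in the module; the loop only assumes each entry carries a Materialized service under "mz" and a testdrive file under "td". A sketch under that assumption (service names and files are hypothetical):

# Hypothetical proxy test matrix: one Materialized configuration per entry.
tests = [
    {"mz": Materialized(name="mz_no_proxy"), "td": "proxy-none.td"},
    {
        "mz": Materialized(
            name="mz_http_proxy",
            environment=["HTTP_PROXY=http://squid:3128"],
        ),
        "td": "proxy-http.td",
    },
]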
Example #7
def workflow_disable_user_indexes(c: Composition) -> None:
    seed = round(time.time())

    c.start_and_wait_for_tcp(services=prerequisites)

    c.up("materialized")
    c.wait_for_materialized()

    c.run("testdrive-svc", f"--seed={seed}", "disable-user-indexes/before.td")

    c.kill("materialized")

    with c.override(
        Materialized(options=f"{mz_options} --disable-user-indexes")
    ):
        c.up("materialized")
        c.wait_for_materialized()

        c.run("testdrive-svc", f"--seed={seed}",
              "disable-user-indexes/after.td")

        c.kill("materialized")

    c.rm("materialized", "testdrive-svc", destroy_volumes=True)

    c.rm_volumes("mzdata")
Example #8
def run_one_failpoint(c: Composition, failpoint: str, action: str) -> None:
    print(
        f">>> Running failpoint test for failpoint {failpoint} with action {action}"
    )

    seed = round(time.time())

    c.up("materialized")
    c.wait_for_materialized()

    c.run(
        "testdrive-svc",
        f"--seed={seed}",
        f"--var=failpoint={failpoint}",
        f"--var=action={action}",
        "failpoints/before.td",
    )

    time.sleep(2)
    # Kill Mz if the failpoint has not already killed it.
    c.kill("materialized")
    c.up("materialized")
    c.wait_for_materialized()

    c.run("testdrive-svc", f"--seed={seed}", "failpoints/after.td")

    c.kill("materialized")
    c.rm("materialized", "testdrive-svc", destroy_volumes=True)
    c.rm_volumes("mzdata")
Example #9
def workflow_default(c: Composition, parser: WorkflowArgumentParser) -> None:
    """Run testdrive."""
    parser.add_argument(
        "--redpanda",
        action="store_true",
        help="run against Redpanda instead of the Confluent Platform",
    )
    parser.add_argument(
        "--aws-region",
        help="run against the specified AWS region instead of localstack",
    )
    parser.add_argument(
        "--workers",
        type=int,
        metavar="N",
        help="set the number of materialized dataflow workers",
    )
    parser.add_argument(
        "--persistent-user-tables",
        action="store_true",
        help="enable the --persistent-user-tables materialized option",
    )
    parser.add_argument(
        "files",
        nargs="*",
        default=["*.td", "esoteric/*.td"],
        help="run against the specified files",
    )
    args = parser.parse_args()

    if not args.redpanda and Arch.host() == Arch.AARCH64:
        ui.warn(
            "Running the Confluent Platform in Docker on ARM-based machines is "
            "nearly unusably slow. Consider using Redpanda instead (--redpanda) "
            "or running tests without mzcompose.")

    dependencies = ["materialized"]
    if args.redpanda:
        dependencies += ["redpanda"]
    else:
        dependencies += ["zookeeper", "kafka", "schema-registry"]

    materialized = Materialized(
        workers=args.workers,
        options=["--persistent-user-tables"]
        if args.persistent_user_tables else [],
    )

    testdrive = Testdrive(
        forward_buildkite_shard=True,
        entrypoint_extra=[f"--aws-region={args.aws_region}"]
        if args.aws_region else ["--aws-endpoint=http://localstack:4566"],
    )

    with c.override(materialized, testdrive):
        c.start_and_wait_for_tcp(services=dependencies)
        c.wait_for_materialized("materialized")
        c.run("testdrive-svc", *args.files)
        c.kill("materialized")
Example #10
def workflow_mz_with_options(c: Composition) -> None:
    c.up("mz_2_workers")
    c.wait_for_materialized("mz_2_workers")
    c.kill("mz_2_workers")

    c.up("mz_4_workers")
    c.wait_for_materialized("mz_4_workers")
    c.kill("mz_4_workers")
Example #11
def test_upgrade_from_version(
    c: Composition, from_version: str, priors: List[str], filter: str, style: str = ""
) -> None:
    print(f"===>>> Testing upgrade from Materialize {from_version} to current_source.")

    version_glob = "{" + ",".join(["any_version", *priors, from_version]) + "}"
    print(">>> Version glob pattern: " + version_glob)

    c.rm("materialized", "testdrive-svc", stop=True)
    c.rm_volumes("mzdata", "tmp")

    if from_version != "current_source":
        mz_from = Materialized(
            image=f"materialize/materialized:{from_version}",
            options=" ".join(
                opt
                for start_version, opt in mz_options.items()
                if from_version[1:] >= start_version
            ),
            environment=[
                "SSL_KEY_PASSWORD=mzmzmz",
            ],
            volumes_extra=["secrets:/share/secrets"],
        )
        with c.override(mz_from):
            c.up("materialized")
    else:
        c.up("materialized")

    c.wait_for_materialized("materialized")

    temp_dir = f"--temp-dir=/share/tmp/upgrade-from-{from_version}"
    seed = f"--seed={random.getrandbits(32)}"
    c.run(
        "testdrive-svc",
        "--no-reset",
        f"--var=upgrade-from-version={from_version}",
        temp_dir,
        seed,
        f"create-{style}in-{version_glob}-{filter}.td",
    )

    c.kill("materialized")
    c.rm("materialized", "testdrive-svc")

    c.up("materialized")
    c.wait_for_materialized("materialized")

    c.run(
        "testdrive-svc",
        "--no-reset",
        f"--var=upgrade-from-version={from_version}",
        temp_dir,
        seed,
        "--validate-catalog=/share/mzdata/catalog",
        f"check-{style}from-{version_glob}-{filter}.td",
    )
Example #12
def workflow_smoke_test(c: Composition) -> None:
    c.workflow(
        "default",
        "--num-seconds=15",
        "--records-per-second=1000",
    )
    c.kill("materialized")
    c.rm("materialized", "testdrive", "kafka", destroy_volumes=True)
    c.rm_volumes("mzdata", "pgdata")
Example #13
def workflow_versioned_mz(c: Composition) -> None:
    for mz in versioned_mz:
        c.up(mz.name)

        c.wait_for_materialized(mz.name)

        c.run("testdrive-svc", "test*.td")

        c.kill(mz.name)
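
`versioned_mz` is defined elsewhere in the module, presumably one version-pinned Materialized service per release under test; a sketch with placeholder versions:

# Hypothetical list of version-pinned Materialized services.
versioned_mz = [
    Materialized(
        name=f"materialized_{tag.replace('.', '_')}",
        image=f"materialize/materialized:{tag}",
    )
    for tag in ["v0.26.0", "v0.26.1"]
]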
Example #14
def run_test(c: Composition, disruption: Disruption, id: int) -> None:
    print(f"+++ Running disruption scenario {disruption.name}")

    c.up("testdrive", persistent=True)

    nodes = [
        Computed(
            name="computed_1_1",
            peers=["computed_1_1", "computed_1_2"],
        ),
        Computed(
            name="computed_1_2",
            peers=["computed_1_1", "computed_1_2"],
        ),
        Computed(
            name="computed_2_1",
            peers=["computed_2_1", "computed_2_2"],
        ),
        Computed(
            name="computed_2_2",
            peers=["computed_2_1", "computed_2_2"],
        ),
    ]

    with c.override(*nodes):
        c.up("materialized", *[n.name for n in nodes])
        c.wait_for_materialized()

        c.sql(
            """
            CREATE CLUSTER cluster1 REPLICAS (
                replica1 (REMOTE ['computed_1_1:2100', 'computed_1_2:2100']),
                replica2 (REMOTE ['computed_2_1:2100', 'computed_2_2:2100'])
            )
            """
        )

        with c.override(
            Testdrive(
                validate_data_dir=False,
                no_reset=True,
                materialize_params={"cluster": "cluster1"},
                seed=id,
            )
        ):
            populate(c)

            # Disrupt replica1 by some means
            disruption.disruption(c)

            validate(c)

        cleanup_list = ["materialized", "testdrive", *[n.name for n in nodes]]
        c.kill(*cleanup_list)
        c.rm(*cleanup_list, destroy_volumes=True)
        c.rm_volumes("mzdata", "pgdata")
Example #15
def workflow_github_8021(c: Composition) -> None:
    c.up("materialized")
    c.wait_for_materialized("materialized")
    c.run("testdrive", "github-8021.td")

    # Ensure MZ can boot
    c.kill("materialized")
    c.up("materialized")
    c.wait_for_materialized("materialized")
    c.kill("materialized")
Example #16
def run_one_scenario(
    c: Composition, scenario: Type[Scenario], args: argparse.Namespace
) -> Comparator:
    name = scenario.__name__
    print(f"--- Now benchmarking {name} ...")
    comparator = make_comparator(name)
    common_seed = round(time.time())

    mzs = {
        "this": Materialized(
            image=f"materialize/materialized:{args.this_tag}"
            if args.this_tag
            else None,
            options=args.this_options,
        ),
        "other": Materialized(
            image=f"materialize/materialized:{args.other_tag}"
            if args.other_tag
            else None,
            options=args.other_options,
        ),
    }

    for mz_id, instance in enumerate(["this", "other"]):
        with c.override(mzs[instance]):
            print(f"The version of the '{instance.upper()}' Mz instance is:")
            c.run("materialized", "--version")

            c.start_and_wait_for_tcp(services=["materialized"])
            c.wait_for_materialized()

            executor = Docker(
                composition=c,
                seed=common_seed,
            )

            benchmark = Benchmark(
                mz_id=mz_id,
                scenario=scenario,
                scale=args.scale,
                executor=executor,
                filter=make_filter(args),
                termination_conditions=make_termination_conditions(args),
                aggregation=make_aggregation(),
            )

            outcome, iterations = benchmark.run()
            comparator.append(outcome)

            c.kill("materialized")
            c.rm("materialized", "testdrive-svc")
            c.rm_volumes("mzdata")

    return comparator
Example #17
def workflow_failpoints(c: Composition) -> None:
    seed = round(time.time())

    c.up("materialized")
    c.wait_for_materialized()

    c.run("testdrive-svc", f"--seed={seed}", f"failpoints/{td_test}.td")

    c.kill("materialized")
    c.rm("materialized", "testdrive-svc", destroy_volumes=True)
    c.rm_volumes("mzdata")
Example #18
def workflow_smoke_test(c: Composition) -> None:
    for arg in ["--upsert", "--enable-persistence"]:
        c.workflow(
            "default",
            "--num-seconds=15",
            "--records-per-second=1000",
            arg,
        )
        c.kill("materialized")
        c.rm("materialized", "testdrive-svc", "kafka", destroy_volumes=True)
        c.rm_volumes("mzdata")
Example #19
def workflow_compaction(c: Composition) -> None:
    with c.override(mz_fast_metrics):
        c.up("materialized")
        c.wait_for_materialized()

        c.run("testdrive-svc", "compaction/compaction.td")

        c.kill("materialized")

    c.rm("materialized", "testdrive-svc", destroy_volumes=True)

    c.rm_volumes("mzdata")
Example #20
def workflow_compaction(c: Composition) -> None:
    with c.override(Materialized(options="--metrics-scraping-interval=1s")):
        c.up("materialized")
        c.wait_for_materialized()

        c.run("testdrive", "compaction/compaction.td")

        c.kill("materialized")

    c.rm("materialized", "testdrive", destroy_volumes=True)

    c.rm_volumes("mzdata", "pgdata")
Example #21
def test_upgrade_from_version(
    c: Composition, from_version: str, priors: List[str], filter: str
) -> None:
    print(f"===>>> Testing upgrade from Materialize {from_version} to current_source.")

    version_glob = "|".join(["any_version", *priors, from_version])
    print(">>> Version glob pattern: " + version_glob)

    if from_version != "current_source":
        mz_from = Materialized(
            image=f"materialize/materialized:{from_version}",
            options=" ".join(
                opt
                for start_version, opt in mz_options.items()
                if from_version[1:] >= start_version
            ),
        )
        with c.override(mz_from):
            c.up("materialized")
    else:
        c.up("materialized")

    c.wait_for_materialized("materialized")

    temp_dir = f"--temp-dir=/share/tmp/upgrade-from-{from_version}"
    with patch.dict(os.environ, {"UPGRADE_FROM_VERSION": from_version}):
        c.run(
            "testdrive-svc",
            "--seed=1",
            "--no-reset",
            temp_dir,
            f"create-in-@({version_glob})-{filter}.td",
        )

    c.kill("materialized")
    c.rm("materialized", "testdrive-svc")

    c.up("materialized")
    c.wait_for_materialized("materialized")

    with patch.dict(os.environ, {"UPGRADE_FROM_VERSION": from_version}):
        c.run(
            "testdrive-svc",
            "--seed=1",
            "--no-reset",
            temp_dir,
            f"--validate-catalog=/share/mzdata/catalog check-from-@({version_glob})-{filter}.td",
        )

    c.kill("materialized")
    c.rm("materialized", "testdrive-svc")
    c.rm_volumes("mzdata", "tmp")
Example #22
def workflow_kafka_sources(
        c: Composition, args_or_parser: Union[WorkflowArgumentParser,
                                              Namespace]) -> None:
    start_deps(c, args_or_parser)

    seed = round(time.time())

    c.up("materialized")
    c.wait_for_materialized("materialized")

    c.run("testdrive", f"--seed={seed}",
          f"kafka-sources/*{td_test}*-before.td")

    c.kill("materialized")
    c.up("materialized")
    c.wait_for_materialized("materialized")

    # And restart again, for extra stress
    c.kill("materialized")
    c.up("materialized")
    c.wait_for_materialized("materialized")

    c.run("testdrive", f"--seed={seed}", f"kafka-sources/*{td_test}*-after.td")

    # Do one more restart, just in case, and confirm that Mz is able to come up
    c.kill("materialized")
    c.up("materialized")
    c.wait_for_materialized("materialized")

    c.kill("materialized")
    c.rm("materialized", "testdrive", destroy_volumes=True)
    c.rm_volumes("mzdata", "pgdata")
Example #23
def workflow_kafka_sources(c: Composition) -> None:
    seed = round(time.time())

    c.start_and_wait_for_tcp(services=prerequisites)

    c.up("materialized")
    c.wait_for_materialized("materialized")

    c.run("testdrive-svc", f"--seed={seed}",
          f"kafka-sources/*{td_test}*-before.td")

    c.kill("materialized")
    c.up("materialized")
    c.wait_for_materialized("materialized")

    # And restart again, for extra stress
    c.kill("materialized")
    c.up("materialized")
    c.wait_for_materialized("materialized")

    c.run("testdrive-svc", f"--seed={seed}",
          f"kafka-sources/*{td_test}*-after.td")

    # Do one more restart, just in case, and confirm that Mz is able to come up
    c.kill("materialized")
    c.up("materialized")
    c.wait_for_materialized("materialized")

    c.kill("materialized")
    c.rm("materialized", "testdrive-svc", destroy_volumes=True)
    c.rm_volumes("mzdata")
Example #24
def workflow_test_builtin_migration(c: Composition) -> None:
    """Exercise the builtin object migration code by upgrading between two versions
    that will have a migration triggered between them. Create a materialized view
    over the affected builtin object to confirm that the migration was successful.
    """

    c.down(destroy_volumes=True)
    with c.override(
            # Random commit before pg_roles was updated.
            Materialized(
                image="materialize/materialized:devel-9efd269199b1510b3e8f90196cb4fa3072a548a1",
            ),
            Testdrive(default_timeout="15s",
                      no_reset=True,
                      consistent_seed=True),
    ):
        c.up("testdrive", persistent=True)
        c.up("materialized")
        c.wait_for_materialized()

        c.testdrive(input=dedent("""
            > CREATE VIEW v1 AS SELECT COUNT(*) FROM pg_roles;
            > SELECT * FROM v1;
            2
            ! SELECT DISTINCT rolconnlimit FROM pg_roles;
            contains:column "rolconnlimit" does not exist
        """))

        c.kill("materialized")

    with c.override(
            # This will stop working if we introduce a breaking change.
            Materialized(),
            Testdrive(default_timeout="15s",
                      no_reset=True,
                      consistent_seed=True),
    ):
        c.up("testdrive", persistent=True)
        c.up("materialized")
        c.wait_for_materialized()

        c.testdrive(input=dedent("""
            > SELECT * FROM v1;
            2
            # This column is new after the migration
            > SELECT DISTINCT rolconnlimit FROM pg_roles;
            -1
        """))
Example #25
def workflow_disable_user_indexes(c: Composition) -> None:
    c.start_and_wait_for_tcp(
        services=["zookeeper", "kafka", "schema-registry"])

    # Create catalog with vanilla MZ
    c.up("materialized")
    c.wait_for_materialized("materialized")
    c.run("testdrive-svc", "user-indexes-enabled.td")
    c.kill("materialized")

    # Test semantics of disabling user indexes
    c.up("mz_disable_user_indexes")
    c.wait_for_materialized("mz_disable_user_indexes")
    c.run("testdrive_no_reset", "user-indexes-disabled.td")
    c.kill("mz_disable_user_indexes")
Example #26
def workflow_default(c: Composition) -> None:
    c.start_and_wait_for_tcp(services=[
        "zookeeper",
        "kafka1",
        "kafka2",
        "kafka3",
        "schema-registry",
        "materialized",
    ])
    c.run("testdrive", "--kafka-addr=kafka2", "01-init.td")
    time.sleep(10)
    c.kill("kafka1")
    time.sleep(10)
    c.run("testdrive", "--kafka-addr=kafka2,kafka3", "--no-reset",
          "02-after-leave.td")
    c.up("kafka1")
    time.sleep(10)
    c.run("testdrive", "--kafka-addr=kafka1", "--no-reset", "03-after-join.td")
Example #27
def workflow_test_cluster(c: Composition,
                          parser: WorkflowArgumentParser) -> None:
    """Run testdrive in a variety of compute cluster configurations."""

    parser.add_argument(
        "glob",
        nargs="*",
        default=["smoke/*.td"],
        help="run against the specified files",
    )
    args = parser.parse_args()

    c.down(destroy_volumes=True)
    c.start_and_wait_for_tcp(
        services=["zookeeper", "kafka", "schema-registry", "localstack"])
    c.up("materialized")
    c.wait_for_materialized()

    # Create a remote cluster and verify that tests pass.
    c.up("computed_1")
    c.up("computed_2")
    c.sql("DROP CLUSTER IF EXISTS cluster1 CASCADE;")
    c.sql(
        "CREATE CLUSTER cluster1 REPLICAS (replica1 (REMOTE ['computed_1:2100', 'computed_2:2100']));"
    )
    c.run("testdrive", *args.glob)

    # Add a replica to that remote cluster and verify that tests still pass.
    c.up("computed_3")
    c.up("computed_4")
    c.sql(
        "CREATE CLUSTER REPLICA cluster1.replica2 REMOTE ['computed_3:2100', 'computed_4:2100']"
    )
    c.run("testdrive", *args.glob)

    # Kill one of the nodes in the first replica of the compute cluster and
    # verify that tests still pass.
    c.kill("computed_1")
    c.run("testdrive", *args.glob)

    # Leave only replica 2 up and verify that tests still pass.
    c.sql("DROP CLUSTER REPLICA cluster1.replica1")
    c.run("testdrive", *args.glob)
Example #28
def workflow_test_github_13603(c: Composition) -> None:
    """Test that multi woker replicas terminate eagerly upon rehydration"""
    c.down(destroy_volumes=True)
    c.up("materialized")
    c.wait_for_materialized()

    c.up("computed_1")
    c.up("computed_2")
    c.sql(
        "CREATE CLUSTER cluster1 REPLICAS (replica1 (REMOTE ['computed_1:2100', 'computed_2:2100']));"
    )

    c.kill("materialized")
    c.up("materialized")
    c.wait_for_materialized()

    # Ensure the computeds have crashed
    c1 = c.invoke("logs", "computed_1", capture=True)
    assert "panicked" in c1.stdout
    c2 = c.invoke("logs", "computed_2", capture=True)
    assert "panicked" in c2.stdout
Example #29
def workflow_audit_log(c: Composition) -> None:
    c.up("materialized")
    c.wait_for_materialized(service="materialized")

    # Create some audit log entries.
    c.sql("CREATE TABLE t (i INT)")
    c.sql("CREATE DEFAULT INDEX ON t")

    log = c.sql_query("SELECT * FROM mz_audit_events ORDER BY id")

    # Restart mz.
    c.kill("materialized")
    c.up("materialized")
    c.wait_for_materialized()

    # Verify the audit log entries are still present and have not changed.
    restart_log = c.sql_query("SELECT * FROM mz_audit_events ORDER BY id")
    if log != restart_log:
        print("initial audit log:", log)
        print("audit log after restart:", restart_log)
        raise Exception("audit logs not equal after restart")
Example #30
def workflow_user_tables(
        c: Composition, args_or_parser: Union[WorkflowArgumentParser,
                                              Namespace]) -> None:
    start_deps(c, args_or_parser)

    seed = round(time.time())

    c.up("materialized")
    c.wait_for_materialized()

    c.run(
        "testdrive",
        f"--seed={seed}",
        f"user-tables/table-persistence-before-{td_test}.td",
    )

    c.kill("materialized")
    c.up("materialized")

    c.kill("materialized")
    c.up("materialized")

    c.run(
        "testdrive",
        f"--seed={seed}",
        f"user-tables/table-persistence-after-{td_test}.td",
    )

    c.kill("materialized")
    c.rm("materialized", "testdrive", destroy_volumes=True)
    c.rm_volumes("mzdata", "pgdata")