Example #1
def workflow_default(c: Composition, parser: WorkflowArgumentParser) -> None:
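    """Write records to Kafka at a steady rate and verify that Materialize
    ingests them, with persist blob/consensus storage configured via flags."""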
    parser.add_argument(
        "--num-seconds",
        type=int,
        default=100,
        help="number of seconds to write records to Kafka",
    )
    parser.add_argument(
        "--records-per-second",
        type=int,
        default=10000,
        help="throughput of writes to maintain during testing",
    )
    parser.add_argument(
        "--num-keys",
        type=int,
        default=1000000000,
        help="number of distinct keys",
    )
    parser.add_argument(
        "--value-bytes",
        type=int,
        default=500,
        help="record payload size in bytes",
    )
    parser.add_argument(
        "--timeout-secs",
        type=int,
        default=120,
        help="timeout to send records to Kafka",
    )
    parser.add_argument(
        "--blob-url",
        type=str,
        default=None,
        help="location where we store persist blob data",
    )
    parser.add_argument(
        "--consensus-url",
        type=str,
        default=None,
        help="location where we store persist consensus data",
    )
    args = parser.parse_args()

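    # Forward the persist storage locations to materialized.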
    options = [
        "--persist-consensus-url",
        f"{args.consensus_url}",
        "--persist-blob-url",
        f"{args.blob_url}",
    ]

    override = [Materialized(options=options)]

    with c.override(*override):
        c.start_and_wait_for_tcp(services=prerequisites)

        c.up("materialized")
        c.wait_for_materialized("materialized")

        c.run(
            "testdrive",
            "setup.td",
        )

        start = time.monotonic()
        records_sent = 0
        total_records_to_send = args.records_per_second * args.num_seconds
        # Maximum observed delta between records sent by the benchmark and ingested by
        # Materialize.
        max_lag = 0
        last_reported_time = 0.0

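        # Produce records at the requested rate and poll Materialize until all
        # of them have been ingested.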
        while True:
            elapsed = time.monotonic() - start
            records_ingested = query_materialize(c)

            lag = records_sent - records_ingested

            if lag > max_lag:
                max_lag = lag

            # Report our findings back once per second.
            if elapsed - last_reported_time > 1:
                print(
                    f"C> after {elapsed:.3f}s sent {records_sent} records, and ingested {records_ingested}. max observed lag {max_lag} records, most recent lag {lag} records"
                )
                last_reported_time = elapsed

            # Determine how many records we are scheduled to send, based on how long
            # the benchmark has been running and the desired QPS.
            records_scheduled = int(
                min(elapsed, args.num_seconds) * args.records_per_second)
            records_to_send = records_scheduled - records_sent

            if records_to_send > 0:
                send_records(
                    c,
                    num_records=records_to_send,
                    num_keys=args.num_keys,
                    value_bytes=args.value_bytes,
                    timeout_secs=args.timeout_secs,
                )
                records_sent = records_scheduled

            # Exit once we've sent all the records we need to send, and confirmed that
            # Materialize has ingested them.
            if records_sent == total_records_to_send == records_ingested:
                print(
                    f"C> Finished after {elapsed:.3f}s sent and ingested {records_sent} records. max observed lag {max_lag} records."
                )
                break
Example #2
def workflow_default(c: Composition, parser: WorkflowArgumentParser) -> None:
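    """Write records to Kafka at a steady rate and verify that Materialize
    ingests them, optionally using ENVELOPE UPSERT and/or persistence."""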
    parser.add_argument(
        "--num-seconds",
        type=int,
        default=100,
        help="number of seconds to write records to Kafka",
    )
    parser.add_argument(
        "--records-per-second",
        type=int,
        default=10000,
        help="throughput of writes to maintain during testing",
    )
    parser.add_argument(
        "--num-keys",
        type=int,
        default=1000000000,
        help="number of distinct keys",
    )
    parser.add_argument(
        "--value-bytes",
        type=int,
        default=500,
        help="record payload size in bytes",
    )
    parser.add_argument(
        "--upsert",
        action="store_true",
        help="whether to use envelope UPSERT (True) or NONE (False)",
    )
    parser.add_argument(
        "--timeout-secs",
        type=int,
        default=120,
        help="timeout to send records to Kafka",
    )
    parser.add_argument(
        "--enable-persistence",
        action="store_true",
        help="whether or not to enable persistence on materialized",
    )
    parser.add_argument(
        "--s3-storage",
        type=str,
        default=None,
        help="enables s3 persist storage, pointed at the given subpath of our internal testing bucket",
    )
    parser.add_argument(
        "--workers",
        type=int,
        default=None,
        help="number of dataflow workers to use in materialized",
    )
    args = parser.parse_args()

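    # Choose the source envelope based on --upsert.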
    envelope = "NONE"
    if args.upsert:
        envelope = "UPSERT"

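    # Collect extra materialized options when persistence is enabled.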
    options = []
    if args.enable_persistence:
        options = [
            "--persistent-user-tables",
            "--persistent-kafka-sources",
            "--disable-persistent-system-tables-test",
        ]

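    # --s3-storage must be non-empty and requires --enable-persistence.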
    if args.s3_storage == "":
        print("--s3-storage value must be non-empty", file=sys.stderr)
        sys.exit(1)
    elif args.s3_storage:
        if not args.enable_persistence:
            print(
                "cannot specify --s3-storage without --enable-persistence",
                file=sys.stderr,
            )
            sys.exit(1)
        options.extend([
            "--persist-storage-enabled",
            f"--persist-storage=s3://mtlz-test-persist-1d-lifecycle-delete/{args.s3_storage}",
        ])

    override = [
        Materialized(
            workers=args.workers,
            timestamp_frequency="1s",
            options=options,
        )
    ]

    with c.override(*override):
        c.start_and_wait_for_tcp(services=prerequisites)

        c.up("materialized")
        c.wait_for_materialized("materialized")

        c.run(
            "testdrive-svc",
            f"--var=envelope={envelope}",
            "setup.td",
        )

        start = time.monotonic()
        records_sent = 0
        total_records_to_send = args.records_per_second * args.num_seconds
        # Maximum observed delta between records sent by the benchmark and ingested by
        # Materialize.
        max_lag = 0
        last_reported_time = 0.0

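        # Produce records at the requested rate and poll Materialize until all
        # of them have been ingested.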
        while True:
            elapsed = time.monotonic() - start
            records_ingested = query_materialize(c)

            lag = records_sent - records_ingested

            if lag > max_lag:
                max_lag = lag

            # Report our findings back once per second.
            if elapsed - last_reported_time > 1:
                print(
                    f"C> after {elapsed:.3f}s sent {records_sent} records, and ingested {records_ingested}. max observed lag {max_lag} records, most recent lag {lag} records"
                )
                last_reported_time = elapsed

            # Determine how many records we are scheduled to send, based on how long
            # the benchmark has been running and the desired QPS.
            records_scheduled = int(
                min(elapsed, args.num_seconds) * args.records_per_second)
            records_to_send = records_scheduled - records_sent

            if records_to_send > 0:
                send_records(
                    c,
                    num_records=records_to_send,
                    num_keys=args.num_keys,
                    value_bytes=args.value_bytes,
                    timeout_secs=args.timeout_secs,
                )
                records_sent = records_scheduled

            # Exit once we've sent all the records we need to send, and confirmed that
            # Materialize has ingested them.
            if records_sent == total_records_to_send == records_ingested:
                print(
                    f"C> Finished after {elapsed:.3f}s sent and ingested {records_sent} records. max observed lag {max_lag} records."
                )
                break
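
Note: Examples #1 and #2 call query_materialize(c) and send_records(c, ...), which are defined elsewhere in the same mzcompose.py and are not shown here. The sketch below is only an illustration of what such helpers might look like; the view name, the testdrive file, the --var parameters, and the Composition.sql_query method are assumptions, not the original implementation.

# Hypothetical sketch of the helpers assumed by Examples #1 and #2 (not the original code).

def query_materialize(c: Composition) -> int:
    # Assumption: setup.td created a view (here called `ingest_progress`) that
    # counts the records ingested from the Kafka source.
    rows = c.sql_query("SELECT count FROM ingest_progress")
    return int(rows[0][0])


def send_records(
    c: Composition,
    num_records: int,
    num_keys: int,
    value_bytes: int,
    timeout_secs: int,
) -> None:
    # Assumption: a send-records.td testdrive script produces records to the
    # Kafka topic, parameterized via testdrive's --var flags.
    c.run(
        "testdrive",
        f"--default-timeout={timeout_secs}s",
        f"--var=num-records={num_records}",
        f"--var=num-keys={num_keys}",
        f"--var=value-bytes={value_bytes}",
        "send-records.td",
    )
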
Example #3
def workflow_mzcloud(c: Composition, parser: WorkflowArgumentParser) -> None:
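    """Run the feature-benchmark scenarios against an existing Materialize
    Cloud deployment, with Kafka and the Schema Registry hosted locally."""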
    # Make sure Kafka is externally accessible on a predictable port.
    assert (
        c.preserve_ports
    ), "`--preserve-ports` must be specified (BEFORE the `run` command)"

    parser.add_argument(
        "--mzcloud-url",
        type=str,
        help="The postgres connection url to the mzcloud deployment to benchmark.",
    )

    parser.add_argument(
        "--external-addr",
        type=str,
        help="Kafka and Schema Registry are started by mzcompose and exposed on the public interface. This is the IP address or hostname that is accessible by the mzcloud instance, usually the public IP of your machine.",
    )

    parser.add_argument(
        "--root-scenario",
        "--scenario",
        metavar="SCENARIO",
        type=str,
        default="Scenario",
        help="Scenario or scenario family to benchmark. See scenarios.py for available scenarios.",
    )

    parser.add_argument(
        "--scale",
        metavar="+N | -N | N",
        type=str,
        default=None,
        help="Absolute or relative scale to apply.",
    )

    parser.add_argument(
        "--max-measurements",
        metavar="N",
        type=int,
        default=99,
        help="Limit the number of measurements to N.",
    )

    parser.add_argument(
        "--max-retries",
        metavar="N",
        type=int,
        default=2,
        help="Retry any potential performance regressions up to N times.",
    )

    parser.add_argument(
        "--test-filter",
        type=str,
        help="Filter scenario names by this string (case insensitive).",
    )

    args = parser.parse_args()

    assert args.mzcloud_url
    assert args.external_addr

    print(f"""
mzcloud url: {args.mzcloud_url}
external addr: {args.external_addr}

root_scenario: {args.root_scenario}""")

    overrides = [
        KafkaService(extra_environment=[
            f"KAFKA_ADVERTISED_LISTENERS=PLAINTEXT://{args.external_addr}:9092"
        ]),
    ]

    with c.override(*overrides):
        c.start_and_wait_for_tcp(
            services=["zookeeper", "kafka", "schema-registry"])
        c.up("testdrive", persistent=True)

        # Build the list of scenarios to run
        root_scenario = globals()[args.root_scenario]
        initial_scenarios = {}

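        # Expand the root scenario into its leaf subclasses; a scenario without
        # subclasses is benchmarked directly.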
        if root_scenario.__subclasses__():
            for scenario in root_scenario.__subclasses__():
                has_children = False
                for s in scenario.__subclasses__():
                    has_children = True
                    initial_scenarios[s] = 1

                if not has_children:
                    initial_scenarios[scenario] = 1
        else:
            initial_scenarios[root_scenario] = 1

        scenarios = initial_scenarios.copy()

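        # Benchmark the selected scenarios for up to --max-retries cycles.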
        for cycle in range(0, args.max_retries):
            print(
                f"Cycle {cycle+1} with scenarios: {', '.join([scenario.__name__ for scenario in scenarios.keys()])}"
            )

            report = SingleReport()

            for scenario in list(scenarios.keys()):
                name = scenario.__name__
                if args.test_filter and args.test_filter.lower() not in name.lower():
                    continue
                print(f"--- Now benchmarking {name} ...")
                comparator = SuccessComparator(name, threshold=0)
                common_seed = round(time.time())
                executor = MzCloud(
                    composition=c,
                    mzcloud_url=args.mzcloud_url,
                    seed=common_seed,
                    external_addr=args.external_addr,
                )
                executor.Reset()
                mz_id = 0

                benchmark = Benchmark(
                    mz_id=mz_id,
                    scenario=scenario,
                    scale=args.scale,
                    executor=executor,
                    filter=make_filter(args),
                    termination_conditions=make_termination_conditions(args),
                    aggregation=make_aggregation(),
                )

                outcome, iterations = benchmark.run()
                comparator.append(outcome)
                report.append(comparator)

                print(f"+++ Benchmark Report for cycle {cycle+1}:")
                report.dump()
Example #4
def workflow_default(c: Composition, parser: WorkflowArgumentParser) -> None:
    """Feature benchmark framework."""

    c.silent = True

    parser.add_argument(
        "--redpanda",
        action="store_true",
        help="run against Redpanda instead of the Confluent Platform",
    )

    parser.add_argument(
        "--this-tag",
        metavar="TAG",
        type=str,
        default=os.getenv("THIS_TAG", None),
        help="'This' Materialize container tag to benchmark. If not provided, the current source will be used.",
    )

    parser.add_argument(
        "--this-options",
        metavar="OPTIONS",
        type=str,
        default=os.getenv("THIS_OPTIONS", None),
        help="Options to pass to the 'This' instance.",
    )

    parser.add_argument(
        "--other-tag",
        metavar="TAG",
        type=str,
        default=os.getenv("OTHER_TAG", None),
        help="'Other' Materialize container tag to benchmark. If not provided, the current source will be used.",
    )

    parser.add_argument(
        "--other-options",
        metavar="OPTIONS",
        type=str,
        default=os.getenv("OTHER_OPTIONS", None),
        help="Options to pass to the 'Other' instance.",
    )

    parser.add_argument(
        "--root-scenario",
        "--scenario",
        metavar="SCENARIO",
        type=str,
        default="Scenario",
        help="Scenario or scenario family to benchmark. See scenarios.py for available scenarios.",
    )

    parser.add_argument(
        "--scale",
        metavar="+N | -N | N",
        type=str,
        default=None,
        help="Absolute or relative scale to apply.",
    )

    parser.add_argument(
        "--max-measurements",
        metavar="N",
        type=int,
        default=99,
        help="Limit the number of measurements to N.",
    )

    parser.add_argument(
        "--max-retries",
        metavar="N",
        type=int,
        default=3,
        help="Retry any potential performance regressions up to N times.",
    )

    parser.add_argument(
        "--this-nodes",
        metavar="N",
        type=int,
        default=None,
        help="Start a cluster with that many nodes for 'THIS'",
    )

    parser.add_argument(
        "--other-nodes",
        metavar="N",
        type=int,
        default=None,
        help="Start a cluster with that many nodes for 'OTHER'",
    )

    parser.add_argument(
        "--this-workers",
        metavar="N",
        type=int,
        default=None,
        help="Number of workers to use for 'THIS'",
    )

    parser.add_argument(
        "--other-workers",
        metavar="N",
        type=int,
        default=None,
        help="Number of workers to use for 'OTHER'",
    )

    args = parser.parse_args()

    print(f"""
this_tag: {args.this_tag}
this_options: {args.this_options}

other_tag: {args.other_tag}
other_options: {args.other_options}

root_scenario: {args.root_scenario}""")

    # Build the list of scenarios to run
    root_scenario = globals()[args.root_scenario]
    initial_scenarios = {}

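    # Expand the root scenario into its leaf subclasses; a scenario without
    # subclasses is benchmarked directly.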
    if root_scenario.__subclasses__():
        for scenario in root_scenario.__subclasses__():
            has_children = False
            for s in scenario.__subclasses__():
                has_children = True
                initial_scenarios[s] = 1

            if not has_children:
                initial_scenarios[scenario] = 1
    else:
        initial_scenarios[root_scenario] = 1

    dependencies = ["postgres"]

    if args.redpanda:
        dependencies += ["redpanda"]
    else:
        dependencies += ["zookeeper", "kafka", "schema-registry"]

    c.start_and_wait_for_tcp(services=dependencies)

    scenarios = initial_scenarios.copy()

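    # Re-run scenarios for up to --max-retries cycles, dropping a scenario once
    # it completes a cycle without a regression.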
    for cycle in range(0, args.max_retries):
        print(
            f"Cycle {cycle+1} with scenarios: {', '.join([scenario.__name__ for scenario in scenarios.keys()])}"
        )

        report = Report()

        for scenario in list(scenarios.keys()):
            comparison = run_one_scenario(c, scenario, args)
            report.append(comparison)

            if not comparison.is_regression():
                del scenarios[scenario]

            print(f"+++ Benchmark Report for cycle {cycle+1}:")
            report.dump()

        if len(scenarios.keys()) == 0:
            break

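    # Any scenario still in the set regressed in every cycle.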
    if len(scenarios.keys()) > 0:
        print(
            f"ERROR: The following scenarios have regressions: {', '.join([scenario.__name__ for scenario in scenarios.keys()])}"
        )
        sys.exit(1)
Example #5
def workflow_default(c: Composition, parser: WorkflowArgumentParser) -> None:
    """Run testdrive."""
    parser.add_argument(
        "--redpanda",
        action="store_true",
        help="run against Redpanda instead of the Confluent Platform",
    )
    parser.add_argument(
        "--aws-region",
        help="run against the specified AWS region instead of localstack",
    )
    parser.add_argument(
        "--workers",
        type=int,
        metavar="N",
        help="set the number of materialized dataflow workers",
    )
    parser.add_argument(
        "--kafka-default-partitions",
        type=int,
        metavar="N",
        help="set the default number of kafka partitions per topic",
    )
    parser.add_argument(
        "--persistent-user-tables",
        action="store_true",
        help="enable the --persistent-user-tables materialized option",
    )
    parser.add_argument(
        "files",
        nargs="*",
        default=["*.td", "esoteric/*.td"],
        help="run against the specified files",
    )
    args = parser.parse_args()

    if not args.redpanda and Arch.host() == Arch.AARCH64:
        ui.warn(
            "Running the Confluent Platform in Docker on ARM-based machines is "
            "nearly unusably slow. Consider using Redpanda instead (--redpanda) "
            "or running tests without mzcompose."
        )

    dependencies = ["materialized"]
    if args.redpanda:
        dependencies += ["redpanda"]
    else:
        dependencies += ["zookeeper", "kafka", "schema-registry"]

    materialized = Materialized(
        workers=args.workers,
        options=["--persistent-user-tables"] if args.persistent_user_tables else [],
    )

    testdrive = Testdrive(
        forward_buildkite_shard=True,
        kafka_default_partitions=args.kafka_default_partitions,
        entrypoint_extra=[f"--aws-region={args.aws_region}"]
        if args.aws_region
        else ["--aws-endpoint=http://localstack:4566"],
    )

    with c.override(materialized, testdrive):
        c.start_and_wait_for_tcp(services=dependencies)
        c.wait_for_materialized("materialized")
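        # Always upload the JUnit report, even if testdrive fails.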
        try:
            junit_report = ci_util.junit_report_filename(c.name)
            c.run("testdrive-svc", f"--junit-report={junit_report}", *args.files)
        finally:
            ci_util.upload_junit_report(
                "testdrive", Path(__file__).parent / junit_report
            )
Example #6
def workflow_default(c: Composition, parser: WorkflowArgumentParser) -> None:
    """Feature benchmark framework."""

    c.silent = True

    parser.add_argument(
        "--this-tag",
        metavar="TAG",
        type=str,
        default=os.getenv("THIS_TAG", None),
        help="'This' Materialize container tag to benchmark. If not provided, the current source will be used.",
    )

    parser.add_argument(
        "--this-options",
        metavar="OPTIONS",
        type=str,
        default=os.getenv("THIS_OPTIONS", None),
        help="Options to pass to the 'This' instance.",
    )

    parser.add_argument(
        "--other-tag",
        metavar="TAG",
        type=str,
        default=os.getenv("OTHER_TAG", None),
        help="'Other' Materialize container tag to benchmark. If not provided, the current source will be used.",
    )

    parser.add_argument(
        "--other-options",
        metavar="OPTIONS",
        type=str,
        default=os.getenv("OTHER_OPTIONS", None),
        help="Options to pass to the 'Other' instance.",
    )

    parser.add_argument(
        "--root-scenario",
        "--scenario",
        metavar="SCENARIO",
        type=str,
        default="Scenario",
        help="Scenario or scenario family to benchmark. See scenarios.py for available scenarios.",
    )

    parser.add_argument(
        "--scale",
        metavar="+N | -N | N",
        type=str,
        default=None,
        help="Absolute or relative scale to apply.",
    )

    parser.add_argument(
        "--max-runs",
        metavar="N",
        type=int,
        default=99,
        help="Limit the number of executions to N.",
    )

    args = parser.parse_args()

    print(
        f"""
this_tag: {args.this_tag}
this_options: {args.this_options}

other_tag: {args.other_tag}
other_options: {args.other_options}

root_scenario: {args.root_scenario}"""
    )

    # Build the list of scenarios to run
    root_scenario = globals()[args.root_scenario]
    scenarios = []

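    # Expand the root scenario into its leaf subclasses; a scenario without
    # subclasses is benchmarked directly.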
    if root_scenario.__subclasses__():
        for scenario in root_scenario.__subclasses__():
            has_children = False
            for s in scenario.__subclasses__():
                has_children = True
                scenarios.append(s)

            if not has_children:
                scenarios.append(scenario)
    else:
        scenarios.append(root_scenario)

    print(f"scenarios: {', '.join([scenario.__name__ for scenario in scenarios])}")

    c.start_and_wait_for_tcp(
        services=["zookeeper", "kafka", "schema-registry", "postgres"]
    )

    report = Report()
    has_regressions = False

    for scenario in scenarios:
        comparison = run_one_scenario(c, scenario, args)
        report.append(comparison)

        if comparison.is_regression():
            has_regressions = True
        report.dump()

    print("+++ Benchmark Report")
    report.dump()

    if has_regressions:
        print("ERROR: benchmarks have regressions")
        sys.exit(1)