Example #1
def test_metrics():
    expected_metrics = [
        "node.data-0-node.fs.total.total_in_bytes",
        "node.data-0-node.jvm.mem.pools.old.peak_used_in_bytes",
        "node.data-0-node.jvm.threads.count",
    ]

    def expected_metrics_exist(emitted_metrics):
        # Elastic metric names are dynamic and based on the service name. For example:
        # elasticsearch.test__integration__elastic.node.data-0-node.thread_pool.listener.completed
        # To keep the check stable, we drop the first two components (framework and service name):
        # => node.data-0-node.thread_pool.listener.completed
        metric_names = [
            ".".join(metric_name.split(".")[2:])
            for metric_name in emitted_metrics
        ]
        return sdk_metrics.check_metrics_presence(metric_names,
                                                  expected_metrics)

    sdk_metrics.wait_for_service_metrics(
        config.PACKAGE_NAME,
        foldered_name,
        "data-0",
        "data-0-node",
        config.DEFAULT_TIMEOUT,
        expected_metrics_exist,
    )

    sdk_plan.wait_for_completed_deployment(foldered_name)
    sdk_plan.wait_for_completed_recovery(foldered_name)
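
The sanitization above splits each emitted name on "." and drops the first two components (the framework and the service name). A minimal, self-contained illustration using the sample metric name from the comment:

raw = "elasticsearch.test__integration__elastic.node.data-0-node.thread_pool.listener.completed"
stripped = ".".join(raw.split(".")[2:])
# Matches the entries listed in expected_metrics above.
assert stripped == "node.data-0-node.thread_pool.listener.completed"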
Example #2
def test_metrics() -> None:
    expected_metrics = [
        "node.data-0-node.fs.total.total_in_bytes",
        "node.data-0-node.jvm.mem.pools.old.peak_used_in_bytes",
        "node.data-0-node.jvm.threads.count",
    ]

    def expected_metrics_exist(emitted_metrics: List[str]) -> bool:
        # Elastic metric names are dynamic and based on the service name. For example:
        # elasticsearch.test__integration__elastic.node.data-0-node.thread_pool.listener.completed
        # To keep the check stable, we drop the first two components (framework and service name):
        # => node.data-0-node.thread_pool.listener.completed
        metric_names = [".".join(metric_name.split(".")[2:]) for metric_name in emitted_metrics]
        return sdk_metrics.check_metrics_presence(metric_names, expected_metrics)

    sdk_metrics.wait_for_service_metrics(
        package_name,
        service_name,
        "data-0",
        "data-0-node",
        config.DEFAULT_TIMEOUT,
        expected_metrics_exist,
    )

    sdk_plan.wait_for_completed_deployment(service_name)
    sdk_plan.wait_for_completed_recovery(service_name)
Example #3
def test_metrics():
    expected_metrics = [
        "JournalNode.jvm.JvmMetrics.ThreadsRunnable",
        "null.rpc.rpc.RpcQueueTimeNumOps",
        "null.metricssystem.MetricsSystem.PublishAvgTime",
    ]

    def expected_metrics_exist(emitted_metrics):
        # HDFS metric names need sanitization because they embed the dynamic host name.
        # For example: ip-10-0-0-139.null.rpc.rpc.RpcQueueTimeNumOps
        # The host name prefix is consistent across all HDFS metric names, so we drop
        # the first component.
        metric_names = {
            ".".join(metric_name.split(".")[1:])
            for metric_name in emitted_metrics
        }
        return sdk_metrics.check_metrics_presence(metric_names,
                                                  expected_metrics)

    sdk_metrics.wait_for_service_metrics(
        config.PACKAGE_NAME,
        foldered_name,
        "journal-0",
        "journal-0-node",
        config.DEFAULT_HDFS_TIMEOUT,
        expected_metrics_exist,
    )
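
Here the sanitization drops only the first dot-separated component (the dynamic host name). A quick illustration with the sample name from the comment:

raw = "ip-10-0-0-139.null.rpc.rpc.RpcQueueTimeNumOps"
sanitized = ".".join(raw.split(".")[1:])
# Matches the second entry in expected_metrics above.
assert sanitized == "null.rpc.rpc.RpcQueueTimeNumOps"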
Example #4
def test_metrics_for_task_metrics(configure_package):

    def write_metric_to_statsd_counter(metric_name: str, value: int):
        """
        Write a metric with the specified value to statsd.

        This is done by echoing the statsd string through ncat to the statsd host and port.
        """
        metric_echo = 'echo \\"{}:{}|c\\"'.format(metric_name, value)
        ncat_command = 'ncat -w 1 -u \\$STATSD_UDP_HOST \\$STATSD_UDP_PORT'
        pipe = " | "

        bash_command = sdk_cmd.get_bash_command(
            metric_echo + pipe + ncat_command,
            environment=None,
        )
        sdk_cmd.service_task_exec(configure_package["service"]["name"], "hello-0-server", bash_command)

    metric_name = "test.metrics.CamelCaseMetric"
    write_metric_to_statsd_counter(metric_name, 1)

    def expected_metrics_exist(emitted_metrics) -> bool:
        return sdk_metrics.check_metrics_presence(emitted_metrics, [metric_name])

    sdk_metrics.wait_for_service_metrics(
        configure_package["package_name"],
        configure_package["service"]["name"],
        "hello-0",
        "hello-0-server",
        timeout=5 * 60,
        expected_metrics_callback=expected_metrics_exist,
    )
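
The helper in this example builds a standard statsd counter line of the form "name:value|c" and sends it over UDP, piping echo through ncat inside the task, presumably because STATSD_UDP_HOST and STATSD_UDP_PORT are only defined in the task's environment. A minimal sketch of the same wire format in plain Python, assuming those environment variables are available wherever this runs:

import os
import socket

def write_statsd_counter(metric_name: str, value: int) -> None:
    # Build the statsd counter payload, e.g. "test.metrics.CamelCaseMetric:1|c".
    payload = "{}:{}|c".format(metric_name, value).encode("ascii")
    host = os.environ["STATSD_UDP_HOST"]       # assumption: set in this environment
    port = int(os.environ["STATSD_UDP_PORT"])  # assumption: set in this environment
    with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:
        sock.sendto(payload, (host, port))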
Example #5
def test_metrics_for_task_metrics(configure_package):
    def write_metric_to_statsd_counter(metric_name: str, value: int):
        """
        Write a metric with the specified value to statsd.

        This is done by echoing the statsd string through ncat to the statsd host and port.
        """
        metric_echo = 'echo \\"{}:{}|c\\"'.format(metric_name, value)
        ncat_command = "ncat -w 1 -u \\$STATSD_UDP_HOST \\$STATSD_UDP_PORT"
        pipe = " | "

        bash_command = sdk_cmd.get_bash_command(metric_echo + pipe +
                                                ncat_command,
                                                environment=None)
        sdk_cmd.service_task_exec(configure_package["service"]["name"],
                                  "hello-0-server", bash_command)

    metric_name = "test.metrics.CamelCaseMetric"
    write_metric_to_statsd_counter(metric_name, 1)

    def expected_metrics_exist(emitted_metrics) -> bool:
        return sdk_metrics.check_metrics_presence(emitted_metrics,
                                                  [metric_name])

    sdk_metrics.wait_for_service_metrics(
        configure_package["package_name"],
        configure_package["service"]["name"],
        "hello-0",
        "hello-0-server",
        timeout=5 * 60,
        expected_metrics_callback=expected_metrics_exist,
    )
Example #6
def test_metrics():
    expected_metrics = [
        "kafka.network.RequestMetrics.ResponseQueueTimeMs.max",
        "kafka.socket-server-metrics.io-ratio",
        "kafka.controller.ControllerStats.LeaderElectionRateAndTimeMs.p95"
    ]

    def expected_metrics_exist(emitted_metrics):
        return sdk_metrics.check_metrics_presence(emitted_metrics,
                                                  expected_metrics)

    sdk_metrics.wait_for_service_metrics(
        config.PACKAGE_NAME, sdk_utils.get_foldered_name(config.SERVICE_NAME),
        "kafka-0-broker", config.DEFAULT_KAFKA_TIMEOUT, expected_metrics_exist)
Example #7
def test_metrics():
    expected_metrics = [
        "org.apache.cassandra.metrics.Table.CoordinatorReadLatency.system.hints.p999",
        "org.apache.cassandra.metrics.Table.CompressionRatio.system_schema.indexes",
        "org.apache.cassandra.metrics.ThreadPools.ActiveTasks.internal.MemtableReclaimMemory"
    ]

    def expected_metrics_exist(emitted_metrics):
        return sdk_metrics.check_metrics_presence(emitted_metrics,
                                                  expected_metrics)

    sdk_metrics.wait_for_service_metrics(config.PACKAGE_NAME,
                                         config.get_foldered_service_name(),
                                         "node-0-server",
                                         config.DEFAULT_CASSANDRA_TIMEOUT,
                                         expected_metrics_exist)
Example #8
def test_metrics():
    expected_metrics = [
        "kafka.network.RequestMetrics.ResponseQueueTimeMs.max",
        "kafka.socket-server-metrics.io-ratio",
        "kafka.controller.ControllerStats.LeaderElectionRateAndTimeMs.p95"
    ]

    def expected_metrics_exist(emitted_metrics):
        return sdk_metrics.check_metrics_presence(emitted_metrics, expected_metrics)

    sdk_metrics.wait_for_service_metrics(
        config.PACKAGE_NAME,
        sdk_utils.get_foldered_name(config.SERVICE_NAME),
        "kafka-0-broker",
        config.DEFAULT_KAFKA_TIMEOUT,
        expected_metrics_exist
    )
Example #9
def test_metrics():
    expected_metrics = [
        "JournalNode.jvm.JvmMetrics.ThreadsRunnable",
        "null.rpc.rpc.RpcQueueTimeNumOps",
        "null.metricssystem.MetricsSystem.PublishAvgTime"
    ]

    def expected_metrics_exist(emitted_metrics):
        # HDFS metric names need sanitization because they embed the dynamic host name.
        # For example: ip-10-0-0-139.null.rpc.rpc.RpcQueueTimeNumOps
        # The host name prefix is consistent across all HDFS metric names, so we drop
        # the first component.
        metric_names = {".".join(metric_name.split(".")[1:]) for metric_name in emitted_metrics}
        return sdk_metrics.check_metrics_presence(metric_names, expected_metrics)

    sdk_metrics.wait_for_service_metrics(
        config.PACKAGE_NAME,
        sdk_utils.get_foldered_name(config.SERVICE_NAME),
        "journal-0-node",
        config.DEFAULT_HDFS_TIMEOUT,
        expected_metrics_exist
    )