def test_status_code_exceptions(status_code, expected_exc, log_level, caplog):
    """Failed sends raise the mapped exception, record a max-payload metric
    only for HTTP 413, log at the expected level, and never leak the key."""
    caplog.set_level(logging.INFO)
    HttpClientRecorder.STATUS_CODE = status_code
    settings = finalize_application_settings({
        "license_key": "123LICENSEKEY",
    })
    protocol = AgentProtocol(settings, client_cls=HttpClientRecorder)

    tracked = CustomMetrics()
    with InternalTraceContext(tracked), pytest.raises(expected_exc):
        protocol.send("analytic_event_data")

    recorded = dict(tracked.metrics())
    oversize_metric = (
        "Supportability/Python/Collector/MaxPayloadSizeLimit/analytic_event_data")
    if status_code == 413:
        # 413 means the payload exceeded the collector's size limit.
        assert recorded[oversize_metric] == [1, 0, 0, 0, 0, 0]
    else:
        assert oversize_metric not in recorded

    # Exactly one request was attempted, for the expected method.
    assert len(HttpClientRecorder.SENT) == 1
    sent_request = HttpClientRecorder.SENT[0]
    assert sent_request.params["method"] == "analytic_event_data"

    # One log record at the expected level; the license key must be scrubbed.
    assert len(caplog.records) == 1
    record = caplog.records[0]
    assert record.levelname == log_level
    assert "123LICENSEKEY" not in record.getMessage()
def test_default_cert_path(monkeypatch, system_certs_available):
    """When ssl reports no system CA file, HttpClient falls back to the
    bundled certs and records a BundleRequired supportability metric."""
    expected_cafile = "foo" if system_certs_available else None

    # Stub out ssl.DefaultVerifyPaths so the client sees (or doesn't see)
    # a system certificate file.
    class StubVerifyPaths(object):
        cafile = expected_cafile

        def __init__(self, *args, **kwargs):
            pass

    monkeypatch.setattr(ssl, "DefaultVerifyPaths", StubVerifyPaths)
    tracked = CustomMetrics()
    with InternalTraceContext(tracked):
        client = HttpClient("localhost", ca_bundle_path=None)

    recorded = dict(tracked.metrics())
    bundle_metric = "Supportability/Python/Certificate/BundleRequired"
    if system_certs_available:
        # System certs found: no override, no metric.
        assert "ca_certs" not in client._connection_kwargs
        assert bundle_metric not in recorded
    else:
        # No system certs: bundled certs are used and the metric is recorded.
        assert client._connection_kwargs["ca_certs"] == certs.where()
        assert recorded[bundle_metric][-3:-1] == [1, 1]
def test_audit_logging(server, insecure_server, client_cls, proxy_host,
                       exception):
    """Requests are mirrored to the audit log; connection failures in
    application mode produce the expected failure supportability metrics."""
    audit_log = StringIO()
    request_params = {"method": "metric_data"}
    scheme = getattr(client_cls, "PREFIX_SCHEME", "https://")

    if exception:
        # Point at an unbound port so the connection attempt fails.
        target_port = MockExternalHTTPServer.get_open_port()
    elif scheme == "https://":
        target_port = server.port
    else:
        target_port = insecure_server.port

    tracked = CustomMetrics()

    with client_cls(
            "localhost",
            target_port,
            proxy_scheme="https",
            proxy_host=proxy_host,
            proxy_port=target_port if exception else server.port,
            audit_log_fp=audit_log,
            disable_certificate_validation=True,
    ) as client:
        with InternalTraceContext(tracked):
            try:
                client.send_request(params=request_params)
                exc_name = ""
            except Exception as e:
                # The underlying cause is wrapped in args[0]; record its name.
                exc_name = callable_name(type(e.args[0]))

    recorded = dict(tracked.metrics())
    if exception and client_cls is ApplicationModeClient:
        connection = "https-proxy" if proxy_host else "direct"
        assert recorded == {
            "Supportability/Python/Collector/Failures": [1, 0, 0, 0, 0, 0],
            "Supportability/Python/Collector/Failures/%s" % connection:
            [1, 0, 0, 0, 0, 0],
            "Supportability/Python/Collector/Exception/%s" % exc_name:
            [1, 0, 0, 0, 0, 0],
        }
    else:
        assert not recorded

    # Verify the audit log isn't empty
    assert audit_log.tell()

    audit_log.seek(0)
    assert scheme in audit_log.read()
def test_non_ok_response(client_cls, server):
    """A non-200 response records failure supportability metrics, but only
    for the application-mode client."""
    tracked = CustomMetrics()

    with client_cls(
        "localhost", server.port, disable_certificate_validation=True
    ) as client:
        with InternalTraceContext(tracked):
            # PUT is not handled by the test server, forcing an error status.
            status, _ = client.send_request(method="PUT")

    assert status != 200
    recorded = dict(tracked.metrics())

    if client_cls is not ApplicationModeClient:
        assert not recorded
    else:
        one_failure = [1, 0, 0, 0, 0, 0]
        assert recorded == {
            "Supportability/Python/Collector/Failures": one_failure,
            "Supportability/Python/Collector/Failures/direct": one_failure,
            "Supportability/Python/Collector/HTTPError/%d" % status: one_failure,
        }
# Example #5
class _GCDataSource(object):
    """Samples CPython garbage-collector activity for runtime metrics.

    ``record_gc`` is registered as a ``gc`` callback to time each collection;
    calling the instance yields object-count metrics, deltas of
    ``gc.get_stats()`` values, and the timing metrics accumulated since the
    previous call. Disabled entirely on PyPy (no compatible gc hooks).
    """

    def __init__(self, settings, environ):
        # Timing metrics accumulated by the gc callback between harvests.
        self.gc_time_metrics = CustomMetrics()
        # Wall-clock timestamp of the most recent gc "start" phase.
        self.start_time = 0.0
        # Last-seen gc.get_stats() values keyed by (stat_name, gen-or-"all"),
        # so each harvest reports the change since the previous one.
        self.previous_stats = {}
        # Metric names embed the pid so forked workers don't collide.
        self.pid = os.getpid()

    @property
    def enabled(self):
        # PyPy is excluded; otherwise honor the runtime setting.
        if platform.python_implementation() == "PyPy":
            return False
        else:
            settings = global_settings()
            return settings.gc_runtime_metrics.enabled

    @property
    def top_object_count_limit(self):
        # How many of the most common object types to report per harvest.
        settings = global_settings()
        return settings.gc_runtime_metrics.top_object_count_limit

    def record_gc(self, phase, info):
        """gc callback: time a collection and record per-generation metrics.

        Called by the interpreter with phase "start"/"stop" and an info dict
        containing at least "generation".
        """
        if not self.enabled:
            return

        current_generation = info["generation"]

        if phase == "start":
            self.start_time = time.time()
        elif phase == "stop":
            total_time = time.time() - self.start_time
            self.gc_time_metrics.record_custom_metric(
                "GC/time/%d/all" % self.pid, total_time)
            # A collection of generation N also collects all lower
            # generations, so attribute the time to every gen <= N and
            # record a zero data point for the rest (keeps call counts
            # aligned across the per-generation metrics).
            for gen in range(0, 3):
                if gen <= current_generation:
                    self.gc_time_metrics.record_custom_metric(
                        "GC/time/%d/%d" % (self.pid, gen), total_time)
                else:
                    self.gc_time_metrics.record_custom_metric(
                        "GC/time/%d/%d" % (self.pid, gen), 0)

    def start(self):
        # gc.callbacks exists on CPython 3.3+; skip registration elsewhere.
        if hasattr(gc, "callbacks"):
            gc.callbacks.append(self.record_gc)

    def stop(self):
        # The callback must be removed before resetting the metrics tables.
        # If it isn't, it's possible to be interrupted by the gc and to have more
        # metrics appear in the table that should be empty.
        if hasattr(gc, "callbacks") and self.record_gc in gc.callbacks:
            gc.callbacks.remove(self.record_gc)

        self.gc_time_metrics.reset_metric_stats()
        self.start_time = 0.0

    def __call__(self):
        """Yield (metric_name, value) pairs for the current harvest."""
        if not self.enabled:
            return

        # Record object count in total and per generation
        if hasattr(gc, "get_count"):
            counts = gc.get_count()
            yield ("GC/objects/%d/all" % self.pid, {"count": sum(counts)})
            for gen, count in enumerate(counts):
                yield (
                    "GC/objects/%d/generation/%d" % (self.pid, gen),
                    {
                        "count": count
                    },
                )

        # Record object count for top five types with highest count
        if hasattr(gc, "get_objects"):
            object_types = map(type, gc.get_objects())
            if self.top_object_count_limit > 0:
                highest_types = Counter(object_types).most_common(
                    self.top_object_count_limit)
                for obj_type, count in highest_types:
                    yield (
                        "GC/objects/%d/type/%s" %
                        (self.pid, callable_name(obj_type)),
                        {
                            "count": count
                        },
                    )

        # gc.get_stats() is CPython 3.4+; values are cumulative, so report
        # the delta against what was seen on the previous harvest.
        if hasattr(gc, "get_stats"):
            stats_by_gen = gc.get_stats()
            if isinstance(stats_by_gen, list):
                for stat_name in stats_by_gen[0].keys():
                    # Aggregate metrics for /all
                    count = sum(stats[stat_name] for stats in stats_by_gen)
                    previous_value = self.previous_stats.get(
                        (stat_name, "all"), 0)
                    self.previous_stats[(stat_name, "all")] = count
                    change_in_value = count - previous_value
                    yield (
                        "GC/%s/%d/all" % (stat_name, self.pid),
                        {
                            "count": change_in_value
                        },
                    )

                    # Breakdowns by generation
                    for gen, stats in enumerate(stats_by_gen):
                        previous_value = self.previous_stats.get(
                            (stat_name, gen), 0)
                        self.previous_stats[(stat_name,
                                             gen)] = stats[stat_name]
                        change_in_value = stats[stat_name] - previous_value

                        yield (
                            "GC/%s/%d/%d" % (stat_name, self.pid, gen),
                            {
                                "count": change_in_value
                            },
                        )

        # In order to avoid a concurrency issue with getting interrupted by the
        # garbage collector, we save a reference to the old metrics table, and overwrite
        # self.gc_time_metrics with a new empty table via reset_metric_stats().
        # This guards against losing data points, or having inconsistent data points
        # reported between /all and the totals of /generation/%d metrics.
        gc_time_metrics = self.gc_time_metrics.metrics()
        self.gc_time_metrics.reset_metric_stats()

        for metric in gc_time_metrics:
            raw_metric = metric[1]
            yield metric[0], {
                "count": raw_metric.call_count,
                "total": raw_metric.total_call_time,
                "min": raw_metric.min_call_time,
                "max": raw_metric.max_call_time,
                "sum_of_squares": raw_metric.sum_of_squares,
            }
def test_http_payload_compression(server, client_cls, method, threshold):
    """Payloads above the threshold are compressed with the configured
    method, the matching Content-Encoding header is sent, and application
    mode records the compression supportability metrics."""
    payload = b"*" * 20
    compressed = threshold < 20

    tracked = CustomMetrics()

    with client_cls(
        "localhost",
        server.port,
        disable_certificate_validation=True,
        compression_method=method,
        compression_threshold=threshold,
    ) as client:
        with InternalTraceContext(tracked):
            status, data = client.send_request(
                payload=payload, params={"method": "test"}
            )

    assert status == 200
    # The echo server returns request line, headers, then the body on the
    # final line.
    data = data.split(b"\n")
    sent_payload = data[-1]

    recorded = dict(tracked.metrics())
    if client_cls is ApplicationModeClient:
        output_metric = recorded[
            "Supportability/Python/Collector/Output/Bytes/test"]
        assert output_metric[:2] == [1, len(sent_payload)]

        if compressed:
            # Verify compression time is recorded
            compress_metric = recorded[
                "Supportability/Python/Collector/ZLIB/Compress/test"]
            assert compress_metric[0] == 1
            assert compress_metric[1] > 0

            # Verify the original payload length is recorded
            bytes_metric = recorded[
                "Supportability/Python/Collector/ZLIB/Bytes/test"]
            assert bytes_metric[:2] == [1, len(payload)]

            assert len(recorded) == 3
        else:
            # Verify no ZLIB compression metrics were sent
            assert len(recorded) == 1
    else:
        assert not recorded

    if compressed:
        expected_content_encoding = method.encode("utf-8")
        assert sent_payload != payload
        if method == "deflate":
            sent_payload = zlib.decompress(sent_payload)
        elif method == "gzip":
            # wbits=31 selects gzip-wrapped decompression.
            gunzip = zlib.decompressobj(31)
            sent_payload = gunzip.decompress(sent_payload)
            sent_payload += gunzip.flush()
    else:
        expected_content_encoding = b"Identity"

    for header in data[1:-1]:
        if header.lower().startswith(b"content-encoding"):
            _, content_encoding = header.split(b":", 1)
            content_encoding = content_encoding.strip()
            break
    else:
        assert False, "Missing content-encoding header"

    assert content_encoding == expected_content_encoding
    assert sent_payload == payload