Code example #1
def test_mem_free_from_memfree_when_memavailable_not_mentioned(
        elasticapm_client, tmpdir):
    proc_stat_self = os.path.join(tmpdir.strpath, "self-stat")
    proc_stat = os.path.join(tmpdir.strpath, "stat")
    proc_meminfo = os.path.join(tmpdir.strpath, "meminfo")

    for path, content in (
        (proc_stat, TEMPLATE_PROC_STAT_DEBIAN.format(user=0, idle=0)),
        (proc_stat_self, TEMPLATE_PROC_STAT_SELF.format(utime=0, stime=0)),
        (proc_meminfo, TEMPLATE_PROC_MEMINFO_NO_MEMAVAILABLE),
    ):
        with open(path, mode="w") as f:
            f.write(content)
    metricset = CPUMetricSet(
        MetricsRegistry(elasticapm_client),
        sys_stats_file=proc_stat,
        process_stats_file=proc_stat_self,
        memory_stats_file=proc_meminfo,
    )

    for path, content in (
        (proc_stat, TEMPLATE_PROC_STAT_DEBIAN.format(user=400000, idle=600000)),
        (proc_stat_self, TEMPLATE_PROC_STAT_SELF.format(utime=100000, stime=100000)),
        (proc_meminfo, TEMPLATE_PROC_MEMINFO_NO_MEMAVAILABLE),
    ):
        with open(path, mode="w") as f:
            f.write(content)
    data = next(metricset.collect())

    # MemFree + Buffers + Cached, in bytes
    mem_free_expected = sum([359184, 891296, 6416340]) * 1024
    assert data["samples"]["system.memory.actual.free"]["value"] == mem_free_expected
Code example #2
def test_cpu_usage_when_cpu_total_is_zero(elasticapm_client, tmpdir):
    proc_stat_self = os.path.join(tmpdir.strpath, "self-stat")
    proc_stat = os.path.join(tmpdir.strpath, "stat")
    proc_meminfo = os.path.join(tmpdir.strpath, "meminfo")

    for path, content in (
        (proc_stat, TEMPLATE_PROC_STAT_DEBIAN.format(user=0, idle=0)),
        (proc_stat_self, TEMPLATE_PROC_STAT_SELF.format(utime=0, stime=0)),
        (proc_meminfo, TEMPLATE_PROC_MEMINFO_NO_MEMAVAILABLE),
    ):
        with open(path, mode="w") as f:
            f.write(content)
    metricset = CPUMetricSet(
        MetricsRegistry(elasticapm_client),
        sys_stats_file=proc_stat,
        process_stats_file=proc_stat_self,
        memory_stats_file=proc_meminfo,
    )
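    # collecting immediately after construction means no second /proc sample
    # was written, so all CPU time deltas are zero and the metricset should
    # report 0.0 rather than divide by zero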
    data = next(metricset.collect())

    cpu_total_expected = 0
    assert data["samples"]["system.cpu.total.norm.pct"][
        "value"] == cpu_total_expected
    assert data["samples"]["system.process.cpu.total.norm.pct"][
        "value"] == cpu_total_expected
Code example #3
def test_metrics_not_collected_if_zero_and_reset():
    m = MetricsSet(MetricsRegistry(0, lambda x: None))
    counter = m.counter("counter", reset_on_collect=False)
    resetting_counter = m.counter("resetting_counter", reset_on_collect=True)
    gauge = m.gauge("gauge", reset_on_collect=False)
    resetting_gauge = m.gauge("resetting_gauge", reset_on_collect=True)
    timer = m.timer("timer", reset_on_collect=False)
    resetting_timer = m.timer("resetting_timer", reset_on_collect=True)

    counter.inc(), resetting_counter.inc()
    gauge.val = 5
    resetting_gauge.val = 5
    timer.update(1, 1)
    resetting_timer.update(1, 1)

    data = list(m.collect())
    more_data = list(m.collect())
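    # the first collect resets every metric created with reset_on_collect=True
    # to zero, so the second collect should omit those metrics entirely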
    assert set(data[0]["samples"].keys()) == {
        "counter",
        "resetting_counter",
        "gauge",
        "resetting_gauge",
        "timer.count",
        "timer.sum.us",
        "resetting_timer.count",
        "resetting_timer.sum.us",
    }
    assert set(more_data[0]["samples"].keys()) == {"counter", "gauge", "timer.count", "timer.sum.us"}
Code example #4
File: cpu_linux_tests.py Project: kelovac/fastapi
def test_cpu_mem_from_proc(elasticapm_client, proc_stat_template, tmpdir):
    proc_stat_self = os.path.join(tmpdir.strpath, "self-stat")
    proc_stat = os.path.join(tmpdir.strpath, "stat")
    proc_meminfo = os.path.join(tmpdir.strpath, "meminfo")

    for path, content in (
        (proc_stat, proc_stat_template.format(user=0, idle=0)),
        (proc_stat_self, TEMPLATE_PROC_STAT_SELF.format(utime=0, stime=0)),
        (proc_meminfo, TEMPLATE_PROC_MEMINFO),
    ):
        with open(path, mode="w") as f:
            f.write(content)
    metricset = CPUMetricSet(
        MetricsRegistry(elasticapm_client),
        sys_stats_file=proc_stat,
        process_stats_file=proc_stat_self,
        memory_stats_file=proc_meminfo,
    )

    for path, content in (
        (proc_stat, proc_stat_template.format(user=400000, idle=600000)),
        (proc_stat_self, TEMPLATE_PROC_STAT_SELF.format(utime=100000, stime=100000)),
        (proc_meminfo, TEMPLATE_PROC_MEMINFO),
    ):
        with open(path, mode="w") as f:
            f.write(content)
    data = next(metricset.collect())
    assert data["samples"]["system.cpu.total.norm.pct"]["value"] == 0.4
    assert data["samples"]["system.process.cpu.total.norm.pct"]["value"] == 0.2

    assert data["samples"]["system.memory.total"]["value"] == 16552841216
    assert data["samples"]["system.memory.actual.free"]["value"] == 6670774272

    assert data["samples"]["system.process.memory.rss.bytes"]["value"] == 47738880
    assert data["samples"]["system.process.memory.size"]["value"] == 3686981632
Code example #5
def test_summary(elasticapm_client, prometheus):
    metricset = PrometheusMetrics(MetricsRegistry(elasticapm_client))
    summary = prometheus_client.Summary("a_bare_summary", "Bare summary")
    summary_with_labels = prometheus_client.Summary("summary_with_labels",
                                                    "Summary with labels",
                                                    ["alabel", "anotherlabel"])
    summary.observe(5)
    summary.observe(7)
    summary.observe(9)
    summary_with_labels.labels(alabel="foo", anotherlabel="baz").observe(7)
    summary_with_labels.labels(alabel="bar",
                               anotherlabel="bazzinga").observe(11)
    summary_with_labels.labels(alabel="foo", anotherlabel="baz").observe(2)
    data = list(metricset.collect())

    assert len(data) == 3
    assert data[0]["samples"]["prometheus.metrics.a_bare_summary.count"][
        "value"] == 3.0
    assert data[0]["samples"]["prometheus.metrics.a_bare_summary.sum"][
        "value"] == 21
    assert data[1]["samples"]["prometheus.metrics.summary_with_labels.count"][
        "value"] == 2.0
    assert data[1]["samples"]["prometheus.metrics.summary_with_labels.sum"][
        "value"] == 9.0
    assert data[1]["tags"] == {"alabel": "foo", "anotherlabel": "baz"}
    assert data[2]["samples"]["prometheus.metrics.summary_with_labels.count"][
        "value"] == 1.0
    assert data[2]["samples"]["prometheus.metrics.summary_with_labels.sum"][
        "value"] == 11.0
    assert data[2]["tags"] == {"alabel": "bar", "anotherlabel": "bazzinga"}
Code example #6
def test_metrics_registry():
    mock_queue = mock.Mock()
    registry = MetricsRegistry(0.001, queue_func=mock_queue)
    registry.register("tests.metrics.base_tests.DummyMetricSet")
    try:
        registry.start_thread()
        time.sleep(0.1)
        assert mock_queue.call_count > 0
    finally:
        registry.stop_thread()
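The registry test above refers to `tests.metrics.base_tests.DummyMetricSet` only by its dotted path; the class body is not shown on this page. A minimal sketch of what such a metricset could look like, assuming the `before_collect` hook of `MetricsSet` (the gauge name and body are purely illustrative):

from elasticapm.metrics.base_metrics import MetricsSet


class DummyMetricSet(MetricsSet):
    def before_collect(self):
        # hypothetical: refresh a gauge on every collection cycle so each
        # collect hands at least one sample to the registry's queue_func
        self.gauge("a_gauge").val = 42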
Code example #7
def test_compare_psutil_linux_metricsets(elasticapm_client):
    psutil_metricset = cpu_psutil.CPUMetricSet(
        MetricsRegistry(elasticapm_client))
    linux_metricset = cpu_linux.CPUMetricSet(
        MetricsRegistry(elasticapm_client))
    # do something that generates some CPU load
    for i in compat.irange(10**6):
        j = i * i
    psutil_data = next(psutil_metricset.collect())
    linux_data = next(linux_metricset.collect())

    assert (abs(psutil_data["samples"]["system.cpu.total.norm.pct"]["value"] -
                linux_data["samples"]["system.cpu.total.norm.pct"]["value"]) <
            0.02)
    assert (abs(
        psutil_data["samples"]["system.process.cpu.total.norm.pct"]["value"] -
        linux_data["samples"]["system.process.cpu.total.norm.pct"]["value"]) <
            0.02)
Code example #8
def test_cpu_mem_from_proc(elasticapm_client, proc_stat_template, tmpdir):
    proc_stat_self = os.path.join(tmpdir.strpath, "self-stat")
    proc_stat = os.path.join(tmpdir.strpath, "stat")
    proc_meminfo = os.path.join(tmpdir.strpath, "meminfo")
    cgroup_memory_limit = os.path.join(tmpdir.strpath, "memory",
                                       "memory.limit_in_bytes")
    cgroup_memory_usage = os.path.join(tmpdir.strpath, "memory",
                                       "memory.usage_in_bytes")
    proc_self_cgroup = os.path.join(tmpdir.strpath, "cgroup")
    os.mkdir(os.path.join(tmpdir.strpath, "memory"))
    proc_self_mount = os.path.join(tmpdir.strpath, "mountinfo")

    for path, content in (
        (proc_stat, proc_stat_template.format(user=0, idle=0)),
        (proc_stat_self, TEMPLATE_PROC_STAT_SELF.format(utime=0, stime=0)),
        (proc_meminfo, TEMPLATE_PROC_MEMINFO),
        (cgroup_memory_limit, TEMPLATE_CGROUP_MEM_LIMIT_IN_BYTES),
        (cgroup_memory_usage, TEMPLATE_CGROUP_MEM_USAGE_IN_BYTES),
        (proc_self_cgroup, "9:memory:/slice"),
        (
            proc_self_mount,
            "39 30 0:35 / " + tmpdir.strpath +
            "/memory rw,nosuid,nodev,noexec,relatime shared:10 - cgroup cgroup rw,seclabel,memory\n",
        ),
    ):
        with open(path, mode="w") as f:
            f.write(content)
    metricset = CPUMetricSet(
        MetricsRegistry(elasticapm_client),
        sys_stats_file=proc_stat,
        process_stats_file=proc_stat_self,
        memory_stats_file=proc_meminfo,
        proc_self_cgroup=proc_self_cgroup,
        mount_info=proc_self_mount,
    )

    for path, content in (
        (proc_stat, proc_stat_template.format(user=400000, idle=600000)),
        (proc_stat_self, TEMPLATE_PROC_STAT_SELF.format(utime=100000, stime=100000)),
        (proc_meminfo, TEMPLATE_PROC_MEMINFO),
    ):
        with open(path, mode="w") as f:
            f.write(content)
    data = next(metricset.collect())
    assert data["samples"]["system.cpu.total.norm.pct"]["value"] == 0.4
    assert data["samples"]["system.process.cpu.total.norm.pct"]["value"] == 0.2

    assert data["samples"]["system.memory.total"]["value"] == 16552841216
    assert data["samples"]["system.memory.actual.free"]["value"] == 6670774272

    assert data["samples"]["system.process.memory.rss.bytes"][
        "value"] == 47738880
    assert data["samples"]["system.process.memory.size"]["value"] == 3686981632
Code example #9
def test_mem_from_cgroup2(elasticapm_client, tmpdir):
    proc_stat_self = os.path.join(tmpdir.strpath, "self-stat")
    proc_stat = os.path.join(tmpdir.strpath, "stat")
    proc_meminfo = os.path.join(tmpdir.strpath, "meminfo")
    cgroup2_memory_limit = os.path.join(tmpdir.strpath, "slice", "memory.max")
    cgroup2_memory_usage = os.path.join(tmpdir.strpath, "slice",
                                        "memory.current")
    cgroup2_memory_stat = os.path.join(tmpdir.strpath, "slice", "memory.stat")
    cgroup2_self_cgroup = os.path.join(tmpdir.strpath, "cgroup")
    proc_self_cgroup = os.path.join(tmpdir.strpath, "cgroup")
    os.mkdir(os.path.join(tmpdir.strpath, "slice"))
    proc_self_mount = os.path.join(tmpdir.strpath, "mountinfo")

    for path, content in (
        (proc_stat, TEMPLATE_PROC_STAT_DEBIAN.format(user=0, idle=0)),
        (proc_stat_self, TEMPLATE_PROC_STAT_SELF.format(utime=0, stime=0)),
        (proc_meminfo, TEMPLATE_PROC_MEMINFO),
        (cgroup2_memory_limit, TEMPLATE_CGROUP_MEM_LIMIT_IN_BYTES_LIMITED),
        (cgroup2_memory_usage, TEMPLATE_CGROUP_MEM_USAGE_IN_BYTES),
        (cgroup2_memory_stat, TEMPLATE_CGROUP_MEM_STAT),
        (cgroup2_self_cgroup, "9:memory:/slice"),
        (
            proc_self_mount,
            "30 23 0:26 / " + tmpdir.strpath +
            " rw,nosuid,nodev,noexec,relatime shared:4 - cgroup2 cgroup rw,seclabel\n",
        ),
    ):
        with open(path, mode="w") as f:
            f.write(content)
    metricset = CPUMetricSet(
        MetricsRegistry(elasticapm_client),
        sys_stats_file=proc_stat,
        process_stats_file=proc_stat_self,
        memory_stats_file=proc_meminfo,
        proc_self_cgroup=proc_self_cgroup,
        mount_info=proc_self_mount,
    )

    data = next(metricset.collect())

    assert data["samples"]["system.memory.total"]["value"] == 16552841216
    assert data["samples"]["system.memory.actual.free"]["value"] == 6670774272

    assert data["samples"]["system.process.memory.rss.bytes"][
        "value"] == 47738880
    assert data["samples"]["system.process.memory.size"]["value"] == 3686981632

    assert data["samples"]["system.process.cgroup.memory.mem.limit.bytes"][
        "value"] == 7964778496
    assert data["samples"]["system.process.cgroup.memory.mem.usage.bytes"][
        "value"] == 954370560
    assert data["samples"][
        "system.process.cgroup.memory.stats.inactive_file.bytes"][
            "value"] == 10407936
Code example #10
def test_metrics_registry():
    mock_queue = mock.Mock()
    registry = MetricsRegistry(0.001, queue_func=mock_queue)
    registry.register("tests.metrics.base_tests.DummyMetricSet")
    time.sleep(0.1)
    assert mock_queue.call_count > 0
    registry._stop_collect_timer()
Code example #11
def test_mem_from_cgroup_files_dont_exist(elasticapm_client, tmpdir):
    # it appears that on Google App engine, there is a possibility of
    # memory.limit_in_bytes existing while memory.usage_in_bytes does not.
    # See https://github.com/elastic/apm-agent-python/issues/985
    proc_stat_self = os.path.join(tmpdir.strpath, "self-stat")
    proc_stat = os.path.join(tmpdir.strpath, "stat")
    proc_meminfo = os.path.join(tmpdir.strpath, "meminfo")
    cgroup_memory_limit = os.path.join(tmpdir.strpath, "memory",
                                       "memory.limit_in_bytes")
    # intentionally commented out
    # cgroup_memory_usage = os.path.join(tmpdir.strpath, "memory", "memory.usage_in_bytes")
    cgroup_memory_stat = os.path.join(tmpdir.strpath, "memory", "memory.stat")
    proc_self_cgroup = os.path.join(tmpdir.strpath, "cgroup")
    os.mkdir(os.path.join(tmpdir.strpath, "memory"))
    proc_self_mount = os.path.join(tmpdir.strpath, "mountinfo")

    for path, content in (
        (proc_stat, TEMPLATE_PROC_STAT_DEBIAN.format(user=0, idle=0)),
        (proc_stat_self, TEMPLATE_PROC_STAT_SELF.format(utime=0, stime=0)),
        (proc_meminfo, TEMPLATE_PROC_MEMINFO),
        (cgroup_memory_limit, TEMPLATE_CGROUP_MEM_LIMIT_IN_BYTES_LIMITED),
        # intentionally commented out
        # (cgroup_memory_usage, TEMPLATE_CGROUP_MEM_USAGE_IN_BYTES),
        (cgroup_memory_stat, TEMPLATE_CGROUP_MEM_STAT),
        (proc_self_cgroup, "9:memory:/slice"),
        (
            proc_self_mount,
            "39 30 0:35 / " + tmpdir.strpath +
            "/memory rw,nosuid,nodev,noexec,relatime shared:10 - cgroup cgroup rw,seclabel,memory\n",
        ),
    ):
        with open(path, mode="w") as f:
            f.write(content)
    metricset = CPUMetricSet(
        MetricsRegistry(elasticapm_client),
        sys_stats_file=proc_stat,
        process_stats_file=proc_stat_self,
        memory_stats_file=proc_meminfo,
        proc_self_cgroup=proc_self_cgroup,
        mount_info=proc_self_mount,
    )

    assert metricset.cgroup_files.limit is not None
    assert metricset.cgroup_files.usage is None

    data = next(metricset.collect())

    assert "system.process.cgroup.memory.mem.limit.bytes" in data["samples"]
    assert "system.process.cgroup.memory.mem.usage.bytes" not in data[
        "samples"]
Code example #12
def test_metrics_counter(elasticapm_client):
    metricset = MetricsSet(MetricsRegistry(elasticapm_client))
    metricset.counter("x").inc()
    data = next(metricset.collect())
    assert data["samples"]["x"]["value"] == 1
    metricset.counter("x").inc(10)
    data = next(metricset.collect())
    assert data["samples"]["x"]["value"] == 11
    metricset.counter("x").dec(10)
    data = next(metricset.collect())
    assert data["samples"]["x"]["value"] == 1
    metricset.counter("x").dec()
    data = next(metricset.collect())
    assert data["samples"]["x"]["value"] == 0
Code example #13
def test_metrics_multithreaded():
    metricset = MetricsSet(MetricsRegistry(0, lambda x: None))
    pool = Pool(5)

    def target():
        for i in range(500):
            metricset.counter("x").inc(i + 1)
            time.sleep(0.0000001)

    [pool.apply_async(target, ()) for i in range(10)]
    pool.close()
    pool.join()
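    # Gauss sum: each worker adds 1 + 2 + ... + 500 = 500 * 501 / 2 = 125,250,
    # and ten workers run, so the counter should reach 1,252,500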
    expected = 10 * ((500 * 501) / 2)
    assert metricset.counter("x").val == expected
Code example #14
def test_metrics_counter():
    metricset = MetricsSet(MetricsRegistry(0, lambda x: None))
    metricset.counter("x").inc()
    data = next(metricset.collect())
    assert data["samples"]["x"]["value"] == 1
    metricset.counter("x").inc(10)
    data = next(metricset.collect())
    assert data["samples"]["x"]["value"] == 11
    metricset.counter("x").dec(10)
    data = next(metricset.collect())
    assert data["samples"]["x"]["value"] == 1
    metricset.counter("x").dec()
    data = next(metricset.collect())
    assert data["samples"]["x"]["value"] == 0
Code example #15
def test_histogram(elasticapm_client, prometheus):
    metricset = PrometheusMetrics(MetricsRegistry(elasticapm_client))
    histo = prometheus_client.Histogram("histo",
                                        "test histogram",
                                        buckets=[1, 10, 100,
                                                 float("inf")])
    histo_with_labels = prometheus_client.Histogram(
        "histowithlabel",
        "test histogram with labels", ["alabel", "anotherlabel"],
        buckets=[1, 10, 100, float("inf")])
    histo.observe(0.5)
    histo.observe(0.6)
    histo.observe(1.5)
    histo.observe(26)
    histo.observe(42)
    histo.observe(12)
    histo.observe(105)
    histo_with_labels.labels(alabel="foo", anotherlabel="baz").observe(1)
    histo_with_labels.labels(alabel="foo", anotherlabel="baz").observe(10)
    histo_with_labels.labels(alabel="foo", anotherlabel="baz").observe(100)
    histo_with_labels.labels(alabel="foo",
                             anotherlabel="bazzinga").observe(1000)
    data = list(metricset.collect())
    assert data[0]["samples"]["prometheus.metrics.histo"]["values"] == [
        0.5, 5.5, 55.0, 100.0
    ]
    assert data[0]["samples"]["prometheus.metrics.histo"]["counts"] == [
        2, 1, 3, 1
    ]
    assert all(
        isinstance(v, int)
        for v in data[0]["samples"]["prometheus.metrics.histo"]["counts"])

    assert data[1]["samples"]["prometheus.metrics.histowithlabel"][
        "values"] == [0.5, 5.5, 55.0, 100.0]
    assert data[1]["samples"]["prometheus.metrics.histowithlabel"][
        "counts"] == [1, 1, 1, 0]
    assert all(
        isinstance(v, int) for v in data[1]["samples"]
        ["prometheus.metrics.histowithlabel"]["counts"])
    assert data[1]["tags"] == {"alabel": "foo", "anotherlabel": "baz"}

    assert data[2]["samples"]["prometheus.metrics.histowithlabel"][
        "values"] == [0.5, 5.5, 55.0, 100.0]
    assert data[2]["samples"]["prometheus.metrics.histowithlabel"][
        "counts"] == [0, 0, 0, 1]
    assert all(
        isinstance(v, int) for v in data[2]["samples"]
        ["prometheus.metrics.histowithlabel"]["counts"])
    assert data[2]["tags"] == {"alabel": "foo", "anotherlabel": "bazzinga"}
Code example #16
def test_metric_limit(caplog, elasticapm_client):
    m = MetricsSet(MetricsRegistry(elasticapm_client))
    with caplog.at_level(logging.WARNING, logger="elasticapm.metrics"):
        for i in range(2):
            counter = m.counter("counter", some_label=i)
            gauge = m.gauge("gauge", some_label=i)
            timer = m.timer("timer", some_label=i)
            if i == 0:
                assert isinstance(timer, Timer)
                assert isinstance(gauge, Gauge)
                assert isinstance(counter, Counter)
            else:
                assert isinstance(timer, NoopMetric)
                assert isinstance(gauge, NoopMetric)
                assert isinstance(counter, NoopMetric)
    assert_any_record_contains(caplog.records, "The limit of 3 metricsets has been reached", "elasticapm.metrics")
Code example #17
def test_metrics_histogram(elasticapm_client):
    metricset = MetricsSet(MetricsRegistry(elasticapm_client))
    hist = metricset.histogram("x", buckets=[1, 10, 100])
    assert len(hist.buckets) == 4

    hist.update(0.3)
    hist.update(1)
    hist.update(5)
    hist.update(20)
    hist.update(100)
    hist.update(1000)

    data = list(metricset.collect())
    assert len(data) == 1
    d = data[0]
    assert d["samples"]["x"]["counts"] == [2, 1, 2, 1]
    assert d["samples"]["x"]["values"] == [0.5, 5.5, 55.0, 100]
Code example #18
def test_mem_from_cgroup2_max_handling(elasticapm_client, tmpdir):
    proc_stat_self = os.path.join(tmpdir.strpath, "self-stat")
    proc_stat = os.path.join(tmpdir.strpath, "stat")
    proc_meminfo = os.path.join(tmpdir.strpath, "meminfo")
    cgroup2_memory_limit = os.path.join(tmpdir.strpath, "slice", "memory.max")
    cgroup2_memory_usage = os.path.join(tmpdir.strpath, "slice",
                                        "memory.current")
    cgroup2_memory_stat = os.path.join(tmpdir.strpath, "slice", "memory.stat")
    cgroup2_self_cgroup = os.path.join(tmpdir.strpath, "cgroup")
    proc_self_cgroup = os.path.join(tmpdir.strpath, "cgroup")
    os.mkdir(os.path.join(tmpdir.strpath, "slice"))
    proc_self_mount = os.path.join(tmpdir.strpath, "mountinfo")

    for path, content in (
        (proc_stat, TEMPLATE_PROC_STAT_DEBIAN.format(user=0, idle=0)),
        (proc_stat_self, TEMPLATE_PROC_STAT_SELF.format(utime=0, stime=0)),
        (proc_meminfo, TEMPLATE_PROC_MEMINFO),
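        # cgroup v2 reports the literal string "max" when no memory limit is
        # set; the metricset is expected to omit the cgroup metrics in that case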
        (cgroup2_memory_limit, "max"),
        (cgroup2_memory_usage, TEMPLATE_CGROUP_MEM_USAGE_IN_BYTES),
        (cgroup2_memory_stat, TEMPLATE_CGROUP_MEM_STAT),
        (cgroup2_self_cgroup, "9:memory:/slice"),
        (
            proc_self_mount,
            "30 23 0:26 / " + tmpdir.strpath +
            " rw,nosuid,nodev,noexec,relatime shared:4 - cgroup2 cgroup rw,seclabel\n",
        ),
    ):
        with open(path, mode="w") as f:
            f.write(content)
    metricset = CPUMetricSet(
        MetricsRegistry(elasticapm_client),
        sys_stats_file=proc_stat,
        process_stats_file=proc_stat_self,
        memory_stats_file=proc_meminfo,
        proc_self_cgroup=proc_self_cgroup,
        mount_info=proc_self_mount,
    )

    data = next(metricset.collect())

    assert "system.process.cgroup.memory.mem.limit.bytes" not in data[
        "samples"]
    assert "system.process.cgroup.memory.mem.usage.bytes" not in data[
        "samples"]
    assert "system.process.cgroup.memory.stats.inactive_file.bytes" not in data[
        "samples"]
Code example #19
def test_cpu_mem_from_psutil(elasticapm_client):
    metricset = cpu_psutil.CPUMetricSet(MetricsRegistry(elasticapm_client))
    # do something that generates some CPU load
    for i in compat.irange(10**6):
        j = i * i
    data = next(metricset.collect())
    # we can't really test any specific values here as it depends on the system state.
    # Mocking is also not really a viable choice, as we would then lose the "integration testing"
    # nature of this test with different versions of psutil
    assert 0 < data["samples"]["system.cpu.total.norm.pct"]["value"] < 1
    assert 0 < data["samples"]["system.process.cpu.total.norm.pct"]["value"] < 1

    assert data["samples"]["system.memory.total"]["value"] > 0
    assert data["samples"]["system.memory.actual.free"]["value"] > 0

    assert data["samples"]["system.process.memory.rss.bytes"]["value"] > 0
    assert data["samples"]["system.process.memory.size"]["value"] > 0
Code example #20
def test_metric_limit(caplog):
    m = MetricsSet(MetricsRegistry(0, lambda x: None))
    with caplog.at_level(logging.WARNING, logger="elasticapm.metrics"):
        for i in range(2):
            counter = m.counter("counter", some_label=i)
            gauge = m.gauge("gauge", some_label=i)
            timer = m.timer("timer", some_label=i)
            if i == 0:
                assert isinstance(timer, Timer)
                assert isinstance(gauge, Gauge)
                assert isinstance(counter, Counter)
            else:
                assert isinstance(timer, NoopMetric)
                assert isinstance(gauge, NoopMetric)
                assert isinstance(counter, NoopMetric)

    assert len(caplog.records) == 1
    record = caplog.records[0]
    assert "The limit of 3 metricsets has been reached" in record.message
Code example #21
def test_counter(elasticapm_client, prometheus):
    metricset = PrometheusMetrics(MetricsRegistry(elasticapm_client))
    counter = prometheus_client.Counter("a_bare_counter", "Bare counter")
    counter_with_labels = prometheus_client.Counter("counter_with_labels",
                                                    "Counter with labels",
                                                    ["alabel", "anotherlabel"])
    counter.inc()
    counter_with_labels.labels(alabel="foo", anotherlabel="baz").inc()
    counter_with_labels.labels(alabel="bar", anotherlabel="bazzinga").inc()
    counter_with_labels.labels(alabel="foo", anotherlabel="baz").inc()
    data = list(metricset.collect())
    assert len(data) == 3
    assert data[0]["samples"]["prometheus.metrics.a_bare_counter"][
        "value"] == 1.0
    assert data[1]["samples"]["prometheus.metrics.counter_with_labels"][
        "value"] == 2.0
    assert data[1]["tags"] == {"alabel": "foo", "anotherlabel": "baz"}
    assert data[2]["samples"]["prometheus.metrics.counter_with_labels"][
        "value"] == 1.0
    assert data[2]["tags"] == {"alabel": "bar", "anotherlabel": "bazzinga"}
Code example #22
def test_gauge(elasticapm_client, prometheus):
    metricset = PrometheusMetrics(MetricsRegistry(elasticapm_client))
    gauge = prometheus_client.Gauge("a_bare_gauge", "Bare gauge")
    gauge_with_labels = prometheus_client.Gauge("gauge_with_labels",
                                                "Gauge with labels",
                                                ["alabel", "anotherlabel"])
    gauge.set(5)
    gauge_with_labels.labels(alabel="foo", anotherlabel="baz").set(7)
    gauge_with_labels.labels(alabel="bar", anotherlabel="bazzinga").set(11)
    gauge_with_labels.labels(alabel="foo", anotherlabel="baz").set(2)
    data = list(metricset.collect())
    assert len(data) == 3
    assert data[0]["samples"]["prometheus.metrics.a_bare_gauge"][
        "value"] == 5.0
    assert data[1]["samples"]["prometheus.metrics.gauge_with_labels"][
        "value"] == 2.0
    assert data[1]["tags"] == {"alabel": "foo", "anotherlabel": "baz"}
    assert data[2]["samples"]["prometheus.metrics.gauge_with_labels"][
        "value"] == 11.0
    assert data[2]["tags"] == {"alabel": "bar", "anotherlabel": "bazzinga"}
Code example #23
def test_metrics_labels():
    metricset = MetricsSet(MetricsRegistry(0, lambda x: None))
    metricset.counter("x", mylabel="a").inc()
    metricset.counter("y", mylabel="a").inc()
    metricset.counter("x", mylabel="b").inc().inc()
    metricset.counter("x", mylabel="b", myotherlabel="c").inc()
    metricset.counter("x", mylabel="a").dec()
    data = list(metricset.collect())
    asserts = 0
    for d in data:
        if d["tags"] == {"mylabel": "a"}:
            assert d["samples"]["x"]["value"] == 0
            assert d["samples"]["y"]["value"] == 1
            asserts += 1
        elif d["tags"] == {"mylabel": "b"}:
            assert d["samples"]["x"]["value"] == 2
            asserts += 1
        elif d["tags"] == {"mylabel": "b", "myotherlabel": "c"}:
            assert d["samples"]["x"]["value"] == 1
            asserts += 1
    assert asserts == 3
Code example #24
class Client(object):
    """
    The base ElasticAPM client, which handles communication over the
    HTTP API to the APM Server.

    Will read default configuration from the environment variables
    ``ELASTIC_APM_APP_NAME`` and ``ELASTIC_APM_SECRET_TOKEN``
    if available. ::

    >>> from elasticapm import Client

    >>> # Read configuration from environment
    >>> client = Client()

    >>> # Configure the client manually
    >>> client = Client(
    >>>     include_paths=['my.package'],
    >>>     service_name='myapp',
    >>>     secret_token='secret_token',
    >>> )

    >>> # Record an exception
    >>> try:
    >>>     1/0
    >>> except ZeroDivisionError:
    >>>     ident = client.capture_exception()
    >>>     print ("Exception caught; reference is %%s" %% ident)
    """

    logger = get_logger("elasticapm")

    def __init__(self, config=None, **inline):
        # configure loggers first
        cls = self.__class__
        self.logger = get_logger("%s.%s" % (cls.__module__, cls.__name__))
        self.error_logger = get_logger("elasticapm.errors")

        self.tracer = None
        self.processors = []
        self.filter_exception_types_dict = {}
        self._service_info = None

        config = Config(config, inline_dict=inline)
        if config.errors:
            for msg in config.errors.values():
                self.error_logger.error(msg)
            config.disable_send = True
        self.config = VersionedConfig(config, version=None)

        # Insert the log_record_factory into the logging library
        # The LogRecordFactory functionality is only available on python 3.2+
        if compat.PY3 and not self.config.disable_log_record_factory:
            record_factory = logging.getLogRecordFactory()
            # Only way to know if it's wrapped is to create a log record
            throwaway_record = record_factory(__name__, logging.DEBUG, __file__, 252, "dummy_msg", [], None)
            if not hasattr(throwaway_record, "elasticapm_labels"):
                self.logger.debug("Inserting elasticapm log_record_factory into logging")

                # Late import due to circular imports
                import elasticapm.handlers.logging as elastic_logging

                new_factory = elastic_logging.log_record_factory(record_factory)
                logging.setLogRecordFactory(new_factory)

        headers = {
            "Content-Type": "application/x-ndjson",
            "Content-Encoding": "gzip",
            "User-Agent": "elasticapm-python/%s" % elasticapm.VERSION,
        }

        if self.config.secret_token:
            headers["Authorization"] = "Bearer %s" % self.config.secret_token
        transport_kwargs = {
            "metadata": self._build_metadata(),
            "headers": headers,
            "verify_server_cert": self.config.verify_server_cert,
            "server_cert": self.config.server_cert,
            "timeout": self.config.server_timeout,
            "max_flush_time": self.config.api_request_time / 1000.0,
            "max_buffer_size": self.config.api_request_size,
            "processors": self.load_processors(),
        }
        self._api_endpoint_url = compat.urlparse.urljoin(
            self.config.server_url if self.config.server_url.endswith("/") else self.config.server_url + "/",
            constants.EVENTS_API_PATH,
        )
        self._transport = import_string(self.config.transport_class)(self._api_endpoint_url, **transport_kwargs)

        for exc_to_filter in self.config.filter_exception_types or []:
            exc_to_filter_type = exc_to_filter.split(".")[-1]
            exc_to_filter_module = ".".join(exc_to_filter.split(".")[:-1])
            self.filter_exception_types_dict[exc_to_filter_type] = exc_to_filter_module

        if platform.python_implementation() == "PyPy":
            # PyPy introduces a `_functools.partial.__call__` frame due to our use
            # of `partial` in AbstractInstrumentedModule
            skip_modules = ("elasticapm.", "_functools")
        else:
            skip_modules = ("elasticapm.",)

        self.tracer = Tracer(
            frames_collector_func=lambda: list(
                stacks.iter_stack_frames(
                    start_frame=inspect.currentframe(), skip_top_modules=skip_modules, config=self.config
                )
            ),
            frames_processing_func=lambda frames: self._get_stack_info_for_trace(
                frames,
                library_frame_context_lines=self.config.source_lines_span_library_frames,
                in_app_frame_context_lines=self.config.source_lines_span_app_frames,
                with_locals=self.config.collect_local_variables in ("all", "transactions"),
                locals_processor_func=lambda local_var: varmap(
                    lambda k, v: shorten(
                        v,
                        list_length=self.config.local_var_list_max_length,
                        string_length=self.config.local_var_max_length,
                        dict_length=self.config.local_var_dict_max_length,
                    ),
                    local_var,
                ),
            ),
            queue_func=self.queue,
            config=self.config,
            agent=self,
        )
        self.include_paths_re = stacks.get_path_regex(self.config.include_paths) if self.config.include_paths else None
        self.exclude_paths_re = stacks.get_path_regex(self.config.exclude_paths) if self.config.exclude_paths else None
        self._metrics = MetricsRegistry(
            self.config.metrics_interval / 1000.0, self.queue, ignore_patterns=self.config.disable_metrics
        )
        for path in self.config.metrics_sets:
            self._metrics.register(path)
        if self.config.breakdown_metrics:
            self._metrics.register("elasticapm.metrics.sets.breakdown.BreakdownMetricSet")
        compat.atexit_register(self.close)
        if self.config.central_config:
            self._config_updater = IntervalTimer(
                update_config, 1, "eapm conf updater", daemon=True, args=(self,), evaluate_function_interval=True
            )
            self._config_updater.start()
        else:
            self._config_updater = None

    def get_handler(self, name):
        return import_string(name)

    def capture(self, event_type, date=None, context=None, custom=None, stack=None, handled=True, **kwargs):
        """
        Captures and processes an event and pipes it off to Client.send.
        """
        if event_type == "Exception":
            # never gather log stack for exceptions
            stack = False
        data = self._build_msg_for_logging(
            event_type, date=date, context=context, custom=custom, stack=stack, handled=handled, **kwargs
        )

        if data:
            # queue data, and flush the queue if this is an unhandled exception
            self.queue(ERROR, data, flush=not handled)
            return data["id"]

    def capture_message(self, message=None, param_message=None, **kwargs):
        """
        Creates an event from ``message``.

        >>> client.capture_message('My event just happened!')
        """
        return self.capture("Message", message=message, param_message=param_message, **kwargs)

    def capture_exception(self, exc_info=None, handled=True, **kwargs):
        """
        Creates an event from an exception.

        >>> try:
        >>>     exc_info = sys.exc_info()
        >>>     client.capture_exception(exc_info)
        >>> finally:
        >>>     del exc_info

        If exc_info is not provided, or is set to True, then this method will
        perform the ``exc_info = sys.exc_info()`` and the requisite clean-up
        for you.
        """
        return self.capture("Exception", exc_info=exc_info, handled=handled, **kwargs)

    def queue(self, event_type, data, flush=False):
        if self.config.disable_send:
            return
        if flush and is_master_process():
            # don't flush in uWSGI master process to avoid ending up in an unpredictable threading state
            flush = False
        self._transport.queue(event_type, data, flush)

    def begin_transaction(self, transaction_type, trace_parent=None, start=None):
        """
        Register the start of a transaction on the client

        :param transaction_type: type of the transaction, e.g. "request"
        :param trace_parent: an optional TraceParent object for distributed tracing
        :param start: override the start timestamp, mostly useful for testing
        :return: the started transaction object
        """
        return self.tracer.begin_transaction(transaction_type, trace_parent=trace_parent, start=start)

    def end_transaction(self, name=None, result="", duration=None):
        """
        End the current transaction.

        :param name: optional name of the transaction
        :param result: result of the transaction, e.g. "OK" or "HTTP 2xx"
        :param duration: override duration, mostly useful for testing
        :return: the ended transaction object
        """
        transaction = self.tracer.end_transaction(result, name, duration=duration)
        return transaction

    def close(self):
        if self._metrics:
            self._metrics._stop_collect_timer()
        if self._config_updater:
            self._config_updater.cancel()
        self._transport.close()

    def get_service_info(self):
        if self._service_info:
            return self._service_info
        language_version = platform.python_version()
        if hasattr(sys, "pypy_version_info"):
            runtime_version = ".".join(map(str, sys.pypy_version_info[:3]))
        else:
            runtime_version = language_version
        result = {
            "name": keyword_field(self.config.service_name),
            "environment": keyword_field(self.config.environment),
            "version": keyword_field(self.config.service_version),
            "agent": {"name": "python", "version": elasticapm.VERSION},
            "language": {"name": "python", "version": keyword_field(platform.python_version())},
            "runtime": {
                "name": keyword_field(platform.python_implementation()),
                "version": keyword_field(runtime_version),
            },
        }
        if self.config.framework_name:
            result["framework"] = {
                "name": keyword_field(self.config.framework_name),
                "version": keyword_field(self.config.framework_version),
            }
        self._service_info = result
        return result

    def get_process_info(self):
        return {
            "pid": os.getpid(),
            "ppid": os.getppid() if hasattr(os, "getppid") else None,
            "argv": sys.argv,
            "title": None,  # Note: if we implement this, the value needs to be wrapped with keyword_field
        }

    def get_system_info(self):
        system_data = {
            "hostname": keyword_field(socket.gethostname()),
            "architecture": platform.machine(),
            "platform": platform.system().lower(),
        }
        system_data.update(cgroup.get_cgroup_container_metadata())
        pod_name = os.environ.get("KUBERNETES_POD_NAME") or system_data["hostname"]
        changed = False
        if "kubernetes" in system_data:
            k8s = system_data["kubernetes"]
            k8s["pod"]["name"] = pod_name
        else:
            k8s = {"pod": {"name": pod_name}}
        # get kubernetes metadata from environment
        if "KUBERNETES_NODE_NAME" in os.environ:
            k8s["node"] = {"name": os.environ["KUBERNETES_NODE_NAME"]}
            changed = True
        if "KUBERNETES_NAMESPACE" in os.environ:
            k8s["namespace"] = os.environ["KUBERNETES_NAMESPACE"]
            changed = True
        if "KUBERNETES_POD_UID" in os.environ:
            # this takes precedence over any value from /proc/self/cgroup
            k8s["pod"]["uid"] = os.environ["KUBERNETES_POD_UID"]
            changed = True
        if changed:
            system_data["kubernetes"] = k8s
        return system_data

    def _build_metadata(self):
        data = {
            "service": self.get_service_info(),
            "process": self.get_process_info(),
            "system": self.get_system_info(),
        }
        if self.config.global_labels:
            data["labels"] = enforce_label_format(self.config.global_labels)
        return data

    def _build_msg_for_logging(
        self, event_type, date=None, context=None, custom=None, stack=None, handled=True, **kwargs
    ):
        """
        Captures, processes and serializes an event into a dict object
        """
        transaction = execution_context.get_transaction()
        span = execution_context.get_span()
        if transaction:
            transaction_context = deepcopy(transaction.context)
        else:
            transaction_context = {}
        event_data = {}
        if custom is None:
            custom = {}
        if date is not None:
            warnings.warn(
                "The date argument is no longer evaluated and will be removed in a future release", DeprecationWarning
            )
        date = time.time()
        if stack is None:
            stack = self.config.auto_log_stacks
        if context:
            transaction_context.update(context)
            context = transaction_context
        else:
            context = transaction_context
        event_data["context"] = context
        if transaction and transaction.labels:
            context["tags"] = deepcopy(transaction.labels)

        # if '.' not in event_type:
        # Assume it's a builtin
        event_type = "elasticapm.events.%s" % event_type

        handler = self.get_handler(event_type)
        result = handler.capture(self, **kwargs)
        if self._filter_exception_type(result):
            return
        # data (explicit) culprit takes over auto event detection
        culprit = result.pop("culprit", None)
        if custom.get("culprit"):
            culprit = custom.pop("culprit")

        for k, v in compat.iteritems(result):
            if k not in event_data:
                event_data[k] = v

        log = event_data.get("log", {})
        if stack and "stacktrace" not in log:
            if stack is True:
                frames = stacks.iter_stack_frames(skip=3, config=self.config)
            else:
                frames = stack
            frames = stacks.get_stack_info(
                frames,
                with_locals=self.config.collect_local_variables in ("errors", "all"),
                library_frame_context_lines=self.config.source_lines_error_library_frames,
                in_app_frame_context_lines=self.config.source_lines_error_app_frames,
                include_paths_re=self.include_paths_re,
                exclude_paths_re=self.exclude_paths_re,
                locals_processor_func=lambda local_var: varmap(
                    lambda k, v: shorten(
                        v,
                        list_length=self.config.local_var_list_max_length,
                        string_length=self.config.local_var_max_length,
                        dict_length=self.config.local_var_dict_max_length,
                    ),
                    local_var,
                ),
            )
            log["stacktrace"] = frames

        if "stacktrace" in log and not culprit:
            culprit = stacks.get_culprit(log["stacktrace"], self.config.include_paths, self.config.exclude_paths)

        if "level" in log and isinstance(log["level"], compat.integer_types):
            log["level"] = logging.getLevelName(log["level"]).lower()

        if log:
            event_data["log"] = log

        if culprit:
            event_data["culprit"] = culprit

        if "custom" in context:
            context["custom"].update(custom)
        else:
            context["custom"] = custom

        # Make sure all data is coerced
        event_data = transform(event_data)
        if "exception" in event_data:
            event_data["exception"]["handled"] = bool(handled)

        event_data["timestamp"] = int(date * 1000000)

        if transaction:
            if transaction.trace_parent:
                event_data["trace_id"] = transaction.trace_parent.trace_id
            # parent id might already be set in the handler
            event_data.setdefault("parent_id", span.id if span else transaction.id)
            event_data["transaction_id"] = transaction.id
            event_data["transaction"] = {"sampled": transaction.is_sampled, "type": transaction.transaction_type}

        return event_data

    def _filter_exception_type(self, data):
        exception = data.get("exception")
        if not exception:
            return False

        exc_type = exception.get("type")
        exc_module = exception.get("module")
        if exc_module == "None":
            exc_module = None

        if exc_type in self.filter_exception_types_dict:
            exc_to_filter_module = self.filter_exception_types_dict[exc_type]
            if not exc_to_filter_module or exc_to_filter_module == exc_module:
                if exc_module:
                    exc_name = "%s.%s" % (exc_module, exc_type)
                else:
                    exc_name = exc_type
                self.logger.info("Ignored %s exception due to exception type filter", exc_name)
                return True
        return False

    def _get_stack_info_for_trace(
        self,
        frames,
        library_frame_context_lines=None,
        in_app_frame_context_lines=None,
        with_locals=True,
        locals_processor_func=None,
    ):
        """Overrideable in derived clients to add frames/info, e.g. templates"""
        return stacks.get_stack_info(
            frames,
            library_frame_context_lines=library_frame_context_lines,
            in_app_frame_context_lines=in_app_frame_context_lines,
            with_locals=with_locals,
            include_paths_re=self.include_paths_re,
            exclude_paths_re=self.exclude_paths_re,
            locals_processor_func=locals_processor_func,
        )

    def load_processors(self):
        """
        Loads processors from self.config.processors, as well as constants.HARDCODED_PROCESSORS.
        Duplicate processors (based on the path) will be discarded.

        :return: a list of callables
        """
        processors = itertools.chain(self.config.processors, constants.HARDCODED_PROCESSORS)
        seen = {}
        # setdefault has the nice property that it returns the value that it just set on the dict
        return [seen.setdefault(path, import_string(path)) for path in processors if path not in seen]
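As an aside, the `setdefault` idiom in `load_processors` keeps the first occurrence of each dotted path while preserving order. A standalone sketch of the same pattern, with `str.upper` standing in for `import_string` purely for illustration:

seen = {}
paths = ["a.b", "c.d", "a.b", "c.d"]
deduped = [seen.setdefault(p, p.upper()) for p in paths if p not in seen]
assert deduped == ["A.B", "C.D"]  # duplicates dropped, order preserved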
Code example #25
    def __init__(self, config=None, **inline):
        # configure loggers first
        cls = self.__class__
        self.logger = logging.getLogger("%s.%s" %
                                        (cls.__module__, cls.__name__))
        self.error_logger = logging.getLogger("elasticapm.errors")

        self.tracer = None
        self.processors = []
        self.filter_exception_types_dict = {}
        self._service_info = None

        self.config = Config(config, inline_dict=inline)
        if self.config.errors:
            for msg in self.config.errors.values():
                self.error_logger.error(msg)
            self.config.disable_send = True

        headers = {
            "Content-Type": "application/x-ndjson",
            "Content-Encoding": "gzip",
            "User-Agent": "elasticapm-python/%s" % elasticapm.VERSION,
        }

        if self.config.secret_token:
            headers["Authorization"] = "Bearer %s" % self.config.secret_token
        transport_kwargs = {
            "metadata": self._build_metadata(),
            "headers": headers,
            "verify_server_cert": self.config.verify_server_cert,
            "timeout": self.config.server_timeout,
            "max_flush_time": self.config.api_request_time / 1000.0,
            "max_buffer_size": self.config.api_request_size,
        }
        self._api_endpoint_url = compat.urlparse.urljoin(
            self.config.server_url if self.config.server_url.endswith("/") else
            self.config.server_url + "/",
            constants.EVENTS_API_PATH,
        )
        self._transport = import_string(self.config.transport_class)(
            self._api_endpoint_url, **transport_kwargs)

        for exc_to_filter in self.config.filter_exception_types or []:
            exc_to_filter_type = exc_to_filter.split(".")[-1]
            exc_to_filter_module = ".".join(exc_to_filter.split(".")[:-1])
            self.filter_exception_types_dict[exc_to_filter_type] = exc_to_filter_module

        if self.config.processors:
            self.processors = [import_string(p) for p in self.config.processors]
        else:
            self.processors = []

        if platform.python_implementation() == "PyPy":
            # PyPy introduces a `_functools.partial.__call__` frame due to our use
            # of `partial` in AbstractInstrumentedModule
            skip_modules = ("elasticapm.", "_functools")
        else:
            skip_modules = ("elasticapm.", )

        self.tracer = Tracer(
            frames_collector_func=lambda: list(
                stacks.iter_stack_frames(start_frame=inspect.currentframe(), skip_top_modules=skip_modules)
            ),
            frames_processing_func=lambda frames: self._get_stack_info_for_trace(
                frames,
                library_frame_context_lines=self.config.source_lines_span_library_frames,
                in_app_frame_context_lines=self.config.source_lines_span_app_frames,
                with_locals=self.config.collect_local_variables in ("all", "transactions"),
                locals_processor_func=lambda local_var: varmap(
                    lambda k, v: shorten(
                        v,
                        list_length=self.config.local_var_list_max_length,
                        string_length=self.config.local_var_max_length,
                    ),
                    local_var,
                ),
            ),
            queue_func=self.queue,
            sample_rate=self.config.transaction_sample_rate,
            max_spans=self.config.transaction_max_spans,
            span_frames_min_duration=self.config.span_frames_min_duration,
            ignore_patterns=self.config.transactions_ignore_patterns,
        )
        self.include_paths_re = stacks.get_path_regex(self.config.include_paths) if self.config.include_paths else None
        self.exclude_paths_re = stacks.get_path_regex(self.config.exclude_paths) if self.config.exclude_paths else None
        self._metrics = MetricsRegistry(self.config.metrics_interval / 1000.0, self.queue)
        for path in self.config.metrics_sets:
            self._metrics.register(path)
        compat.atexit_register(self.close)
Code example #26
File: base.py Project: mdelapenya/apm-agent-python
class Client(object):
    """
    The base ElasticAPM client, which handles communication over the
    HTTP API to the APM Server.

    Will read default configuration from the environment variables
    ``ELASTIC_APM_APP_NAME`` and ``ELASTIC_APM_SECRET_TOKEN``
    if available. ::

    >>> from elasticapm import Client

    >>> # Read configuration from environment
    >>> client = Client()

    >>> # Configure the client manually
    >>> client = Client(
    >>>     include_paths=['my.package'],
    >>>     service_name='myapp',
    >>>     secret_token='secret_token',
    >>> )

    >>> # Record an exception
    >>> try:
    >>>     1/0
    >>> except ZeroDivisionError:
    >>>     ident = client.capture_exception()
    >>>     print("Exception caught; reference is %s" % ident)
    """

    logger = get_logger("elasticapm")

    def __init__(self, config=None, **inline):
        # configure loggers first
        cls = self.__class__
        self.logger = get_logger("%s.%s" % (cls.__module__, cls.__name__))
        self.error_logger = get_logger("elasticapm.errors")

        self._pid = None
        self._thread_starter_lock = threading.Lock()
        self._thread_managers = {}

        self.tracer = None
        self.processors = []
        self.filter_exception_types_dict = {}
        self._service_info = None
        # setting server_version here is mainly used for testing
        self.server_version = inline.pop("server_version", None)

        self.check_python_version()

        config = Config(config, inline_dict=inline)
        if config.errors:
            for msg in config.errors.values():
                self.error_logger.error(msg)
            config.disable_send = True
        if config.service_name == "python_service":
            self.logger.warning(
                "No custom SERVICE_NAME was set -- using non-descript default 'python_service'"
            )
        self.config = VersionedConfig(config, version=None)

        # Insert the log_record_factory into the logging library
        # The LogRecordFactory functionality is only available on python 3.2+
        if compat.PY3 and not self.config.disable_log_record_factory:
            record_factory = logging.getLogRecordFactory()
            # Only way to know if it's wrapped is to create a log record
            throwaway_record = record_factory(__name__, logging.DEBUG,
                                              __file__, 252, "dummy_msg", [],
                                              None)
            if not hasattr(throwaway_record, "elasticapm_labels"):
                self.logger.debug(
                    "Inserting elasticapm log_record_factory into logging")

                # Late import due to circular imports
                import elasticapm.handlers.logging as elastic_logging

                new_factory = elastic_logging.log_record_factory(
                    record_factory)
                logging.setLogRecordFactory(new_factory)

        headers = {
            "Content-Type": "application/x-ndjson",
            "Content-Encoding": "gzip",
            "User-Agent": self.get_user_agent(),
        }

        transport_kwargs = {
            "headers": headers,
            "verify_server_cert": self.config.verify_server_cert,
            "server_cert": self.config.server_cert,
            "timeout": self.config.server_timeout,
            "processors": self.load_processors(),
        }
        self._api_endpoint_url = compat.urlparse.urljoin(
            self.config.server_url if self.config.server_url.endswith("/") else
            self.config.server_url + "/",
            constants.EVENTS_API_PATH,
        )
        transport_class = import_string(self.config.transport_class)
        self._transport = transport_class(url=self._api_endpoint_url,
                                          client=self,
                                          **transport_kwargs)
        self.config.transport = self._transport
        self._thread_managers["transport"] = self._transport

        for exc_to_filter in self.config.filter_exception_types or []:
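            # "pkg.mod.MyError" yields ("MyError", "pkg.mod"); a bare "MyError"
            # yields an empty module string, which matches any module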
            exc_to_filter_type = exc_to_filter.split(".")[-1]
            exc_to_filter_module = ".".join(exc_to_filter.split(".")[:-1])
            self.filter_exception_types_dict[exc_to_filter_type] = exc_to_filter_module

        if platform.python_implementation() == "PyPy":
            # PyPy introduces a `_functools.partial.__call__` frame due to our use
            # of `partial` in AbstractInstrumentedModule
            skip_modules = ("elasticapm.", "_functools")
        else:
            skip_modules = ("elasticapm.", )

        self.tracer = Tracer(
            frames_collector_func=lambda: list(
                stacks.iter_stack_frames(
                    start_frame=inspect.currentframe(),
                    skip_top_modules=skip_modules,
                    config=self.config,
                )
            ),
            frames_processing_func=lambda frames: self._get_stack_info_for_trace(
                frames,
                library_frame_context_lines=self.config.source_lines_span_library_frames,
                in_app_frame_context_lines=self.config.source_lines_span_app_frames,
                with_locals=self.config.collect_local_variables in ("all", "transactions"),
                locals_processor_func=lambda local_var: varmap(
                    lambda k, v: shorten(
                        v,
                        list_length=self.config.local_var_list_max_length,
                        string_length=self.config.local_var_max_length,
                        dict_length=self.config.local_var_dict_max_length,
                    ),
                    local_var,
                ),
            ),
            queue_func=self.queue,
            config=self.config,
            agent=self,
        )
        self.include_paths_re = stacks.get_path_regex(
            self.config.include_paths) if self.config.include_paths else None
        self.exclude_paths_re = stacks.get_path_regex(
            self.config.exclude_paths) if self.config.exclude_paths else None
        self._metrics = MetricsRegistry(self)
        for path in self.config.metrics_sets:
            self._metrics.register(path)
        if self.config.breakdown_metrics:
            self._metrics.register(
                "elasticapm.metrics.sets.breakdown.BreakdownMetricSet")
        if self.config.prometheus_metrics:
            self._metrics.register(
                "elasticapm.metrics.sets.prometheus.PrometheusMetrics")
        if self.config.metrics_interval:
            self._thread_managers["metrics"] = self._metrics
        compat.atexit_register(self.close)
        if self.config.central_config:
            self._thread_managers["config"] = self.config
        else:
            self._config_updater = None
        if self.config.use_elastic_excepthook:
            self.original_excepthook = sys.excepthook
            sys.excepthook = self._excepthook
        if config.enabled:
            self.start_threads()

        # Save this Client object as the global CLIENT_SINGLETON
        set_client(self)

    def start_threads(self):
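        # compare the stored PID with the current one so that forked workers
        # (e.g. gunicorn/uWSGI children) restart their own background threads
        # rather than relying on threads that did not survive the fork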
        current_pid = os.getpid()
        if self._pid != current_pid:
            with self._thread_starter_lock:
                self.logger.debug(
                    "Detected PID change from %r to %r, starting threads",
                    self._pid, current_pid)
                for manager_type, manager in sorted(
                        self._thread_managers.items(),
                        key=lambda item: item[1].start_stop_order):
                    self.logger.debug("Starting %s thread", manager_type)
                    manager.start_thread(pid=current_pid)
                self._pid = current_pid

    def get_handler(self, name):
        return import_string(name)

    def capture(self,
                event_type,
                date=None,
                context=None,
                custom=None,
                stack=None,
                handled=True,
                **kwargs):
        """
        Captures and processes an event and pipes it off to Client.send.
        """
        if not self.config.is_recording:
            return
        if event_type == "Exception":
            # never gather log stack for exceptions
            stack = False
        data = self._build_msg_for_logging(event_type,
                                           date=date,
                                           context=context,
                                           custom=custom,
                                           stack=stack,
                                           handled=handled,
                                           **kwargs)

        if data:
            # queue data, and flush the queue if this is an unhandled exception
            self.queue(ERROR, data, flush=not handled)
            return data["id"]

    def capture_message(self, message=None, param_message=None, **kwargs):
        """
        Creates an event from ``message``.

        >>> client.capture_message('My event just happened!')
        """
        return self.capture("Message",
                            message=message,
                            param_message=param_message,
                            **kwargs)

    def capture_exception(self, exc_info=None, handled=True, **kwargs):
        """
        Creates an event from an exception.

        >>> try:
        >>>     exc_info = sys.exc_info()
        >>>     client.capture_exception(exc_info)
        >>> finally:
        >>>     del exc_info

        If exc_info is not provided, or is set to True, then this method will
        perform the ``exc_info = sys.exc_info()`` and the requisite clean-up
        for you.
        """
        return self.capture("Exception",
                            exc_info=exc_info,
                            handled=handled,
                            **kwargs)

    def queue(self, event_type, data, flush=False):
        if self.config.disable_send:
            return
        self.start_threads()
        if flush and is_master_process():
            # don't flush in uWSGI master process to avoid ending up in an unpredictable threading state
            flush = False
        self._transport.queue(event_type, data, flush)

    def begin_transaction(self,
                          transaction_type,
                          trace_parent=None,
                          start=None):
        """
        Register the start of a transaction on the client

        :param transaction_type: type of the transaction, e.g. "request"
        :param trace_parent: an optional TraceParent object for distributed tracing
        :param start: override the start timestamp, mostly useful for testing
        :return: the started transaction object
        """
        if self.config.is_recording:
            return self.tracer.begin_transaction(transaction_type,
                                                 trace_parent=trace_parent,
                                                 start=start)

    def end_transaction(self, name=None, result="", duration=None):
        """
        End the current transaction.

        :param name: optional name of the transaction
        :param result: result of the transaction, e.g. "OK" or "HTTP 2xx"
        :param duration: override duration, mostly useful for testing
        :return: the ended transaction object
        """
        transaction = self.tracer.end_transaction(result,
                                                  name,
                                                  duration=duration)
        return transaction
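
    # Usage sketch (illustrative; handle_request() is a hypothetical
    # application function):
    #
    #     client.begin_transaction("request")
    #     try:
    #         handle_request()
    #         client.end_transaction("GET /users", "HTTP 2xx")
    #     except Exception:
    #         client.capture_exception(handled=False)
    #         client.end_transaction("GET /users", "HTTP 5xx")
    #         raise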

    def close(self):
        if self.config.enabled:
            with self._thread_starter_lock:
                for _, manager in sorted(
                        self._thread_managers.items(),
                        key=lambda item: item[1].start_stop_order):
                    manager.stop_thread()
        global CLIENT_SINGLETON
        CLIENT_SINGLETON = None

    def get_service_info(self):
        if self._service_info:
            return self._service_info
        language_version = platform.python_version()
        if hasattr(sys, "pypy_version_info"):
            runtime_version = ".".join(map(str, sys.pypy_version_info[:3]))
        else:
            runtime_version = language_version
        result = {
            "name": keyword_field(self.config.service_name),
            "environment": keyword_field(self.config.environment),
            "version": keyword_field(self.config.service_version),
            "agent": {
                "name": "python",
                "version": elasticapm.VERSION
            },
            "language": {
                "name": "python",
                "version": keyword_field(platform.python_version())
            },
            "runtime": {
                "name": keyword_field(platform.python_implementation()),
                "version": keyword_field(runtime_version),
            },
        }
        if self.config.framework_name:
            result["framework"] = {
                "name": keyword_field(self.config.framework_name),
                "version": keyword_field(self.config.framework_version),
            }
        if self.config.service_node_name:
            result["node"] = {
                "configured_name": keyword_field(self.config.service_node_name)
            }
        self._service_info = result
        return result
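
    # Illustrative shape of the cached result for a CPython service (all
    # values depend on configuration and runtime):
    #
    #     {"name": "myapp", "environment": "production", "version": "1.0",
    #      "agent": {"name": "python", "version": elasticapm.VERSION},
    #      "language": {"name": "python", "version": "3.9.7"},
    #      "runtime": {"name": "CPython", "version": "3.9.7"}}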

    def get_process_info(self):
        return {
            "pid": os.getpid(),
            "ppid": os.getppid() if hasattr(os, "getppid") else None,
            "argv": sys.argv,
            "title":
            None,  # Note: if we implement this, the value needs to be wrapped with keyword_field
        }

    def get_system_info(self):
        system_data = {
            "hostname": keyword_field(self.config.hostname),
            "architecture": platform.machine(),
            "platform": platform.system().lower(),
        }
        system_data.update(cgroup.get_cgroup_container_metadata())
        pod_name = os.environ.get(
            "KUBERNETES_POD_NAME") or system_data["hostname"]
        changed = False
        if "kubernetes" in system_data:
            k8s = system_data["kubernetes"]
            k8s["pod"]["name"] = pod_name
        else:
            k8s = {"pod": {"name": pod_name}}
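        # when cgroup data already produced a "kubernetes" entry, k8s aliases
        # it and the pod name was just updated in place; the standalone dict
        # built in the else branch is only attached below if an env var is set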
        # get kubernetes metadata from environment
        if "KUBERNETES_NODE_NAME" in os.environ:
            k8s["node"] = {"name": os.environ["KUBERNETES_NODE_NAME"]}
            changed = True
        if "KUBERNETES_NAMESPACE" in os.environ:
            k8s["namespace"] = os.environ["KUBERNETES_NAMESPACE"]
            changed = True
        if "KUBERNETES_POD_UID" in os.environ:
            # this takes precedence over any value from /proc/self/cgroup
            k8s["pod"]["uid"] = os.environ["KUBERNETES_POD_UID"]
            changed = True
        if changed:
            system_data["kubernetes"] = k8s
        return system_data

    def get_cloud_info(self):
        """
        Detects if the app is running in a cloud provider and fetches relevant
        metadata from the cloud provider's metadata endpoint.
        """
        provider = str(self.config.cloud_provider).lower()
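        # cloud_provider may name a provider ("aws", "gcp", "azure"), request
        # trial-and-error probing ("auto"/"true"), or disable the lookup
        # ("none"/"false")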

        if not provider or provider == "none" or provider == "false":
            return {}
        if provider == "aws":
            data = cloud.aws_metadata()
            if not data:
                self.logger.warning(
                    "Cloud provider {0} defined, but no metadata was found.".
                    format(provider))
            return data
        elif provider == "gcp":
            data = cloud.gcp_metadata()
            if not data:
                self.logger.warning(
                    "Cloud provider {0} defined, but no metadata was found.".
                    format(provider))
            return data
        elif provider == "azure":
            data = cloud.azure_metadata()
            if not data:
                self.logger.warning(
                    "Cloud provider {0} defined, but no metadata was found.".
                    format(provider))
            return data
        elif provider == "auto" or provider == "true":
            # Trial and error
            data = {}
            data = cloud.aws_metadata()
            if data:
                return data
            data = cloud.gcp_metadata()
            if data:
                return data
            data = cloud.azure_metadata()
            return data
        else:
            self.logger.warning(
                "Unknown value for CLOUD_PROVIDER, skipping cloud metadata: {}"
                .format(provider))
            return {}

    def get_user_agent(self) -> str:
        """
        Compiles the user agent, which will be added as a header to all requests
        to the APM Server
        """
        if self.config.service_version:
            service_version = re.sub(
                r"[^\t _\x21-\x27\x2a-\x5b\x5d-\x7e\x80-\xff]", "_",
                self.config.service_version)
            return "apm-agent-python/{} ({} {})".format(
                elasticapm.VERSION, self.config.service_name, service_version)
        else:
            return "apm-agent-python/{} ({})".format(elasticapm.VERSION,
                                                     self.config.service_name)

    def build_metadata(self):
        data = {
            "service": self.get_service_info(),
            "process": self.get_process_info(),
            "system": self.get_system_info(),
            "cloud": self.get_cloud_info(),
        }
        if not data["cloud"]:
            data.pop("cloud")
        if self.config.global_labels:
            data["labels"] = enforce_label_format(self.config.global_labels)
        return data

    def _build_msg_for_logging(self,
                               event_type,
                               date=None,
                               context=None,
                               custom=None,
                               stack=None,
                               handled=True,
                               **kwargs):
        """
        Captures, processes and serializes an event into a dict object
        """
        transaction = execution_context.get_transaction()
        span = execution_context.get_span()
        if transaction:
            transaction_context = deepcopy(transaction.context)
        else:
            transaction_context = {}
        event_data = {}
        if custom is None:
            custom = {}
        if date is not None:
            warnings.warn(
                "The date argument is no longer evaluated and will be removed in a future release",
                DeprecationWarning)
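        # the deprecated date argument is ignored; events are always stamped
        # with the current time below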
        date = time.time()
        if stack is None:
            stack = self.config.auto_log_stacks
        if context:
            transaction_context.update(context)
            context = transaction_context
        else:
            context = transaction_context
        event_data["context"] = context
        if transaction and transaction.labels:
            context["tags"] = deepcopy(transaction.labels)

        # event_type names a builtin event; expand it to the full dotted path
        # (e.g. "Exception" -> "elasticapm.events.Exception")
        event_type = "elasticapm.events.%s" % event_type

        handler = self.get_handler(event_type)
        result = handler.capture(self, **kwargs)
        if self._filter_exception_type(result):
            return
        # data (explicit) culprit takes over auto event detection
        culprit = result.pop("culprit", None)
        if custom.get("culprit"):
            culprit = custom.pop("culprit")

        for k, v in compat.iteritems(result):
            if k not in event_data:
                event_data[k] = v

        log = event_data.get("log", {})
        if stack and "stacktrace" not in log:
            if stack is True:
                frames = stacks.iter_stack_frames(skip=3, config=self.config)
            else:
                frames = stack
            frames = stacks.get_stack_info(
                frames,
                with_locals=self.config.collect_local_variables in ("errors", "all"),
                library_frame_context_lines=self.config.source_lines_error_library_frames,
                in_app_frame_context_lines=self.config.source_lines_error_app_frames,
                include_paths_re=self.include_paths_re,
                exclude_paths_re=self.exclude_paths_re,
                locals_processor_func=lambda local_var: varmap(
                    lambda k, v: shorten(
                        v,
                        list_length=self.config.local_var_list_max_length,
                        string_length=self.config.local_var_max_length,
                        dict_length=self.config.local_var_dict_max_length,
                    ),
                    local_var,
                ),
            )
            log["stacktrace"] = frames

        if "stacktrace" in log and not culprit:
            culprit = stacks.get_culprit(log["stacktrace"],
                                         self.config.include_paths,
                                         self.config.exclude_paths)

        if "level" in log and isinstance(log["level"], compat.integer_types):
            log["level"] = logging.getLevelName(log["level"]).lower()

        if log:
            event_data["log"] = log

        if culprit:
            event_data["culprit"] = culprit

        if "custom" in context:
            context["custom"].update(custom)
        else:
            context["custom"] = custom

        # Make sure all data is coerced
        event_data = transform(event_data)
        if "exception" in event_data:
            event_data["exception"]["handled"] = bool(handled)

        event_data["timestamp"] = int(date * 1000000)

        if transaction:
            if transaction.trace_parent:
                event_data["trace_id"] = transaction.trace_parent.trace_id
            # parent id might already be set in the handler
            event_data.setdefault("parent_id",
                                  span.id if span else transaction.id)
            event_data["transaction_id"] = transaction.id
            event_data["transaction"] = {
                "sampled": transaction.is_sampled,
                "type": transaction.transaction_type
            }

        return event_data

    def _filter_exception_type(self, data):
        exception = data.get("exception")
        if not exception:
            return False

        exc_type = exception.get("type")
        exc_module = exception.get("module")
        if exc_module == "None":
            exc_module = None

        if exc_type in self.filter_exception_types_dict:
            exc_to_filter_module = self.filter_exception_types_dict[exc_type]
            if not exc_to_filter_module or exc_to_filter_module == exc_module:
                if exc_module:
                    exc_name = "%s.%s" % (exc_module, exc_type)
                else:
                    exc_name = exc_type
                self.logger.debug(
                    "Ignored %s exception due to exception type filter",
                    exc_name)
                return True
        return False
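
    # Illustrative: the dict consulted above is built from the
    # filter_exception_types setting, e.g.
    #
    #     Client(filter_exception_types=["OperationalError", "myapp.errors.IgnoredError"])
    #
    # drops OperationalError regardless of module, but IgnoredError only when
    # raised from the (hypothetical) myapp.errors module.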

    def _get_stack_info_for_trace(
        self,
        frames,
        library_frame_context_lines=None,
        in_app_frame_context_lines=None,
        with_locals=True,
        locals_processor_func=None,
    ):
        """Overrideable in derived clients to add frames/info, e.g. templates"""
        return stacks.get_stack_info(
            frames,
            library_frame_context_lines=library_frame_context_lines,
            in_app_frame_context_lines=in_app_frame_context_lines,
            with_locals=with_locals,
            include_paths_re=self.include_paths_re,
            exclude_paths_re=self.exclude_paths_re,
            locals_processor_func=locals_processor_func,
        )

    def _excepthook(self, type_, value, traceback):
        try:
            self.original_excepthook(type_, value, traceback)
        except Exception:
            self.capture_exception(handled=False)
        finally:
            self.capture_exception(exc_info=(type_, value, traceback),
                                   handled=False)

    def load_processors(self):
        """
        Loads processors from self.config.processors, as well as constants.HARDCODED_PROCESSORS.
        Duplicate processors (based on the path) will be discarded.

        :return: a list of callables
        """
        processors = itertools.chain(self.config.processors,
                                     constants.HARDCODED_PROCESSORS)
        seen = {}
        # setdefault has the nice property that it returns the value that it just set on the dict
        return [
            seen.setdefault(path, import_string(path)) for path in processors
            if path not in seen
        ]

    def should_ignore_url(self, url):
        if self.config.transaction_ignore_urls:
            for pattern in self.config.transaction_ignore_urls:
                if pattern.match(url):
                    return True
        return False

    def check_python_version(self):
        v = tuple(map(int, platform.python_version_tuple()[:2]))
        if v == (2, 7):
            warnings.warn(
                ("The Elastic APM agent will stop supporting Python 2.7 starting in 6.0.0 -- "
                 "Please upgrade to Python 3.5+ to continue to use the latest features."
                 ),
                PendingDeprecationWarning,
            )
        elif v < (3, 5):
            warnings.warn("The Elastic APM agent only supports Python 3.5+",
                          DeprecationWarning)

    def check_server_version(self,
                             gte: Optional[Tuple[int]] = None,
                             lte: Optional[Tuple[int]] = None) -> bool:
        """
        Check APM Server version against greater-or-equal and/or lower-or-equal limits, provided as tuples of integers.
        If server_version is not set, always returns True.
        :param gte: a tuple of ints describing the greater-or-equal limit, e.g. (7, 16)
        :param lte: a tuple of ints describing the lower-or-equal limit, e.g. (7, 99)
        :return: bool
        """
        if not self.server_version:
            return True
        gte = gte or (0,)
        # let's assume the APM Server version will never be greater than 2^32
        lte = lte or (2**32,)
        return bool(gte <= self.server_version <= lte)
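
`check_server_version` relies on Python's lexicographic tuple comparison, so a missing patch level still compares as expected, e.g. (7, 16) <= (7, 16, 2) <= (2**32,). A usage sketch for gating behaviour on the server version; use_new_protocol_feature and fall_back_to_legacy are hypothetical application helpers:

if client.check_server_version(gte=(7, 16)):
    use_new_protocol_feature()  # hypothetical
else:
    fall_back_to_legacy()  # hypothetical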
Code example #28
File: base.py Project: tonyman19/apm-agent-python
    def __init__(self, config=None, **inline):
        # configure loggers first
        cls = self.__class__
        self.logger = get_logger("%s.%s" % (cls.__module__, cls.__name__))
        self.error_logger = get_logger("elasticapm.errors")

        self._pid = None
        self._thread_starter_lock = threading.Lock()
        self._thread_managers = {}

        self.tracer = None
        self.processors = []
        self.filter_exception_types_dict = {}
        self._service_info = None

        self.check_python_version()

        config = Config(config, inline_dict=inline)
        if config.errors:
            for msg in config.errors.values():
                self.error_logger.error(msg)
            config.disable_send = True
        if config.service_name == "python_service":
            self.logger.warning(
                "No custom SERVICE_NAME was set -- using non-descript default 'python_service'"
            )
        self.config = VersionedConfig(config, version=None)

        # Insert the log_record_factory into the logging library
        # The LogRecordFactory functionality is only available on python 3.2+
        if compat.PY3 and not self.config.disable_log_record_factory:
            record_factory = logging.getLogRecordFactory()
            # Only way to know if it's wrapped is to create a log record
            throwaway_record = record_factory(__name__, logging.DEBUG,
                                              __file__, 252, "dummy_msg", [],
                                              None)
            if not hasattr(throwaway_record, "elasticapm_labels"):
                self.logger.debug(
                    "Inserting elasticapm log_record_factory into logging")

                # Late import due to circular imports
                import elasticapm.handlers.logging as elastic_logging

                new_factory = elastic_logging.log_record_factory(
                    record_factory)
                logging.setLogRecordFactory(new_factory)

        headers = {
            "Content-Type": "application/x-ndjson",
            "Content-Encoding": "gzip",
            "User-Agent": "elasticapm-python/%s" % elasticapm.VERSION,
        }

        transport_kwargs = {
            "headers": headers,
            "verify_server_cert": self.config.verify_server_cert,
            "server_cert": self.config.server_cert,
            "timeout": self.config.server_timeout,
            "processors": self.load_processors(),
        }
        self._api_endpoint_url = compat.urlparse.urljoin(
            self.config.server_url if self.config.server_url.endswith("/") else
            self.config.server_url + "/",
            constants.EVENTS_API_PATH,
        )
        transport_class = import_string(self.config.transport_class)
        self._transport = transport_class(self._api_endpoint_url, self,
                                          **transport_kwargs)
        self.config.transport = self._transport
        self._thread_managers["transport"] = self._transport

        for exc_to_filter in self.config.filter_exception_types or []:
            exc_to_filter_type = exc_to_filter.split(".")[-1]
            exc_to_filter_module = ".".join(exc_to_filter.split(".")[:-1])
            self.filter_exception_types_dict[exc_to_filter_type] = exc_to_filter_module

        if platform.python_implementation() == "PyPy":
            # PyPy introduces a `_functools.partial.__call__` frame due to our use
            # of `partial` in AbstractInstrumentedModule
            skip_modules = ("elasticapm.", "_functools")
        else:
            skip_modules = ("elasticapm.", )

        self.tracer = Tracer(
            frames_collector_func=lambda: list(
                stacks.iter_stack_frames(
                    start_frame=inspect.currentframe(),
                    skip_top_modules=skip_modules,
                    config=self.config,
                )
            ),
            frames_processing_func=lambda frames: self._get_stack_info_for_trace(
                frames,
                library_frame_context_lines=self.config.source_lines_span_library_frames,
                in_app_frame_context_lines=self.config.source_lines_span_app_frames,
                with_locals=self.config.collect_local_variables in ("all", "transactions"),
                locals_processor_func=lambda local_var: varmap(
                    lambda k, v: shorten(
                        v,
                        list_length=self.config.local_var_list_max_length,
                        string_length=self.config.local_var_max_length,
                        dict_length=self.config.local_var_dict_max_length,
                    ),
                    local_var,
                ),
            ),
            queue_func=self.queue,
            config=self.config,
            agent=self,
        )
        self.include_paths_re = stacks.get_path_regex(
            self.config.include_paths) if self.config.include_paths else None
        self.exclude_paths_re = stacks.get_path_regex(
            self.config.exclude_paths) if self.config.exclude_paths else None
        self._metrics = MetricsRegistry(self)
        for path in self.config.metrics_sets:
            self._metrics.register(path)
        if self.config.breakdown_metrics:
            self._metrics.register(
                "elasticapm.metrics.sets.breakdown.BreakdownMetricSet")
        if self.config.prometheus_metrics:
            self._metrics.register(
                "elasticapm.metrics.sets.prometheus.PrometheusMetrics")
        self._thread_managers["metrics"] = self._metrics
        compat.atexit_register(self.close)
        if self.config.central_config:
            self._thread_managers["config"] = self.config
        else:
            self._config_updater = None
        if self.config.use_elastic_excepthook:
            self.original_excepthook = sys.excepthook
            sys.excepthook = self._excepthook
        if config.enabled:
            self.start_threads()

        # Save this Client object as the global CLIENT_SINGLETON
        set_client(self)
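
In this variant the transport is resolved from a dotted path with `import_string` and called positionally with the endpoint URL and the client, so swapping it out is purely a configuration matter. A hedged sketch; `elasticapm.transport.http.Transport` is the default HTTP transport in recent agent versions, and the other values are illustrative:

from elasticapm import Client

client = Client(
    service_name="myapp",
    server_url="http://localhost:8200",
    transport_class="elasticapm.transport.http.Transport",
)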
Code example #29
def test_metrics_registry(elasticapm_client):
    registry = MetricsRegistry(elasticapm_client)
    registry.register("tests.metrics.base_tests.DummyMetricSet")
    registry.collect()
    assert len(elasticapm_client.events[constants.METRICSET])
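
`register` takes a dotted path that is resolved with `import_string`, so any `MetricsSet` subclass works. A minimal sketch of what a metric set like the `DummyMetricSet` referenced above might look like; the class body is illustrative, not the actual test fixture:

from elasticapm.metrics.base_metrics import MetricsSet


class DummyMetricSet(MetricsSet):
    def before_collect(self):
        # hook invoked by the registry just before collect(); record one
        # sample so the resulting metricset is non-empty
        self.gauge("a.b.c.d").val = 42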