Code example #1
0
File: server.py  Project: heavenlyhash/synapse
# Module-level logger for this file.
logger = logging.getLogger(__name__)

# Metrics group scoped to this module's name.
metrics = synapse.metrics.get_metrics_for(__name__)

# Counter of requests received, sliceable by HTTP method and servlet.
incoming_requests_counter = metrics.register_counter(
    "requests", labels=["method", "servlet"],
)
# Counter of responses sent, sliceable by HTTP method and status code.
outgoing_responses_counter = metrics.register_counter(
    "responses", labels=["method", "code"],
)

# Distribution of response times, sliceable by HTTP method and servlet.
response_timer = metrics.register_distribution(
    "response_time", labels=["method", "servlet"],
)

# Id handed to the next incoming request (incremented elsewhere in the file).
_next_request_id = 0


def request_handler(request_handler):
    """Wraps a method that acts as a request handler with the necessary logging
    and exception handling.

    The method must have a signature of "handle_foo(self, request)". The
    argument "self" must have "version_string" and "clock" attributes. The
    argument "request" must be a twisted HTTP request.

    The method must return a deferred. If the deferred succeeds we assume that
    a response has been sent. If the deferred fails with a SynapseError we use
Code example #2
0
File: _base.py  Project: heavenlyhash/synapse
import sys
import time
import threading

# Flag consulted elsewhere to turn on extra cache-debugging behaviour.
DEBUG_CACHES = False

logger = logging.getLogger(__name__)

# Dedicated loggers so SQL text, transaction activity and timing output
# can each be filtered independently of the module logger.
sql_logger = logging.getLogger("synapse.storage.SQL")
transaction_logger = logging.getLogger("synapse.storage.txn")
perf_logger = logging.getLogger("synapse.storage.TIME")


# Metrics group for the storage layer as a whole.
metrics = synapse.metrics.get_metrics_for("synapse.storage")

# Distribution named "schedule_time" — presumably time queries wait before
# running; confirm against the call sites.
sql_scheduling_timer = metrics.register_distribution("schedule_time")

# Query timing sliced per SQL verb; transaction timing sliced per description.
sql_query_timer = metrics.register_distribution("query_time", labels=["verb"])
sql_txn_timer = metrics.register_distribution("transaction_time", labels=["desc"])

# Live caches keyed by name; the callback below reports each cache's
# current size under a ("name",) label tuple.
caches_by_name = {}
cache_counter = metrics.register_cache(
    "cache",
    lambda: {(name,): len(cache) for name, cache in caches_by_name.items()},
    labels=["name"],
)


class Cache(object):

    def __init__(self, name, max_entries=1000, keylen=1, lru=False):
Code example #3
0
File: server.py  Project: mebjas/synapse
# Module-level logger for this file.
logger = logging.getLogger(__name__)

# Metrics group scoped to this module's name.
metrics = synapse.metrics.get_metrics_for(__name__)

# Request counter carries an extra "tag" label; the response counter is
# still keyed only by method and status code.
incoming_requests_counter = metrics.register_counter(
    "requests", labels=["method", "servlet", "tag"],
)
outgoing_responses_counter = metrics.register_counter(
    "responses", labels=["method", "code"],
)

# Per-request distributions, all sharing the same label set:
# response time, ru_utime/ru_stime (presumably CPU usage from getrusage —
# confirm at the recording site), and database transaction count.
response_timer = metrics.register_distribution(
    "response_time", labels=["method", "servlet", "tag"],
)
response_ru_utime = metrics.register_distribution(
    "response_ru_utime", labels=["method", "servlet", "tag"],
)
response_ru_stime = metrics.register_distribution(
    "response_ru_stime", labels=["method", "servlet", "tag"],
)
response_db_txn_count = metrics.register_distribution(
    "response_db_txn_count", labels=["method", "servlet", "tag"],
)

response_db_txn_duration = metrics.register_distribution(
コード例 #4
0
import synapse.metrics

from synapse.util.retryutils import get_retry_limiter, NotRetryingDestination

import itertools
import logging
import random


# Module-level logger for this file.
logger = logging.getLogger(__name__)


# Registered under "synapse.federation.client" instead of __name__ because
# "synapse.federation.federation_client" is a silly name.
metrics = synapse.metrics.get_metrics_for("synapse.federation.client")

# Distribution of destination counts for outgoing PDUs.
sent_pdus_destination_dist = metrics.register_distribution("sent_pdu_destinations")

# Totals of EDUs sent, and of queries sent broken down by query type.
sent_edus_counter = metrics.register_counter("sent_edus")
sent_queries_counter = metrics.register_counter("sent_queries", labels=["type"])


class FederationClient(FederationBase):

    def start_get_pdu_cache(self):
        self._get_pdu_cache = ExpiringCache(
            cache_name="get_pdu_cache",
            clock=self._clock,
            max_len=1000,
            expiry_ms=120*1000,
            reset_expiry_on_get=False,
Code example #5
0
File: metrics.py  Project: 0-T-0/synapse
# limitations under the License.


from synapse.util.logcontext import LoggingContext
import synapse.metrics

import logging


# Module-level logger for this file.
logger = logging.getLogger(__name__)


# Metrics group scoped to this module's name.
metrics = synapse.metrics.get_metrics_for(__name__)

# Distributions describing measured code blocks, all keyed by block_name:
# elapsed time, ru_utime/ru_stime (presumably CPU usage from getrusage —
# confirm at the recording site), and database transaction count.
block_timer = metrics.register_distribution(
    "block_timer", labels=["block_name"],
)
block_ru_utime = metrics.register_distribution(
    "block_ru_utime", labels=["block_name"],
)
block_ru_stime = metrics.register_distribution(
    "block_ru_stime", labels=["block_name"],
)
block_db_txn_count = metrics.register_distribution(
    "block_db_txn_count", labels=["block_name"],
)

block_db_txn_duration = metrics.register_distribution(