Example #1
import logging

import synapse.metrics

logger = logging.getLogger(__name__)

metrics = synapse.metrics.get_metrics_for(__name__)

incoming_requests_counter = metrics.register_counter(
    "requests",
    labels=["method", "servlet"],
)
outgoing_responses_counter = metrics.register_counter(
    "responses",
    labels=["method", "code"],
)

response_timer = metrics.register_distribution(
    "response_time",
    labels=["method", "servlet"]
)

_next_request_id = 0


def request_handler(request_handler):
    """Wraps a method that acts as a request handler with the necessary logging
    and exception handling.

    The method must have a signature of "handle_foo(self, request)". The
    argument "self" must have "version_string" and "clock" attributes. The
    argument "request" must be a twisted HTTP request.

    The method must return a deferred. If the deferred succeeds we assume that
    a response has been sent. If the deferred fails with a SynapseError we use
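
Not shown in the excerpt above is how these metrics get fed. Below is a minimal sketch, assuming the pre-Prometheus-client synapse.metrics API of this era, in which CounterMetric.inc() and DistributionMetric.inc_by() take label values positionally in the order declared at registration; the helper name and its arguments are invented for illustration.

# Hypothetical helper (not part of the original module): how the metrics
# registered above would be fed for one request/response.
def record_request_metrics(request, servlet_name, code, clock, start_msec):
    # Count the incoming request by HTTP method and servlet class name.
    incoming_requests_counter.inc(request.method, servlet_name)
    # Count the outgoing response by method and status code.
    outgoing_responses_counter.inc(request.method, str(code))
    # Record how long the request took, in milliseconds.
    response_timer.inc_by(clock.time_msec() - start_msec, request.method, servlet_name)
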
Example #2
import synapse.metrics

from synapse.util.retryutils import get_retry_limiter, NotRetryingDestination

import itertools
import logging
import random


logger = logging.getLogger(__name__)


# synapse.federation.federation_client is a silly name
metrics = synapse.metrics.get_metrics_for("synapse.federation.client")

sent_pdus_destination_dist = metrics.register_distribution("sent_pdu_destinations")

sent_edus_counter = metrics.register_counter("sent_edus")

sent_queries_counter = metrics.register_counter("sent_queries", labels=["type"])


class FederationClient(FederationBase):

    def start_get_pdu_cache(self):
        self._get_pdu_cache = ExpiringCache(
            cache_name="get_pdu_cache",
            clock=self._clock,
            max_len=1000,
            expiry_ms=120*1000,
            reset_expiry_on_get=False,
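
For illustration, a sketch of how these federation counters would typically be updated; the function and its argument values are hypothetical, and the inc()/inc_by() calls assume the same positional-label metrics API as the registrations above.

# Hypothetical call sites for the counters and distribution registered above.
def record_outbound_traffic(destinations, query_type):
    # One PDU fan-out: record how many destinations it went to.
    sent_pdus_destination_dist.inc_by(len(destinations))
    # An unlabelled counter: just bump it.
    sent_edus_counter.inc()
    # A labelled counter: the single label value is the query type,
    # e.g. "profile" or "client_device_keys".
    sent_queries_counter.inc(query_type)
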
Example #3
import logging
import sys
import time
import threading

import synapse.metrics

DEBUG_CACHES = False

logger = logging.getLogger(__name__)

sql_logger = logging.getLogger("synapse.storage.SQL")
transaction_logger = logging.getLogger("synapse.storage.txn")
perf_logger = logging.getLogger("synapse.storage.TIME")


metrics = synapse.metrics.get_metrics_for("synapse.storage")

sql_scheduling_timer = metrics.register_distribution("schedule_time")

sql_query_timer = metrics.register_distribution("query_time", labels=["verb"])
sql_txn_timer = metrics.register_distribution("transaction_time", labels=["desc"])

caches_by_name = {}
cache_counter = metrics.register_cache(
    "cache",
    lambda: {(name,): len(caches_by_name[name]) for name in caches_by_name.keys()},
    labels=["name"],
)


class Cache(object):

    def __init__(self, name, max_entries=1000, keylen=1, lru=False):
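
The size callback passed to register_cache() above reports len(caches_by_name[name]) for every registered cache, so each cache has to publish its backing store under its own name. A minimal sketch of that contract follows, assuming the era's CacheMetric exposes inc_hits()/inc_misses() with positional label values; SketchCache is illustrative, not the project's Cache implementation.

# Illustrative only: how a cache hooks into caches_by_name and cache_counter.
class SketchCache(object):
    def __init__(self, name):
        self.name = name
        self.store = {}
        # Publish the backing store so the size callback given to
        # register_cache() can report len(caches_by_name[name]).
        caches_by_name[name] = self.store

    def get(self, key):
        if key in self.store:
            cache_counter.inc_hits(self.name)   # hit, labelled by cache name
            return self.store[key]
        cache_counter.inc_misses(self.name)     # miss, labelled by cache name
        raise KeyError(key)
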
Example #4
import logging

import synapse.metrics

logger = logging.getLogger(__name__)

metrics = synapse.metrics.get_metrics_for(__name__)

incoming_requests_counter = metrics.register_counter(
    "requests",
    labels=["method", "servlet", "tag"],
)
outgoing_responses_counter = metrics.register_counter(
    "responses",
    labels=["method", "code"],
)

response_timer = metrics.register_distribution(
    "response_time",
    labels=["method", "servlet", "tag"]
)

response_ru_utime = metrics.register_distribution(
    "response_ru_utime", labels=["method", "servlet", "tag"]
)

response_ru_stime = metrics.register_distribution(
    "response_ru_stime", labels=["method", "servlet", "tag"]
)

response_db_txn_count = metrics.register_distribution(
    "response_db_txn_count", labels=["method", "servlet", "tag"]
)

response_db_txn_duration = metrics.register_distribution(
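
A sketch of the kind of end-of-request hook that would feed these distributions, assuming a LoggingContext that exposes get_resource_usage(), db_txn_count and db_txn_duration as it did in this era of the codebase; the function name and arguments are invented for illustration.

# Hypothetical end-of-request hook feeding the distributions above.
def report_request_usage(context, elapsed_ms, method, servlet_name, tag):
    response_timer.inc_by(elapsed_ms, method, servlet_name, tag)

    # CPU time accumulated while this request's logging context was active.
    ru_utime, ru_stime = context.get_resource_usage()
    response_ru_utime.inc_by(ru_utime, method, servlet_name, tag)
    response_ru_stime.inc_by(ru_stime, method, servlet_name, tag)

    # Database work attributed to the request by its logging context.
    response_db_txn_count.inc_by(context.db_txn_count, method, servlet_name, tag)
    response_db_txn_duration.inc_by(context.db_txn_duration, method, servlet_name, tag)
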
Example #5
import logging
import sys
import time
import threading

import synapse.metrics


logger = logging.getLogger(__name__)

sql_logger = logging.getLogger("synapse.storage.SQL")
transaction_logger = logging.getLogger("synapse.storage.txn")
perf_logger = logging.getLogger("synapse.storage.TIME")


metrics = synapse.metrics.get_metrics_for("synapse.storage")

sql_scheduling_timer = metrics.register_distribution("schedule_time")

sql_query_timer = metrics.register_distribution("query_time", labels=["verb"])
sql_txn_timer = metrics.register_distribution("transaction_time", labels=["desc"])


class LoggingTransaction(object):
    """An object that almost-transparently proxies for the 'txn' object
    passed to the constructor. Adds logging and metrics to the .execute()
    method."""
    __slots__ = [
        "txn", "name", "database_engine", "after_callbacks", "exception_callbacks",
    ]

    def __init__(self, txn, name, database_engine, after_callbacks,
                 exception_callbacks):
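
The docstring above says the logging and metrics are added to .execute(); a minimal sketch of that pattern follows, with the timing and labelling details assumed rather than copied from the project: each statement is timed and the sample reported to sql_query_timer under the statement's leading verb.

# Illustrative sketch of what LoggingTransaction.execute() roughly does with
# the metrics registered above.
def timed_execute(txn, name, sql, *args):
    start = time.time() * 1000
    try:
        return txn.execute(sql, *args)
    finally:
        elapsed = time.time() * 1000 - start
        sql_logger.debug("[SQL time] {%s} %f", name, elapsed)
        # Label the sample with the leading verb (SELECT, INSERT, ...).
        sql_query_timer.inc_by(elapsed, sql.split()[0])
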
Example #6
from synapse.events import FrozenEvent
import synapse.metrics

from synapse.util.retryutils import get_retry_limiter, NotRetryingDestination

import copy
import itertools
import logging
import random

logger = logging.getLogger(__name__)

# synapse.federation.federation_client is a silly name
metrics = synapse.metrics.get_metrics_for("synapse.federation.client")

sent_pdus_destination_dist = metrics.register_distribution(
    "sent_pdu_destinations")

sent_edus_counter = metrics.register_counter("sent_edus")

sent_queries_counter = metrics.register_counter("sent_queries",
                                                labels=["type"])


class FederationClient(FederationBase):
    def start_get_pdu_cache(self):
        self._get_pdu_cache = ExpiringCache(
            cache_name="get_pdu_cache",
            clock=self._clock,
            max_len=1000,
            expiry_ms=120 * 1000,
            reset_expiry_on_get=False,
Example #7

from synapse.util.logcontext import LoggingContext
import synapse.metrics

import logging

logger = logging.getLogger(__name__)

metrics = synapse.metrics.get_metrics_for(__name__)

block_timer = metrics.register_distribution("block_timer",
                                            labels=["block_name"])

block_ru_utime = metrics.register_distribution("block_ru_utime",
                                               labels=["block_name"])

block_ru_stime = metrics.register_distribution("block_ru_stime",
                                               labels=["block_name"])

block_db_txn_count = metrics.register_distribution("block_db_txn_count",
                                                   labels=["block_name"])

block_db_txn_duration = metrics.register_distribution("block_db_txn_duration",
                                                      labels=["block_name"])


class Measure(object):
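
The excerpt stops at the class header. In use, Measure is a context manager that attributes everything inside the with-block to a single block_name label value; the caller below is hypothetical and assumes the Measure(clock, name) constructor of this era.

# Hypothetical caller: the body of the "with" block is timed and attributed
# to the "do_interesting_work" label value.
def handle_something(clock):
    with Measure(clock, "do_interesting_work"):
        pass  # ... the work being measured goes here ...

On exit, the context manager would feed block_timer with the elapsed time and the remaining block_* distributions with CPU and database usage, all under that block name.
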
Example #8
import logging
import urllib

import synapse.metrics

logger = logging.getLogger(__name__)

metrics = synapse.metrics.get_metrics_for(__name__)

incoming_requests_counter = metrics.register_counter(
    "requests",
    labels=["method", "servlet"],
)
outgoing_responses_counter = metrics.register_counter(
    "responses",
    labels=["method", "code"],
)

response_timer = metrics.register_distribution("response_time",
                                               labels=["method", "servlet"])

_next_request_id = 0


def request_handler(request_handler):
    """Wraps a method that acts as a request handler with the necessary logging
    and exception handling.

    The method must have a signature of "handle_foo(self, request)". The
    argument "self" must have "version_string" and "clock" attributes. The
    argument "request" must be a twisted HTTP request.

    The method must return a deferred. If the deferred succeeds we assume that
    a response has been sent. If the deferred fails with a SynapseError we use
    it to send a JSON response with the appropriate HTTP response code. If the
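
A hypothetical servlet wired up the way this docstring requires: the decorated method takes (self, request), "self" carries the clock and version_string attributes the wrapper relies on, and the method returns a deferred after sending its own response. ExampleResource and its constructor arguments are invented for illustration.

from twisted.internet import defer


class ExampleResource(object):
    """Hypothetical servlet showing the documented contract."""

    def __init__(self, clock, version_string):
        # The wrapper reads these two attributes off "self".
        self.clock = clock
        self.version_string = version_string

    @request_handler
    @defer.inlineCallbacks
    def _async_render_GET(self, request):
        # Do the real work, then send the response ourselves: a succeeding
        # deferred means the response has already gone out.
        yield defer.succeed(None)
        request.setResponseCode(200)
        request.write(b"{}")
        request.finish()
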
Example #9

from synapse.util.logcontext import LoggingContext
import synapse.metrics

import logging


logger = logging.getLogger(__name__)


metrics = synapse.metrics.get_metrics_for(__name__)

block_timer = metrics.register_distribution(
    "block_timer",
    labels=["block_name"]
)

block_ru_utime = metrics.register_distribution(
    "block_ru_utime", labels=["block_name"]
)

block_ru_stime = metrics.register_distribution(
    "block_ru_stime", labels=["block_name"]
)

block_db_txn_count = metrics.register_distribution(
    "block_db_txn_count", labels=["block_name"]
)

block_db_txn_duration = metrics.register_distribution(