Example No. 1
# limitations under the License.

from twisted.internet import defer

from synapse.util.logutils import log_function
from synapse.util.async import run_on_reactor, ObservableDeferred
from synapse.types import StreamToken
import synapse.metrics

import logging

logger = logging.getLogger(__name__)

metrics = synapse.metrics.get_metrics_for(__name__)

notified_events_counter = metrics.register_counter("notified_events")


# TODO(paul): Should be shared somewhere
def count(func, l):
    """Return the number of items in l for which func returns true."""
    n = 0
    for x in l:
        if func(x):
            n += 1
    return n
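
# A minimal usage sketch of count() (illustrative, not part of the
# original module):
def _count_usage_sketch():
    # count the even numbers in a list; returns 2
    return count(lambda x: x % 2 == 0, [1, 2, 3, 4])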


class _NotificationListener(object):
    """ This represents a single client connection to the events stream.
    The events stream handler will have yielded to the deferred, so to
Example No. 2
"""The server side of the replication stream.
"""

from twisted.internet import defer, reactor
from twisted.internet.protocol import Factory

from .streams import STREAMS_MAP, FederationStream
from .protocol import ServerReplicationStreamProtocol

from synapse.util.metrics import Measure, measure_func

import logging
import synapse.metrics

metrics = synapse.metrics.get_metrics_for(__name__)
stream_updates_counter = metrics.register_counter("stream_updates",
                                                  labels=["stream_name"])
user_sync_counter = metrics.register_counter("user_sync")
federation_ack_counter = metrics.register_counter("federation_ack")
remove_pusher_counter = metrics.register_counter("remove_pusher")
invalidate_cache_counter = metrics.register_counter("invalidate_cache")
user_ip_cache_counter = metrics.register_counter("user_ip_cache")

logger = logging.getLogger(__name__)


class ReplicationStreamProtocolFactory(Factory):
    """Factory for new replication connections.
    """
    def __init__(self, hs):
        self.streamer = ReplicationStreamer(hs)
        self.clock = hs.get_clock()
Example No. 3
from synapse.util.async import ObservableDeferred
from synapse.util.logcontext import PreserveLoggingContext, preserve_fn
from synapse.util.metrics import Measure
from synapse.types import StreamToken
from synapse.visibility import filter_events_for_client
import synapse.metrics

from collections import namedtuple

import logging

logger = logging.getLogger(__name__)

metrics = synapse.metrics.get_metrics_for(__name__)

notified_events_counter = metrics.register_counter("notified_events")

users_woken_by_stream_counter = metrics.register_counter(
    "users_woken_by_stream", labels=["stream"])


# TODO(paul): Should be shared somewhere
def count(func, l):
    """Return the number of items in l for which func returns true."""
    n = 0
    for x in l:
        if func(x):
            n += 1
    return n

Example No. 4
from synapse.types import get_domain_from_id
from synapse.util import async
from synapse.util.caches.response_cache import ResponseCache
from synapse.util.logcontext import make_deferred_yieldable, preserve_fn
from synapse.util.logutils import log_function

# when processing incoming transactions, we try to handle multiple rooms in
# parallel, up to this limit.
TRANSACTION_CONCURRENCY_LIMIT = 10

logger = logging.getLogger(__name__)

# synapse.federation.federation_server is a silly name
metrics = synapse.metrics.get_metrics_for("synapse.federation.server")

received_pdus_counter = metrics.register_counter("received_pdus")

received_edus_counter = metrics.register_counter("received_edus")

received_queries_counter = metrics.register_counter("received_queries",
                                                    labels=["type"])


class FederationServer(FederationBase):
    def __init__(self, hs):
        super(FederationServer, self).__init__(hs)

        self.auth = hs.get_auth()
        self.handler = hs.get_handlers().federation_handler

        self._server_linearizer = async.Linearizer("fed_server")
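
A hedged sketch (not from the original snippet) of how a cap like
TRANSACTION_CONCURRENCY_LIMIT above is typically applied, assuming the
concurrently_execute helper from synapse.util.async; process_room and
room_ids are illustrative names:

from twisted.internet import defer
from synapse.util.async import concurrently_execute

@defer.inlineCallbacks
def handle_rooms(process_room, room_ids):
    # run process_room over room_ids, handling at most
    # TRANSACTION_CONCURRENCY_LIMIT rooms in parallel
    yield concurrently_execute(process_room, room_ids,
                               TRANSACTION_CONCURRENCY_LIMIT)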
Example No. 5
from synapse.util.retryutils import get_retry_limiter, NotRetryingDestination

import copy
import itertools
import logging
import random

logger = logging.getLogger(__name__)

# synapse.federation.federation_client is a silly name
metrics = synapse.metrics.get_metrics_for("synapse.federation.client")

sent_pdus_destination_dist = metrics.register_distribution(
    "sent_pdu_destinations")

sent_edus_counter = metrics.register_counter("sent_edus")

sent_queries_counter = metrics.register_counter("sent_queries",
                                                labels=["type"])


class FederationClient(FederationBase):
    def __init__(self, hs):
        super(FederationClient, self).__init__(hs)

    def start_get_pdu_cache(self):
        self._get_pdu_cache = ExpiringCache(
            cache_name="get_pdu_cache",
            clock=self._clock,
            max_len=1000,
            expiry_ms=120 * 1000,
Example No. 6
    SyncCommand,
)
from .streams import STREAMS_MAP

from synapse.util.stringutils import random_string
from synapse.metrics.metric import CounterMetric

import logging
import synapse.metrics
import struct
import fcntl

metrics = synapse.metrics.get_metrics_for(__name__)

connection_close_counter = metrics.register_counter(
    "close_reason",
    labels=["reason_type"],
)

# A list of all connected protocols. This allows us to send metrics about the
# connections.
connected_connections = []

logger = logging.getLogger(__name__)

PING_TIME = 5000
PING_TIMEOUT_MULTIPLIER = 5
PING_TIMEOUT_MS = PING_TIME * PING_TIMEOUT_MULTIPLIER
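
# A hedged sketch of the liveness check these constants imply
# (illustrative; now_ms and last_received_ping_ms are assumed names):
def _connection_looks_dead(now_ms, last_received_ping_ms):
    # no ping for PING_TIMEOUT_MULTIPLIER intervals of PING_TIME ms
    # (5 * 5000ms = 25s) means the peer is presumed gone
    return now_ms - last_received_ping_ms > PING_TIMEOUT_MS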


class ConnectionStates(object):
    CONNECTING = "connecting"
Example No. 7
from synapse.util.retryutils import get_retry_limiter, NotRetryingDestination

import itertools
import logging
import random


logger = logging.getLogger(__name__)


# synapse.federation.federation_client is a silly name
metrics = synapse.metrics.get_metrics_for("synapse.federation.client")

sent_pdus_destination_dist = metrics.register_distribution("sent_pdu_destinations")

sent_edus_counter = metrics.register_counter("sent_edus")

sent_queries_counter = metrics.register_counter("sent_queries", labels=["type"])


class FederationClient(FederationBase):

    def start_get_pdu_cache(self):
        self._get_pdu_cache = ExpiringCache(
            cache_name="get_pdu_cache",
            clock=self._clock,
            max_len=1000,
            expiry_ms=120*1000,
            reset_expiry_on_get=False,
        )
Example No. 8
import logging
import urllib
import simplejson

import synapse.metrics

logger = logging.getLogger(__name__)

metrics = synapse.metrics.get_metrics_for(__name__)

# total number of responses served, split by method/servlet/tag
response_count = metrics.register_counter(
    "response_count",
    labels=["method", "servlet", "tag"],
    alternative_names=(
        # the following are all deprecated aliases for the same metric
        metrics.name_prefix + x for x in (
            "_requests",
            "_response_time:count",
            "_response_ru_utime:count",
            "_response_ru_stime:count",
            "_response_db_txn_count:count",
            "_response_db_txn_duration:count",
        )))

requests_counter = metrics.register_counter(
    "requests_received",
    labels=[
        "method",
        "servlet",
    ],
)
Example No. 9
from synapse.storage.presence import UserPresenceState

from synapse.util.logcontext import preserve_fn
from synapse.util.logutils import log_function
from synapse.util.metrics import Measure
from synapse.util.wheel_timer import WheelTimer
from synapse.types import UserID, get_domain_from_id
import synapse.metrics

import logging

logger = logging.getLogger(__name__)

metrics = synapse.metrics.get_metrics_for(__name__)

notified_presence_counter = metrics.register_counter("notified_presence")
federation_presence_out_counter = metrics.register_counter(
    "federation_presence_out")
presence_updates_counter = metrics.register_counter("presence_updates")
timers_fired_counter = metrics.register_counter("timers_fired")
federation_presence_counter = metrics.register_counter("federation_presence")
bump_active_time_counter = metrics.register_counter("bump_active_time")

get_updates_counter = metrics.register_counter("get_updates", labels=["type"])

notify_reason_counter = metrics.register_counter("notify_reason",
                                                 labels=["reason"])
state_transition_counter = metrics.register_counter("state_transition",
                                                    labels=["from", "to"])

# If a user was last active in the last LAST_ACTIVE_GRANULARITY, consider them
Example No. 10
from canonicaljson import encode_canonical_json
from collections import deque, namedtuple

import synapse
import synapse.metrics
from synapse.events import USE_FROZEN_DICTS  # needed by encode_json below


import logging
import math
import ujson as json

logger = logging.getLogger(__name__)


metrics = synapse.metrics.get_metrics_for(__name__)
persist_event_counter = metrics.register_counter("persisted_events")


def encode_json(json_object):
    if USE_FROZEN_DICTS:
        # ujson doesn't like frozen_dicts
        return encode_canonical_json(json_object)
    else:
        return json.dumps(json_object, ensure_ascii=False)

# These values are used in the `_enqueue_events` and `_do_fetch` methods to
# control how we batch/bulk fetch events from the database.
# The values are plucked out of thin air to make initial sync run faster
# on jki.re
# TODO: Make these configurable.
EVENT_QUEUE_THREADS = 3  # Max number of threads that will fetch events
Example No. 11
from twisted.internet import defer, reactor
from twisted.internet.protocol import Factory

from .streams import STREAMS_MAP, FederationStream
from .protocol import ServerReplicationStreamProtocol

from synapse.util.metrics import Measure, measure_func

import logging
import synapse.metrics


metrics = synapse.metrics.get_metrics_for(__name__)
stream_updates_counter = metrics.register_counter(
    "stream_updates", labels=["stream_name"]
)
user_sync_counter = metrics.register_counter("user_sync")
federation_ack_counter = metrics.register_counter("federation_ack")
remove_pusher_counter = metrics.register_counter("remove_pusher")
invalidate_cache_counter = metrics.register_counter("invalidate_cache")
user_ip_cache_counter = metrics.register_counter("user_ip_cache")

logger = logging.getLogger(__name__)


class ReplicationStreamProtocolFactory(Factory):
    """Factory for new replication connections.
    """
    def __init__(self, hs):
        self.streamer = ReplicationStreamer(hs)
Example No. 12
from synapse.util.logutils import log_function
from synapse.util.metrics import Measure
from synapse.util.wheel_timer import WheelTimer
from synapse.types import UserID
import synapse.metrics

from ._base import BaseHandler

import logging


logger = logging.getLogger(__name__)

metrics = synapse.metrics.get_metrics_for(__name__)

notified_presence_counter = metrics.register_counter("notified_presence")
federation_presence_out_counter = metrics.register_counter("federation_presence_out")
presence_updates_counter = metrics.register_counter("presence_updates")
timers_fired_counter = metrics.register_counter("timers_fired")
federation_presence_counter = metrics.register_counter("federation_presence")
bump_active_time_counter = metrics.register_counter("bump_active_time")


# If a user was last active in the last LAST_ACTIVE_GRANULARITY, consider them
# "currently_active"
LAST_ACTIVE_GRANULARITY = 60 * 1000

# How long to wait until a new /events or /sync request before assuming
# the client has gone.
SYNC_ONLINE_TIMEOUT = 30 * 1000
Example No. 13
import logging

import synapse.metrics


logger = logging.getLogger(__name__)


metrics = synapse.metrics.get_metrics_for(__name__)

# total number of times we have hit this block
block_counter = metrics.register_counter(
    "block_count",
    labels=["block_name"],
    alternative_names=(
        # the following are all deprecated aliases for the same metric
        metrics.name_prefix + x for x in (
            "_block_timer:count",
            "_block_ru_utime:count",
            "_block_ru_stime:count",
            "_block_db_txn_count:count",
            "_block_db_txn_duration:count",
        )
    )
)

block_timer = metrics.register_counter(
    "block_time_seconds",
    labels=["block_name"],
    alternative_names=(
        metrics.name_prefix + "_block_timer:total",
    ),
)
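
A hedged sketch (not from the original snippet) of the pattern that feeds
these block metrics, using synapse.util.metrics.Measure; the clock argument
and the block name are illustrative:

from synapse.util.metrics import Measure

def do_timed_work(clock):
    with Measure(clock, "do_timed_work"):
        # on exit, Measure updates block_counter and block_timer for
        # the "do_timed_work" block_name label
        pass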
Example No. 14
from synapse.util.logcontext import PreserveLoggingContext, run_in_background
from synapse.util.metrics import Measure
from synapse.types import StreamToken
from synapse.visibility import filter_events_for_client
import synapse.metrics

from collections import namedtuple

import logging


logger = logging.getLogger(__name__)

metrics = synapse.metrics.get_metrics_for(__name__)

notified_events_counter = metrics.register_counter("notified_events")

users_woken_by_stream_counter = metrics.register_counter(
    "users_woken_by_stream", labels=["stream"]
)


# TODO(paul): Should be shared somewhere
def count(func, l):
    """Return the number of items in l for which func returns true."""
    n = 0
    for x in l:
        if func(x):
            n += 1
    return n
Example No. 15
from synapse.events import FrozenEvent
import synapse.metrics

from synapse.api.errors import FederationError, SynapseError

from synapse.crypto.event_signing import compute_event_signature

import logging


logger = logging.getLogger(__name__)

# synapse.federation.federation_server is a silly name
metrics = synapse.metrics.get_metrics_for("synapse.federation.server")

received_pdus_counter = metrics.register_counter("received_pdus")

received_edus_counter = metrics.register_counter("received_edus")

received_queries_counter = metrics.register_counter("received_queries", labels=["type"])


class FederationServer(FederationBase):
    def set_handler(self, handler):
        """Sets the handler that the replication layer will use to communicate
        receipt of new PDUs from other home servers. The required methods are
        documented on :py:class:`.ReplicationHandler`.
        """
        self.handler = handler

    def register_edu_handler(self, edu_type, handler):
Example No. 16
from synapse.util.async import ObservableDeferred
from synapse.util.logcontext import PreserveLoggingContext
from synapse.types import StreamToken
from synapse.visibility import filter_events_for_client
import synapse.metrics

from collections import namedtuple

import logging


logger = logging.getLogger(__name__)

metrics = synapse.metrics.get_metrics_for(__name__)

notified_events_counter = metrics.register_counter("notified_events")


# TODO(paul): Should be shared somewhere
def count(func, l):
    """Return the number of items in l for which func returns true."""
    n = 0
    for x in l:
        if func(x):
            n += 1
    return n


class _NotificationListener(object):
    """ This represents a single client connection to the events stream.
    The events stream handler will have yielded to the deferred, so to
Example No. 17
from synapse.api.constants import EventTypes

from canonicaljson import encode_canonical_json
from collections import deque, namedtuple

import synapse
import synapse.metrics
from synapse.events import USE_FROZEN_DICTS  # needed by encode_json below

import logging
import math
import ujson as json

logger = logging.getLogger(__name__)

metrics = synapse.metrics.get_metrics_for(__name__)
persist_event_counter = metrics.register_counter("persisted_events")


def encode_json(json_object):
    if USE_FROZEN_DICTS:
        # ujson doesn't like frozen_dicts
        return encode_canonical_json(json_object)
    else:
        return json.dumps(json_object, ensure_ascii=False)


# These values are used in the `_enqueue_events` and `_do_fetch` methods to
# control how we batch/bulk fetch events from the database.
# The values are plucked out of thin air to make initial sync run faster
# on jki.re
# TODO: Make these configurable.
Example No. 18
    SyncCommand,
)
from .streams import STREAMS_MAP

from synapse.util.stringutils import random_string
from synapse.metrics.metric import CounterMetric

import logging
import synapse.metrics
import struct
import fcntl

metrics = synapse.metrics.get_metrics_for(__name__)

connection_close_counter = metrics.register_counter(
    "close_reason",
    labels=["reason_type"],
)

# A list of all connected protocols. This allows us to send metrics about the
# connections.
connected_connections = []

logger = logging.getLogger(__name__)

PING_TIME = 5000
PING_TIMEOUT_MULTIPLIER = 5
PING_TIMEOUT_MS = PING_TIME * PING_TIMEOUT_MULTIPLIER


class ConnectionStates(object):
    CONNECTING = "connecting"
Example No. 19
)
from .streams import STREAMS_MAP

from synapse.util.stringutils import random_string
from synapse.metrics.metric import CounterMetric

import logging
import synapse.metrics
import struct
import fcntl


metrics = synapse.metrics.get_metrics_for(__name__)

connection_close_counter = metrics.register_counter(
    "close_reason", labels=["reason_type"],
)


# A list of all connected protocols. This allows us to send metrics about the
# connections.
connected_connections = []


logger = logging.getLogger(__name__)


PING_TIME = 5000
PING_TIMEOUT_MULTIPLIER = 5
PING_TIMEOUT_MS = PING_TIME * PING_TIMEOUT_MULTIPLIER
Example No. 20
from twisted.web.http_headers import Headers
from twisted.web._newclient import ResponseDone

from StringIO import StringIO

import simplejson as json
import logging
import urllib

import synapse.metrics


logger = logging.getLogger(__name__)

metrics = synapse.metrics.get_metrics_for(__name__)

outgoing_requests_counter = metrics.register_counter(
    "requests",
    labels=["method"],
)
incoming_responses_counter = metrics.register_counter(
    "responses",
    labels=["method", "code"],
)
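
# A hedged sketch of where these counters are typically bumped in the
# request path (illustrative; method and response are assumed names):
def _http_metrics_sketch(method, response):
    outgoing_requests_counter.inc(method)  # just before sending the request
    incoming_responses_counter.inc(method, response.code)  # on response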


class SimpleHttpClient(object):
    """
    A simple, no-frills HTTP client with methods that wrap up common ways of
    using HTTP in Matrix
    """
    def __init__(self, hs):
        self.hs = hs
        # The default context factory in Twisted 14.0.0 (which we require) is
Example No. 21
import synapse.metrics
from synapse.util.logcontext import LoggingContext

logger = logging.getLogger(__name__)

metrics = synapse.metrics.get_metrics_for("synapse.http.server")

# total number of responses served, split by method/servlet/tag
response_count = metrics.register_counter(
    "response_count",
    labels=["method", "servlet", "tag"],
    alternative_names=(
        # the following are all deprecated aliases for the same metric
        metrics.name_prefix + x for x in (
            "_requests",
            "_response_time:count",
            "_response_ru_utime:count",
            "_response_ru_stime:count",
            "_response_db_txn_count:count",
            "_response_db_txn_duration:count",
        )
    )
)

requests_counter = metrics.register_counter(
    "requests_received",
    labels=["method", "servlet", ],
)

outgoing_responses_counter = metrics.register_counter(
    "responses",