def __init__(self, hs):
    """Set up the replication streamer: resolve dependencies from the
    homeserver, build the set of streams clients may subscribe to, and
    register connection/stream gauges with the metrics collector.
    """
    self.store = hs.get_datastore()
    self.presence_handler = hs.get_presence_handler()
    self.clock = hs.get_clock()
    self.notifier = hs.get_notifier()

    # Current connections.
    self.connections = []

    metrics.register_callback("total_connections", lambda: len(self.connections))

    # List of streams that clients can subscribe to.
    # We only support the federation stream if federation sending has been
    # disabled on the master.
    self.streams = [
        # FIX: use .values() rather than the Python-2-only .itervalues()
        # so this also runs on Python 3 (behaviour is identical here).
        stream(hs) for stream in STREAMS_MAP.values()
        if stream != FederationStream or not hs.config.send_federation
    ]

    self.streams_by_name = {stream.NAME: stream for stream in self.streams}

    # Gauge of connections subscribed per stream, labelled by stream name.
    metrics.register_callback(
        "connections_per_stream",
        lambda: {
            (stream_name,): len([
                conn for conn in self.connections
                if stream_name in conn.replication_streams
            ])
            for stream_name in self.streams_by_name
        },
        labels=["stream_name"],
    )

    # If the master is not sending federation traffic itself, hold a
    # federation sender so replication can feed it.
    self.federation_sender = None
    if not hs.config.send_federation:
        self.federation_sender = hs.get_federation_sender()

    self.notifier.add_replication_callback(self.on_notifier_poke)

    # Keeps track of whether we are currently checking for updates
    self.is_looping = False
    self.pending_updates = False

    reactor.addSystemEventTrigger("before", "shutdown", self.on_shutdown)
def __init__(self, hs):
    """Initialise the notifier: per-user/room/appservice stream maps, a
    periodic cleanup of unused streams, and listener-count metrics.
    """
    self.hs = hs

    # Maps of who is listening. Presumably user_id -> stream object,
    # room_id -> set of streams, appservice -> set of streams — confirm
    # against the methods that populate these.
    self.user_to_user_stream = {}
    self.room_to_user_streams = {}
    self.appservice_to_user_streams = {}

    self.event_sources = hs.get_event_sources()
    self.store = hs.get_datastore()
    self.pending_new_room_events = []

    self.clock = hs.get_clock()
    hs.get_distributor().observe(
        "user_joined_room", self._user_joined_room
    )

    # Periodically drop streams that nobody is listening on any more.
    self.clock.looping_call(
        self.remove_expired_streams, self.UNUSED_STREAM_EXPIRY_MS
    )

    self.replication_deferred = ObservableDeferred(defer.Deferred())

    # This is not a very cheap test to perform, but it's only executed
    # when rendering the metrics page, which is likely once per minute at
    # most when scraping it.
    def count_listeners():
        # Collect the distinct set of user streams across all three maps.
        all_user_streams = set()

        for x in self.room_to_user_streams.values():
            all_user_streams |= x
        for x in self.user_to_user_stream.values():
            all_user_streams.add(x)
        for x in self.appservice_to_user_streams.values():
            all_user_streams |= x

        return sum(stream.count_listeners() for stream in all_user_streams)
    metrics.register_callback("listeners", count_listeners)

    # `count(bool, ...)` counts only non-empty listener sets.
    metrics.register_callback(
        "rooms",
        lambda: count(bool, self.room_to_user_streams.values()),
    )
    metrics.register_callback(
        "users",
        lambda: len(self.user_to_user_stream),
    )
    metrics.register_callback(
        "appservices",
        lambda: count(bool, self.appservice_to_user_streams.values()),
    )
def __init__(self, hs, transport_layer):
    """Set up per-destination transaction bookkeeping and register
    queue-size metrics for pending PDUs/EDUs.
    """
    self.server_name = hs.hostname

    self.store = hs.get_datastore()
    self.transaction_actions = TransactionActions(self.store)
    self.transport_layer = transport_layer

    self._clock = hs.get_clock()

    # Is a mapping from destinations -> deferreds. Used to keep track
    # of which destinations have transactions in flight and when they are
    # done
    self.pending_transactions = {}

    metrics.register_callback(
        "pending_destinations",
        lambda: len(self.pending_transactions),
    )

    # Is a mapping from destination -> list of
    # tuple(pending pdus, deferred, order). The local binding lets the
    # metric lambdas below close over the dict directly.
    pending_pdus = {}
    self.pending_pdus_by_dest = pending_pdus

    # destination -> list of tuple(edu, deferred)
    pending_edus = {}
    self.pending_edus_by_dest = pending_edus

    metrics.register_callback(
        "pending_pdus",
        lambda: sum(len(queued) for queued in pending_pdus.values()),
    )
    metrics.register_callback(
        "pending_edus",
        lambda: sum(len(queued) for queued in pending_edus.values()),
    )

    # destination -> list of tuple(failure, deferred)
    self.pending_failures_by_dest = {}

    # HACK to get unique tx id
    self._next_txn_id = int(self._clock.time_msec())
def __init__(self, hs):
    """Wire up the notifier: stream maps keyed by user/room/appservice,
    an expiry loop for unused streams, and metrics callbacks.
    """
    self.hs = hs

    # Listener bookkeeping. Presumably user_id -> stream object,
    # room_id -> set of streams, appservice -> set of streams — verify
    # against the code that populates these maps.
    self.user_to_user_stream = {}
    self.room_to_user_streams = {}
    self.appservice_to_user_streams = {}

    self.event_sources = hs.get_event_sources()
    self.store = hs.get_datastore()
    self.pending_new_room_events = []

    self.clock = hs.get_clock()
    hs.get_distributor().observe("user_joined_room", self._user_joined_room)

    # Periodically reap streams that have gone unused.
    self.clock.looping_call(self.remove_expired_streams, self.UNUSED_STREAM_EXPIRY_MS)

    self.replication_deferred = ObservableDeferred(defer.Deferred())

    # This is not a very cheap test to perform, but it's only executed
    # when rendering the metrics page, which is likely once per minute at
    # most when scraping it.
    def count_listeners():
        # Union every user stream referenced from all three maps, then
        # total their listener counts.
        all_user_streams = set()

        for x in self.room_to_user_streams.values():
            all_user_streams |= x
        for x in self.user_to_user_stream.values():
            all_user_streams.add(x)
        for x in self.appservice_to_user_streams.values():
            all_user_streams |= x

        return sum(stream.count_listeners() for stream in all_user_streams)
    metrics.register_callback("listeners", count_listeners)

    # `count(bool, ...)` counts only the non-empty stream sets.
    metrics.register_callback(
        "rooms",
        lambda: count(bool, self.room_to_user_streams.values()),
    )
    metrics.register_callback(
        "users",
        lambda: len(self.user_to_user_stream),
    )
    metrics.register_callback(
        "appservices",
        lambda: count(bool, self.appservice_to_user_streams.values()),
    )
def __init__(self, hs):
    """Set up the federation transaction queue: per-destination pending
    maps (PDUs, EDUs, presence, keyed EDUs), queue-size metrics, and
    transaction-id/state bookkeeping.
    """
    self.server_name = hs.hostname

    self.store = hs.get_datastore()
    self.state = hs.get_state_handler()
    self.transaction_actions = TransactionActions(self.store)
    self.transport_layer = hs.get_federation_transport_client()

    self.clock = hs.get_clock()
    self.is_mine_id = hs.is_mine_id

    # Is a mapping from destinations -> deferreds. Used to keep track
    # of which destinations have transactions in flight and when they are
    # done
    self.pending_transactions = {}

    metrics.register_callback(
        "pending_destinations",
        lambda: len(self.pending_transactions),
    )

    # Is a mapping from destination -> list of
    # tuple(pending pdus, deferred, order). The local aliases (pdus, edus,
    # presence, edus_keyed) let the metric lambdas close over the dicts
    # without going through self.
    self.pending_pdus_by_dest = pdus = {}

    # destination -> list of tuple(edu, deferred)
    self.pending_edus_by_dest = edus = {}

    # Presence needs to be separate as we send single aggregate EDUs
    self.pending_presence_by_dest = presence = {}
    self.pending_edus_keyed_by_dest = edus_keyed = {}

    metrics.register_callback(
        "pending_pdus",
        lambda: sum(map(len, pdus.values())),
    )
    metrics.register_callback(
        "pending_edus",
        lambda: (
            sum(map(len, edus.values()))
            + sum(map(len, presence.values()))
            + sum(map(len, edus_keyed.values()))
        ),
    )

    # destination -> list of tuple(failure, deferred)
    self.pending_failures_by_dest = {}

    # Per-destination positions of the last successfully sent to-device /
    # device-list streams — presumably stream ids; confirm against callers.
    self.last_device_stream_id_by_dest = {}
    self.last_device_list_stream_id_by_dest = {}

    # HACK to get unique tx id
    self._next_txn_id = int(self.clock.time_msec())

    self._order = 1

    self._is_processing = False
    self._last_poked_id = -1
def __init__(self, hs):
    """Initialise the notifier's listener maps and register gauges that
    report how many listeners are attached.
    """
    self.hs = hs

    # room_id / user / appservice -> set of listeners.
    self.room_to_listeners = {}
    self.user_to_listeners = {}
    self.appservice_to_listeners = {}

    self.event_sources = hs.get_event_sources()
    self.clock = hs.get_clock()

    hs.get_distributor().observe("user_joined_room", self._user_joined_room)

    # This is not a very cheap test to perform, but it's only executed
    # when rendering the metrics page, which is likely once per minute at
    # most when scraping it.
    def count_listeners():
        # A listener may appear in several maps; count each once.
        distinct = set()
        for mapping in (
            self.room_to_listeners,
            self.user_to_listeners,
            self.appservice_to_listeners,
        ):
            for listeners in mapping.values():
                distinct |= listeners
        return len(distinct)

    metrics.register_callback("listeners", count_listeners)

    # `count(bool, ...)` counts only the non-empty listener sets.
    metrics.register_callback(
        "rooms",
        lambda: count(bool, self.room_to_listeners.values()),
    )
    metrics.register_callback(
        "users",
        lambda: count(bool, self.user_to_listeners.values()),
    )
    metrics.register_callback(
        "appservices",
        lambda: count(bool, self.appservice_to_listeners.values()),
    )
def __init__(self, hs):
    """Set up the notifier's listener maps and register listener-count
    metrics callbacks.
    """
    self.hs = hs

    # room_id / user / appservice -> set of listeners (sets, given the
    # |= unions below).
    self.room_to_listeners = {}
    self.user_to_listeners = {}
    self.appservice_to_listeners = {}

    self.event_sources = hs.get_event_sources()

    self.clock = hs.get_clock()

    hs.get_distributor().observe(
        "user_joined_room", self._user_joined_room
    )

    # This is not a very cheap test to perform, but it's only executed
    # when rendering the metrics page, which is likely once per minute at
    # most when scraping it.
    def count_listeners():
        # Union all listener sets so shared listeners count once.
        all_listeners = set()

        for x in self.room_to_listeners.values():
            all_listeners |= x
        for x in self.user_to_listeners.values():
            all_listeners |= x
        for x in self.appservice_to_listeners.values():
            all_listeners |= x

        return len(all_listeners)
    metrics.register_callback("listeners", count_listeners)

    # `count(bool, ...)` counts only the non-empty listener sets.
    metrics.register_callback(
        "rooms",
        lambda: count(bool, self.room_to_listeners.values()),
    )
    metrics.register_callback(
        "users",
        lambda: count(bool, self.user_to_listeners.values()),
    )
    metrics.register_callback(
        "appservices",
        lambda: count(bool, self.appservice_to_listeners.values()),
    )
def __init__(self, hs, transport_layer):
    """Set up per-destination transaction state and register queue-size
    metrics for pending PDUs and EDUs.
    """
    self.server_name = hs.hostname

    self.store = hs.get_datastore()
    self.transaction_actions = TransactionActions(self.store)
    self.transport_layer = transport_layer

    self._clock = hs.get_clock()

    # Is a mapping from destinations -> deferreds. Used to keep track
    # of which destinations have transactions in flight and when they are
    # done
    self.pending_transactions = {}

    metrics.register_callback(
        "pending_destinations",
        lambda: len(self.pending_transactions),
    )

    # Is a mapping from destination -> list of
    # tuple(pending pdus, deferred, order). The local aliases let the
    # metric lambdas below close over the dicts directly.
    self.pending_pdus_by_dest = pdus = {}

    # destination -> list of tuple(edu, deferred)
    self.pending_edus_by_dest = edus = {}

    metrics.register_callback(
        "pending_pdus",
        lambda: sum(map(len, pdus.values())),
    )
    metrics.register_callback(
        "pending_edus",
        lambda: sum(map(len, edus.values())),
    )

    # destination -> list of tuple(failure, deferred)
    self.pending_failures_by_dest = {}

    # HACK to get unique tx id
    self._next_txn_id = int(self._clock.time_msec())
def __init__(self, hs):
    """Set up the presence handler: federation EDU handlers, the in-memory
    presence state prefilled from the DB, timeout wheel timers, sync
    tracking (local and external processes), and metrics.
    """
    self.is_mine = hs.is_mine
    self.is_mine_id = hs.is_mine_id
    self.clock = hs.get_clock()
    self.store = hs.get_datastore()
    self.wheel_timer = WheelTimer()
    self.notifier = hs.get_notifier()
    self.replication = hs.get_replication_layer()
    self.federation = hs.get_federation_sender()

    self.state = hs.get_state_handler()

    # Incoming federation presence EDUs are routed to our handlers. The
    # invite/accept/deny handlers unpack user IDs from the EDU content.
    self.replication.register_edu_handler("m.presence", self.incoming_presence)
    self.replication.register_edu_handler(
        "m.presence_invite",
        lambda origin, content: self.invite_presence(
            observed_user=UserID.from_string(content["observed_user"]),
            observer_user=UserID.from_string(content["observer_user"]),
        )
    )
    self.replication.register_edu_handler(
        "m.presence_accept",
        lambda origin, content: self.accept_presence(
            observed_user=UserID.from_string(content["observed_user"]),
            observer_user=UserID.from_string(content["observer_user"]),
        )
    )
    self.replication.register_edu_handler(
        "m.presence_deny",
        lambda origin, content: self.deny_presence(
            observed_user=UserID.from_string(content["observed_user"]),
            observer_user=UserID.from_string(content["observer_user"]),
        )
    )

    distributor = hs.get_distributor()
    distributor.observe("user_joined_room", self.user_joined_room)

    active_presence = self.store.take_presence_startup_info()

    # A dictionary of the current state of users. This is prefilled with
    # non-offline presence from the DB. We should fetch from the DB if
    # we can't find a users presence in here.
    self.user_to_current_state = {
        state.user_id: state
        for state in active_presence
    }

    metrics.register_callback("user_to_current_state_size", lambda: len(self.user_to_current_state))

    # Schedule timeout checks for every prefilled user: idle and sync
    # timeouts always; federation ping for local users, federation timeout
    # for remote users.
    now = self.clock.time_msec()
    for state in active_presence:
        self.wheel_timer.insert(
            now=now,
            obj=state.user_id,
            then=state.last_active_ts + IDLE_TIMER,
        )
        self.wheel_timer.insert(
            now=now,
            obj=state.user_id,
            then=state.last_user_sync_ts + SYNC_ONLINE_TIMEOUT,
        )
        if self.is_mine_id(state.user_id):
            self.wheel_timer.insert(
                now=now,
                obj=state.user_id,
                then=state.last_federation_update_ts + FEDERATION_PING_INTERVAL,
            )
        else:
            self.wheel_timer.insert(
                now=now,
                obj=state.user_id,
                then=state.last_federation_update_ts + FEDERATION_TIMEOUT,
            )

    # Set of users who have presence in the `user_to_current_state` that
    # have not yet been persisted
    self.unpersisted_users_changes = set()

    # Flush unpersisted changes on shutdown.
    reactor.addSystemEventTrigger("before", "shutdown", self._on_shutdown)

    self.serial_to_user = {}
    self._next_serial = 1

    # Keeps track of the number of *ongoing* syncs on this process. While
    # this is non zero a user will never go offline.
    self.user_to_num_current_syncs = {}

    # Keeps track of the number of *ongoing* syncs on other processes.
    # While any sync is ongoing on another process the user will never
    # go offline.
    # Each process has a unique identifier and an update frequency. If
    # no update is received from that process within the update period then
    # we assume that all the sync requests on that process have stopped.
    # Stored as a dict from process_id to set of user_id, and a dict of
    # process_id to millisecond timestamp last updated.
    self.external_process_to_current_syncs = {}
    self.external_process_last_updated_ms = {}

    # Start a LoopingCall in 30s that fires every 5s.
    # The initial delay is to allow disconnected clients a chance to
    # reconnect before we treat them as offline.
    self.clock.call_later(
        30,
        self.clock.looping_call,
        self._handle_timeouts,
        5000,
    )

    # Persist any unpersisted presence changes every 60s, starting in 60s.
    self.clock.call_later(
        60,
        self.clock.looping_call,
        self._persist_unpersisted_changes,
        60 * 1000,
    )

    metrics.register_callback("wheel_timer_size", lambda: len(self.wheel_timer))
def register(name, queue):
    """Register a size gauge for *queue* under ``<name>_size``.

    The lambda closes over *queue*, so the gauge reports the container's
    live length each time metrics are scraped.
    """
    metrics.register_callback(
        # FIX: the original read `queue_name + "_size"`, referencing a name
        # not defined in this function — use the `name` parameter instead.
        name + "_size",
        lambda: len(queue),
    )
# Map from (method, name) -> int, the number of in flight requests of that # type counts = {} for rm in _in_flight_requests: key = ( rm.method, rm.name, ) counts[key] = counts.get(key, 0) + 1 return counts metrics.register_callback("in_flight_requests_count", _get_in_flight_counts, labels=["method", "servlet"]) class RequestMetrics(object): def start(self, time_msec, name, method): self.start = time_msec self.start_context = LoggingContext.current_context() self.name = name self.method = method self._request_stats = _RequestStats.from_context(self.start_context) _in_flight_requests.add(self) def stop(self, time_msec, request):
def __init__(self, hs):
    """Set up the legacy presence handler: distributor signal wiring,
    federation presence EDU handlers, and the in-memory push/recv/cache
    maps, plus a cache-size metric.
    """
    super(PresenceHandler, self).__init__(hs)

    self.homeserver = hs

    self.clock = hs.get_clock()

    distributor = hs.get_distributor()
    distributor.observe("registered_user", self.registered_user)

    distributor.observe(
        "started_user_eventstream", self.started_user_eventstream
    )
    distributor.observe(
        "stopped_user_eventstream", self.stopped_user_eventstream
    )

    distributor.observe("user_joined_room", self.user_joined_room)

    distributor.declare("collect_presencelike_data")
    distributor.declare("changed_presencelike_data")
    distributor.observe(
        "changed_presencelike_data", self.changed_presencelike_data
    )

    # outbound signal from the presence module to advertise when a user's
    # presence has changed
    distributor.declare("user_presence_changed")

    self.distributor = distributor

    self.federation = hs.get_replication_layer()

    # Route incoming federation presence EDUs to our handlers; the
    # invite/accept/deny handlers unpack user IDs from the EDU content.
    self.federation.register_edu_handler(
        "m.presence", self.incoming_presence
    )
    self.federation.register_edu_handler(
        "m.presence_invite",
        lambda origin, content: self.invite_presence(
            observed_user=UserID.from_string(content["observed_user"]),
            observer_user=UserID.from_string(content["observer_user"]),
        )
    )
    self.federation.register_edu_handler(
        "m.presence_accept",
        lambda origin, content: self.accept_presence(
            observed_user=UserID.from_string(content["observed_user"]),
            observer_user=UserID.from_string(content["observer_user"]),
        )
    )
    self.federation.register_edu_handler(
        "m.presence_deny",
        lambda origin, content: self.deny_presence(
            observed_user=UserID.from_string(content["observed_user"]),
            observer_user=UserID.from_string(content["observer_user"]),
        )
    )

    # IN-MEMORY store, mapping local userparts to sets of local users to
    # be informed of state changes.
    self._local_pushmap = {}
    # map local users to sets of remote /domain names/ who are interested
    # in them
    self._remote_sendmap = {}
    # map remote users to sets of local users who're interested in them
    self._remote_recvmap = {}
    # list of (serial, set of(userids)) tuples, ordered by serial, latest
    # first
    self._remote_offline_serials = []

    # map any user to a UserPresenceCache
    self._user_cachemap = {}
    self._user_cachemap_latest_serial = 0

    # map room_ids to the latest presence serial for a member of that
    # room
    self._room_serials = {}

    metrics.register_callback(
        "userCachemap:size",
        lambda: len(self._user_cachemap),
    )
def __init__(self, hs):
    """Set up the presence handler: federation presence EDU handlers, the
    in-memory presence state prefilled from the DB, timeout wheel timers,
    sync tracking, and metrics.
    """
    super(PresenceHandler, self).__init__(hs)
    self.hs = hs
    self.clock = hs.get_clock()
    self.store = hs.get_datastore()
    self.wheel_timer = WheelTimer()
    self.notifier = hs.get_notifier()

    self.federation = hs.get_replication_layer()

    # Route incoming federation presence EDUs to our handlers; the
    # invite/accept/deny handlers unpack user IDs from the EDU content.
    self.federation.register_edu_handler(
        "m.presence", self.incoming_presence
    )
    self.federation.register_edu_handler(
        "m.presence_invite",
        lambda origin, content: self.invite_presence(
            observed_user=UserID.from_string(content["observed_user"]),
            observer_user=UserID.from_string(content["observer_user"]),
        )
    )
    self.federation.register_edu_handler(
        "m.presence_accept",
        lambda origin, content: self.accept_presence(
            observed_user=UserID.from_string(content["observed_user"]),
            observer_user=UserID.from_string(content["observer_user"]),
        )
    )
    self.federation.register_edu_handler(
        "m.presence_deny",
        lambda origin, content: self.deny_presence(
            observed_user=UserID.from_string(content["observed_user"]),
            observer_user=UserID.from_string(content["observer_user"]),
        )
    )

    distributor = hs.get_distributor()
    distributor.observe("user_joined_room", self.user_joined_room)

    active_presence = self.store.take_presence_startup_info()

    # A dictionary of the current state of users. This is prefilled with
    # non-offline presence from the DB. We should fetch from the DB if
    # we can't find a users presence in here.
    self.user_to_current_state = {
        state.user_id: state
        for state in active_presence
    }

    metrics.register_callback(
        "user_to_current_state_size", lambda: len(self.user_to_current_state)
    )

    # Schedule timeout checks for every prefilled user: idle and sync
    # timeouts always; federation ping for local users, federation timeout
    # for remote users.
    now = self.clock.time_msec()
    for state in active_presence:
        self.wheel_timer.insert(
            now=now,
            obj=state.user_id,
            then=state.last_active_ts + IDLE_TIMER,
        )
        self.wheel_timer.insert(
            now=now,
            obj=state.user_id,
            then=state.last_user_sync_ts + SYNC_ONLINE_TIMEOUT,
        )
        if self.hs.is_mine_id(state.user_id):
            self.wheel_timer.insert(
                now=now,
                obj=state.user_id,
                then=state.last_federation_update_ts + FEDERATION_PING_INTERVAL,
            )
        else:
            self.wheel_timer.insert(
                now=now,
                obj=state.user_id,
                then=state.last_federation_update_ts + FEDERATION_TIMEOUT,
            )

    # Set of users who have presence in the `user_to_current_state` that
    # have not yet been persisted
    self.unpersisted_users_changes = set()

    reactor.addSystemEventTrigger("before", "shutdown", self._on_shutdown)

    self.serial_to_user = {}
    self._next_serial = 1

    # Keeps track of the number of *ongoing* syncs. While this is non zero
    # a user will never go offline.
    self.user_to_num_current_syncs = {}

    # Start a LoopingCall in 30s that fires every 5s.
    # The initial delay is to allow disconnected clients a chance to
    # reconnect before we treat them as offline.
    self.clock.call_later(
        # FIX: the original passed `0 * 1000` (i.e. no delay), contradicting
        # the comment above — the initial delay must actually be 30s so
        # reconnecting clients aren't immediately marked offline.
        30,
        self.clock.looping_call,
        self._handle_timeouts,
        5000,
    )

    metrics.register_callback("wheel_timer_size", lambda: len(self.wheel_timer))
# NOTE(review): this chunk begins mid-method — the first lines are the tail
# of a call (and the ReplicateCommand send) inside an enclosing method whose
# `def` line is outside this view.
            self.id(), stream_name, token
        )
        self.send_command(ReplicateCommand(stream_name, token))

    def on_connection_closed(self):
        # Run the base class teardown, then detach this connection from
        # the handler.
        BaseReplicationStreamProtocol.on_connection_closed(self)
        self.handler.update_connection(None)


# The following simply registers metrics for the replication connections
metrics.register_callback(
    "pending_commands",
    lambda: {
        (p.name, p.conn_id): len(p.pending_commands)
        for p in connected_connections
    },
    labels=["name", "conn_id"],
)


def transport_buffer_size(protocol):
    """Return the number of bytes queued on the protocol's transport
    (buffered data plus the transport's temporary buffer), or 0 if the
    transport is gone.
    """
    if protocol.transport:
        size = len(protocol.transport.dataBuffer) + protocol.transport._tempDataLen
        return size
    return 0


# NOTE(review): this registration continues beyond this chunk.
metrics.register_callback(
    "transport_send_buffer",
    lambda: {
def __init__(self, hs):
    """Set up the federation transaction queue: per-destination pending
    maps (PDUs, EDUs, presence, keyed EDUs), device-stream positions,
    queue-size metrics, and transaction bookkeeping.
    """
    self.server_name = hs.hostname

    self.store = hs.get_datastore()
    self.state = hs.get_state_handler()
    self.transaction_actions = TransactionActions(self.store)
    self.transport_layer = hs.get_federation_transport_client()

    self.clock = hs.get_clock()
    self.is_mine_id = hs.is_mine_id

    # Is a mapping from destinations -> deferreds. Used to keep track
    # of which destinations have transactions in flight and when they are
    # done
    self.pending_transactions = {}

    metrics.register_callback(
        "pending_destinations",
        lambda: len(self.pending_transactions),
    )

    # Is a mapping from destination -> list of
    # tuple(pending pdus, deferred, order). The local aliases (pdus, edus,
    # presence, edus_keyed) let the metric lambdas below close over the
    # dicts without going through self.
    self.pending_pdus_by_dest = pdus = {}

    # destination -> list of tuple(edu, deferred)
    self.pending_edus_by_dest = edus = {}

    # Map of user_id -> UserPresenceState for all the pending presence
    # to be sent out by user_id. Entries here get processed and put in
    # pending_presence_by_dest
    self.pending_presence = {}

    # Map of destination -> user_id -> UserPresenceState of pending presence
    # to be sent to each destination
    self.pending_presence_by_dest = presence = {}

    # Pending EDUs by their "key". Keyed EDUs are EDUs that get clobbered
    # based on their key (e.g. typing events by room_id)
    # Map of destination -> (edu_type, key) -> Edu
    self.pending_edus_keyed_by_dest = edus_keyed = {}

    metrics.register_callback(
        "pending_pdus",
        lambda: sum(map(len, pdus.values())),
    )
    metrics.register_callback(
        "pending_edus",
        lambda: (
            sum(map(len, edus.values()))
            + sum(map(len, presence.values()))
            + sum(map(len, edus_keyed.values()))
        ),
    )

    # destination -> list of tuple(failure, deferred)
    self.pending_failures_by_dest = {}

    # destination -> stream_id of last successfully sent to-device message.
    # NB: may be a long or an int.
    self.last_device_stream_id_by_dest = {}

    # destination -> stream_id of last successfully sent device list
    # update.
    self.last_device_list_stream_id_by_dest = {}

    # HACK to get unique tx id
    self._next_txn_id = int(self.clock.time_msec())

    self._order = 1

    self._is_processing = False
    self._last_poked_id = -1

    self._processing_pending_presence = False
def __init__(self, hs):
    """Set up the legacy presence handler: distributor signals, federation
    presence EDU handlers, and the in-memory push/recv/cache maps, plus a
    cache-size metric.
    """
    super(PresenceHandler, self).__init__(hs)

    self.homeserver = hs

    self.clock = hs.get_clock()

    distributor = hs.get_distributor()
    distributor.observe("registered_user", self.registered_user)

    distributor.observe("started_user_eventstream", self.started_user_eventstream)
    distributor.observe("stopped_user_eventstream", self.stopped_user_eventstream)

    distributor.observe("user_joined_room", self.user_joined_room)

    distributor.declare("collect_presencelike_data")
    distributor.declare("changed_presencelike_data")
    distributor.observe("changed_presencelike_data", self.changed_presencelike_data)

    # outbound signal from the presence module to advertise when a user's
    # presence has changed
    distributor.declare("user_presence_changed")

    self.distributor = distributor

    self.federation = hs.get_replication_layer()

    # Route incoming federation presence EDUs to our handlers; the
    # invite/accept/deny handlers unpack user IDs from the EDU content.
    self.federation.register_edu_handler("m.presence", self.incoming_presence)
    self.federation.register_edu_handler(
        "m.presence_invite",
        lambda origin, content: self.invite_presence(
            observed_user=UserID.from_string(content["observed_user"]),
            observer_user=UserID.from_string(content["observer_user"]),
        ))
    self.federation.register_edu_handler(
        "m.presence_accept",
        lambda origin, content: self.accept_presence(
            observed_user=UserID.from_string(content["observed_user"]),
            observer_user=UserID.from_string(content["observer_user"]),
        ))
    self.federation.register_edu_handler(
        "m.presence_deny",
        lambda origin, content: self.deny_presence(
            observed_user=UserID.from_string(content["observed_user"]),
            observer_user=UserID.from_string(content["observer_user"]),
        ))

    # IN-MEMORY store, mapping local userparts to sets of local users to
    # be informed of state changes.
    self._local_pushmap = {}
    # map local users to sets of remote /domain names/ who are interested
    # in them
    self._remote_sendmap = {}
    # map remote users to sets of local users who're interested in them
    self._remote_recvmap = {}
    # list of (serial, set of(userids)) tuples, ordered by serial, latest
    # first
    self._remote_offline_serials = []

    # map any user to a UserPresenceCache
    self._user_cachemap = {}
    self._user_cachemap_latest_serial = 0

    # map room_ids to the latest presence serial for a member of that
    # room
    self._room_serials = {}

    metrics.register_callback(
        "userCachemap:size",
        lambda: len(self._user_cachemap),
    )
# NOTE(review): this chunk begins mid-method — the logger/send_command lines
# are the tail of an enclosing method whose `def` line is outside this view.
        logger.info(
            "[%s] Subscribing to replication stream: %r from %r",
            self.id(), stream_name, token
        )
        self.send_command(ReplicateCommand(stream_name, token))

    def on_connection_closed(self):
        # Run the base class teardown, then detach this connection from
        # the handler.
        BaseReplicationStreamProtocol.on_connection_closed(self)
        self.handler.update_connection(None)


# The following simply registers metrics for the replication connections
metrics.register_callback(
    "pending_commands",
    lambda: {
        (p.name, p.conn_id): len(p.pending_commands)
        for p in connected_connections
    },
    labels=["name", "conn_id"],
)


def transport_buffer_size(protocol):
    """Return the number of bytes queued on the protocol's transport
    (buffered data plus the transport's temporary buffer), or 0 if the
    transport is gone.
    """
    if protocol.transport:
        size = len(
            protocol.transport.dataBuffer) + protocol.transport._tempDataLen
        return size
    return 0


# NOTE(review): this registration continues beyond this chunk.
metrics.register_callback(
    "transport_send_buffer",
    lambda: {
        (p.name, p.conn_id): transport_buffer_size(p)
def __init__(self, hs):
    """Set up the federation transaction queue: per-destination pending
    maps (PDUs, EDUs, presence, keyed EDUs), device-stream positions,
    queue-size metrics, and transaction bookkeeping.
    """
    self.server_name = hs.hostname

    self.store = hs.get_datastore()
    self.state = hs.get_state_handler()
    self.transaction_actions = TransactionActions(self.store)
    self.transport_layer = hs.get_federation_transport_client()

    self.clock = hs.get_clock()
    self.is_mine_id = hs.is_mine_id

    # Is a mapping from destinations -> deferreds. Used to keep track
    # of which destinations have transactions in flight and when they are
    # done
    self.pending_transactions = {}

    metrics.register_callback(
        "pending_destinations",
        lambda: len(self.pending_transactions),
    )

    # Is a mapping from destination -> list of
    # tuple(pending pdus, deferred, order). The local aliases let the
    # metric lambdas below close over the dicts without going through self.
    self.pending_pdus_by_dest = pdus = {}

    # destination -> list of tuple(edu, deferred)
    self.pending_edus_by_dest = edus = {}

    # Map of user_id -> UserPresenceState for all the pending presence
    # to be sent out by user_id. Entries here get processed and put in
    # pending_presence_by_dest
    self.pending_presence = {}

    # Map of destination -> user_id -> UserPresenceState of pending presence
    # to be sent to each destination
    self.pending_presence_by_dest = presence = {}

    # Pending EDUs by their "key". Keyed EDUs are EDUs that get clobbered
    # based on their key (e.g. typing events by room_id)
    # Map of destination -> (edu_type, key) -> Edu
    self.pending_edus_keyed_by_dest = edus_keyed = {}

    metrics.register_callback(
        "pending_pdus",
        lambda: sum(map(len, pdus.values())),
    )
    metrics.register_callback(
        "pending_edus",
        lambda: (
            sum(map(len, edus.values()))
            + sum(map(len, presence.values()))
            + sum(map(len, edus_keyed.values()))
        ),
    )

    # destination -> list of tuple(failure, deferred)
    self.pending_failures_by_dest = {}

    # destination -> stream_id of last successfully sent to-device message.
    # NB: may be a long or an int.
    self.last_device_stream_id_by_dest = {}

    # destination -> stream_id of last successfully sent device list
    # update.
    self.last_device_list_stream_id_by_dest = {}

    # HACK to get unique tx id
    self._next_txn_id = int(self.clock.time_msec())

    self._order = 1

    self._is_processing = False
    self._last_poked_id = -1

    self._processing_pending_presence = False