def from_request(cls, request, raise_invalid_params=True, default_limit=None):
    """Build a PaginationConfig from an HTTP request's query parameters.

    Args:
        request: the incoming HTTP request.
        raise_invalid_params (bool): kept for interface compatibility.
        default_limit (int|None): limit to use when none is supplied.

    Returns:
        PaginationConfig

    Raises:
        SynapseError: if 'dir', 'from', 'to' or 'limit' is invalid.
    """
    direction = parse_string(request, "dir", default='f', allowed_values=['f', 'b'])

    from_tok = parse_string(request, "from")
    to_tok = parse_string(request, "to")

    try:
        if from_tok == "END":
            from_tok = None  # For backwards compat.
        elif from_tok:
            from_tok = StreamToken.from_string(from_tok)
    except Exception:
        # Fixed typo in the client-visible message ("paramater").
        raise SynapseError(400, "'from' parameter is invalid")

    try:
        if to_tok:
            to_tok = StreamToken.from_string(to_tok)
    except Exception:
        raise SynapseError(400, "'to' parameter is invalid")

    limit = parse_integer(request, "limit", default=default_limit)
    if limit and limit < 0:
        raise SynapseError(400, "Limit must be 0 or above")

    try:
        return PaginationConfig(from_tok, to_tok, direction, limit)
    except Exception:
        logger.exception("Failed to create pagination config")
        raise SynapseError(400, "Invalid request.")
def from_request(cls, request, raise_invalid_params=True, default_limit=None):
    """Parse pagination query parameters from *request* into a PaginationConfig."""
    direction = parse_string(
        request, "dir", default="f", allowed_values=["f", "b"]
    )

    raw_from = parse_string(request, "from")
    raw_to = parse_string(request, "to")

    try:
        if raw_from == "END":
            from_tok = None  # For backwards compat.
        elif raw_from:
            from_tok = StreamToken.from_string(raw_from)
        else:
            from_tok = raw_from
    except Exception:
        raise SynapseError(400, "'from' parameter is invalid")

    try:
        to_tok = StreamToken.from_string(raw_to) if raw_to else raw_to
    except Exception:
        raise SynapseError(400, "'to' parameter is invalid")

    limit = parse_integer(request, "limit", default=default_limit)
    if limit and limit < 0:
        raise SynapseError(400, "Limit must be 0 or above")

    try:
        return PaginationConfig(from_tok, to_tok, direction, limit)
    except Exception:
        logger.exception("Failed to create pagination config")
        raise SynapseError(400, "Invalid request.")
def wait_for_events(self, user, rooms, filter, timeout, callback):
    """Wait until the callback returns a non empty response or the
    timeout fires.
    """
    deferred = defer.Deferred()
    from_token = StreamToken("s0", "0", "0")

    listener = [
        _NotificationListener(
            user=user,
            rooms=rooms,
            from_token=from_token,
            limit=1,
            timeout=timeout,
            deferred=deferred,
        )
    ]

    if timeout:
        self._register_with_keys(listener[0])

    result = yield callback()
    timer = [None]

    if timeout:
        timed_out = [False]

        def _timeout_listener():
            timed_out[0] = True
            timer[0] = None
            listener[0].notify(self, [], from_token, from_token)

        # We create multiple notification listeners so we have to manage
        # canceling the timeout ourselves.
        timer[0] = self.clock.call_later(timeout / 1000., _timeout_listener)

        while not result and not timed_out[0]:
            yield deferred
            deferred = defer.Deferred()
            listener[0] = _NotificationListener(
                user=user,
                rooms=rooms,
                from_token=from_token,
                limit=1,
                timeout=timeout,
                deferred=deferred,
            )
            self._register_with_keys(listener[0])
            result = yield callback()

    if timer[0] is not None:
        try:
            self.clock.cancel_call_later(timer[0])
        except Exception:
            # Fixed: narrowed bare "except:" (which also caught
            # KeyboardInterrupt) and the "notifer" typo in the log line.
            logger.exception("Failed to cancel notifier timer")

    defer.returnValue(result)
def from_request(cls, request, raise_invalid_params=True, default_limit=None):
    """Build a PaginationConfig from an HTTP request's query parameters.

    Raises:
        SynapseError: if a parameter is repeated or malformed.
    """
    def get_param(name, default=None):
        # Each query parameter may appear at most once.
        lst = request.args.get(name, [])
        if len(lst) > 1:
            raise SynapseError(400, "%s must be specified only once" % (name, ))
        elif len(lst) == 1:
            return lst[0]
        else:
            return default

    direction = get_param("dir", 'f')
    if direction not in ['f', 'b']:
        raise SynapseError(400, "'dir' parameter is invalid.")

    from_tok = get_param("from")
    to_tok = get_param("to")

    try:
        if from_tok == "END":
            from_tok = None  # For backwards compat.
        elif from_tok:
            from_tok = StreamToken.from_string(from_tok)
    except Exception:
        # Fixed: narrowed bare "except:" and corrected "paramater" typo.
        raise SynapseError(400, "'from' parameter is invalid")

    try:
        if to_tok:
            to_tok = StreamToken.from_string(to_tok)
    except Exception:
        raise SynapseError(400, "'to' parameter is invalid")

    limit = get_param("limit", None)
    if limit is not None and not limit.isdigit():
        raise SynapseError(400, "'limit' parameter must be an integer.")

    if limit is None:
        limit = default_limit

    try:
        return PaginationConfig(from_tok, to_tok, direction, limit)
    except Exception:
        logger.exception("Failed to create pagination config")
        raise SynapseError(400, "Invalid request.")
def from_request(cls, request, raise_invalid_params=True, default_limit=None):
    """Build a PaginationConfig from an HTTP request's query parameters.

    Raises:
        SynapseError: if a parameter is repeated or malformed.
    """
    def get_param(name, default=None):
        # Each query parameter may appear at most once.
        lst = request.args.get(name, [])
        if len(lst) > 1:
            raise SynapseError(
                400, "%s must be specified only once" % (name,)
            )
        elif len(lst) == 1:
            return lst[0]
        else:
            return default

    direction = get_param("dir", 'f')
    if direction not in ['f', 'b']:
        raise SynapseError(400, "'dir' parameter is invalid.")

    from_tok = get_param("from")
    to_tok = get_param("to")

    try:
        if from_tok == "END":
            from_tok = None  # For backwards compat.
        elif from_tok:
            from_tok = StreamToken.from_string(from_tok)
    except Exception:
        # Fixed typo in the client-visible message ("paramater").
        raise SynapseError(400, "'from' parameter is invalid")

    try:
        if to_tok:
            to_tok = StreamToken.from_string(to_tok)
    except Exception:
        raise SynapseError(400, "'to' parameter is invalid")

    limit = get_param("limit", None)
    if limit is not None and not limit.isdigit():
        raise SynapseError(400, "'limit' parameter must be an integer.")

    if limit is None:
        limit = default_limit

    try:
        return PaginationConfig(from_tok, to_tok, direction, limit)
    except Exception:
        logger.exception("Failed to create pagination config")
        raise SynapseError(400, "Invalid request.")
def get_current_token(self, direction='f'):
    """Assemble a StreamToken from the current position of each event source."""
    room_key = yield self.sources["room"].get_current_key(direction)
    presence_key = yield self.sources["presence"].get_current_key()
    typing_key = yield self.sources["typing"].get_current_key()
    receipt_key = yield self.sources["receipt"].get_current_key()

    defer.returnValue(StreamToken(
        room_key=room_key,
        presence_key=presence_key,
        typing_key=typing_key,
        receipt_key=receipt_key,
    ))
def _room_initial_sync_parted(self, user_id, room_id, pagin_config,
                              membership, member_event_id, is_peeking):
    """Build the initial-sync payload for a room the user has left, using
    the state and recent messages as of the user's membership event.
    """
    state_map = yield self.store.get_state_for_events(
        [member_event_id], None
    )
    parted_state = state_map[member_event_id]

    message_limit = pagin_config.limit if pagin_config else None
    if message_limit is None:
        message_limit = 10

    stream_token = yield self.store.get_stream_token_for_event(
        member_event_id
    )

    messages, token = yield self.store.get_recent_events_for_room(
        room_id, limit=message_limit, end_token=stream_token
    )

    messages = yield self._filter_events_for_client(
        user_id, messages, is_peeking=is_peeking
    )

    start_token = StreamToken(token[0], 0, 0, 0, 0)
    end_token = StreamToken(token[1], 0, 0, 0, 0)

    time_now = self.clock.time_msec()

    defer.returnValue({
        "membership": membership,
        "room_id": room_id,
        "messages": {
            "chunk": [serialize_event(m, time_now) for m in messages],
            "start": start_token.to_string(),
            "end": end_token.to_string(),
        },
        "state": [serialize_event(s, time_now) for s in parted_state.values()],
        "presence": [],
        "receipts": [],
    })
def get_current_token(self, direction='f'):
    """Capture the current position of every stream as a StreamToken."""
    push_rules_key, _ = self.store.get_push_rules_stream_token()

    room_key = yield self.sources["room"].get_current_key(direction)
    presence_key = yield self.sources["presence"].get_current_key()
    typing_key = yield self.sources["typing"].get_current_key()
    receipt_key = yield self.sources["receipt"].get_current_key()
    account_data_key = yield self.sources["account_data"].get_current_key()

    defer.returnValue(StreamToken(
        room_key=room_key,
        presence_key=presence_key,
        typing_key=typing_key,
        receipt_key=receipt_key,
        account_data_key=account_data_key,
        push_rules_key=push_rules_key,
    ))
def get_current_token(self):
    """Snapshot the room, presence and typing stream positions."""
    room_key = yield self.sources["room"].get_current_key()
    presence_key = yield self.sources["presence"].get_current_key()
    typing_key = yield self.sources["typing"].get_current_key()

    defer.returnValue(StreamToken(
        room_key=room_key,
        presence_key=presence_key,
        typing_key=typing_key
    ))
def on_GET(self, request):
    """Early v2 /sync: parse query parameters, load the user's filter, run
    the sync and encode the result.

    Raises:
        SynapseError: via the parse_* helpers on malformed parameters.
    """
    user, client = yield self.auth.get_user_by_req(request)

    timeout = parse_integer(request, "timeout", default=0)
    limit = parse_integer(request, "limit", required=True)
    gap = parse_boolean(request, "gap", default=True)
    sort = parse_string(
        request, "sort", default="timeline,asc",
        allowed_values=self.ALLOWED_SORT
    )
    since = parse_string(request, "since")
    set_presence = parse_string(
        request, "set_presence", default="online",
        allowed_values=self.ALLOWED_PRESENCE
    )
    backfill = parse_boolean(request, "backfill", default=False)
    filter_id = parse_string(request, "filter", default=None)

    logger.info(
        "/sync: user=%r, timeout=%r, limit=%r, gap=%r, sort=%r, since=%r,"
        " set_presence=%r, backfill=%r, filter_id=%r" % (
            user, timeout, limit, gap, sort, since, set_presence, backfill,
            filter_id
        )
    )

    # TODO(mjark): Load filter and apply overrides.
    try:
        filter = yield self.filtering.get_user_filter(user.localpart, filter_id)
    except Exception:
        # Fixed: narrowed a bare "except:"; keep the best-effort fallback
        # to an empty filter if the stored one cannot be loaded.
        filter = Filter({})

    sync_config = SyncConfig(
        user=user,
        client_info=client,
        gap=gap,
        limit=limit,
        sort=sort,
        backfill=backfill,
        filter=filter,
    )

    if since is not None:
        since_token = StreamToken.from_string(since)
    else:
        since_token = None

    sync_result = yield self.sync_handler.wait_for_sync_for_user(
        sync_config, since_token=since_token, timeout=timeout
    )

    time_now = self.clock.time_msec()

    response_content = {
        "public_user_data": self.encode_user_data(
            sync_result.public_user_data, filter, time_now
        ),
        "private_user_data": self.encode_user_data(
            sync_result.private_user_data, filter, time_now
        ),
        "rooms": self.encode_rooms(
            sync_result.rooms, filter, time_now, client.token_id
        ),
        "next_batch": sync_result.next_batch.to_string(),
    }

    defer.returnValue((200, response_content))
def on_GET(self, request):
    """Return the set of user ids whose device lists changed since `from`."""
    requester = yield self.auth.get_user_by_req(request, allow_guest=True)

    token_str = parse_string(request, "from")
    # The client must pass a "to" token as well, but we deliberately ignore
    # its value and return changes after the "to" as well as before.
    parse_string(request, "to")

    changed = yield self.device_handler.get_user_ids_changed(
        requester.user.to_string(), StreamToken.from_string(token_str)
    )

    defer.returnValue((200, changed))
def get_current_token_for_room(self, room_id):
    """Current stream positions, with the room key scoped to `room_id`."""
    push_rules_key, _ = self.store.get_push_rules_stream_token()
    to_device_key = self.store.get_to_device_stream_token()

    room_key = yield self.sources["room"].get_current_key_for_room(room_id)
    presence_key = yield self.sources["presence"].get_current_key()
    typing_key = yield self.sources["typing"].get_current_key()
    receipt_key = yield self.sources["receipt"].get_current_key()
    account_data_key = yield self.sources["account_data"].get_current_key()

    defer.returnValue(StreamToken(
        room_key=room_key,
        presence_key=presence_key,
        typing_key=typing_key,
        receipt_key=receipt_key,
        account_data_key=account_data_key,
        push_rules_key=push_rules_key,
        to_device_key=to_device_key,
    ))
async def on_GET(self, request):
    """Return the user ids whose device lists changed since `from`."""
    requester = await self.auth.get_user_by_req(request, allow_guest=True)

    since_str = parse_string(request, "from")
    set_tag("from", since_str)

    # The client must pass a "to" token as well, but we deliberately ignore
    # its value and return changes after the "to" as well as before.
    set_tag("to", parse_string(request, "to"))

    since_token = StreamToken.from_string(since_str)
    requesting_user_id = requester.user.to_string()

    changed_user_ids = await self.device_handler.get_user_ids_changed(
        requesting_user_id, since_token
    )

    return 200, changed_user_ids
async def check_for_updates(
    before_token: StreamToken, after_token: StreamToken
) -> EventStreamResult:
    """Collect new events from every source between the two tokens.

    NOTE(review): this closes over `self`, `user`, `from_token`, `limit`,
    `is_peeking`, `room_ids` and `explicit_room_id` from the enclosing
    scope -- presumably used as a wait-for-events callback; confirm
    against the caller.
    """
    # Nothing has advanced anywhere: return an empty result spanning
    # from_token on both ends.
    if not after_token.is_after(before_token):
        return EventStreamResult([], (from_token, from_token))

    events = []  # type: List[EventBase]
    end_token = from_token

    for name, source in self.event_sources.sources.items():
        # Per-source token attribute, e.g. "room_key" for the room source.
        keyname = "%s_key" % name
        before_id = getattr(before_token, keyname)
        after_id = getattr(after_token, keyname)
        if before_id == after_id:
            # This source has not advanced; skip it.
            continue

        new_events, new_key = await source.get_new_events(
            user=user,
            from_key=getattr(from_token, keyname),
            limit=limit,
            is_guest=is_peeking,
            room_ids=room_ids,
            explicit_room_id=explicit_room_id,
        )

        if name == "room":
            # Strip out events the requesting user is not allowed to see.
            new_events = await filter_events_for_client(
                self.storage, user.to_string(), new_events,
                is_peeking=is_peeking,
            )
        elif name == "presence":
            # Re-shape raw presence state into m.presence events.
            now = self.clock.time_msec()
            new_events[:] = [
                {
                    "type": "m.presence",
                    "content": format_user_presence_state(event, now),
                }
                for event in new_events
            ]

        events.extend(new_events)
        # Advance the end token for this source only.
        end_token = end_token.copy_and_replace(keyname, new_key)

    return EventStreamResult(events, (from_token, end_token))
def on_GET(self, request, room_id):
    """Return the room's member events, optionally filtered by membership.

    Fixed: stray debug `print` statements removed and the commented-out
    `chunk.append(event)` restored -- previously the endpoint always
    returned an empty chunk.
    """
    # TODO support Pagination stream API (limit/tokens)
    requester = yield self.auth.get_user_by_req(request)
    handler = self.message_handler

    # request the state as of a given event, as identified by a stream token,
    # for consistency with /messages etc.
    # useful for getting the membership in retrospect as of a given /sync
    # response.
    at_token_string = parse_string(request, "at")
    if at_token_string is None:
        at_token = None
    else:
        at_token = StreamToken.from_string(at_token_string)

    # let you filter down on particular memberships.
    # XXX: this may not be the best shape for this API - we could pass in a filter
    # instead, except filters aren't currently aware of memberships.
    # See https://github.com/matrix-org/matrix-doc/issues/1337 for more details.
    membership = parse_string(request, "membership")
    not_membership = parse_string(request, "not_membership")

    events = yield handler.get_state_events(
        room_id=room_id,
        user_id=requester.user.to_string(),
        at_token=at_token,
        state_filter=StateFilter.from_types([(EventTypes.Member, None)]),
    )

    chunk = []
    for event in events:
        if (
            (membership and event['content'].get("membership") != membership)
            or (not_membership
                and event['content'].get("membership") == not_membership)
        ):
            continue
        chunk.append(event)

    defer.returnValue((200, {"chunk": chunk}))
def wait_for_events(self, user, rooms, filter, timeout, callback):
    """Wait until the callback returns a non empty response or the
    timeout fires.
    """
    deferred = defer.Deferred()
    from_token = StreamToken("s0", "0", "0")

    listener = [_NotificationListener(
        user=user,
        rooms=rooms,
        from_token=from_token,
        limit=1,
        timeout=timeout,
        deferred=deferred,
    )]

    if timeout:
        self._register_with_keys(listener[0])

    result = yield callback()
    timer = [None]

    if timeout:
        timed_out = [False]

        def _timeout_listener():
            timed_out[0] = True
            timer[0] = None
            listener[0].notify(self, [], from_token, from_token)

        timer[0] = self.clock.call_later(timeout/1000., _timeout_listener)

        while not result and not timed_out[0]:
            yield deferred
            deferred = defer.Deferred()
            listener[0] = _NotificationListener(
                user=user,
                rooms=rooms,
                from_token=from_token,
                limit=1,
                timeout=timeout,
                deferred=deferred,
            )
            self._register_with_keys(listener[0])
            result = yield callback()

    # Fixed: cancel the timeout timer once we have a result. Previously it
    # was never cancelled and could fire later, notifying a stale listener
    # (the sibling implementation of this method does the same cleanup).
    if timer[0] is not None:
        try:
            self.clock.cancel_call_later(timer[0])
        except Exception:
            logger.exception("Failed to cancel notifier timer")

    defer.returnValue(result)
def on_GET(self, request):
    """List users whose device lists changed since the supplied token."""
    requester = yield self.auth.get_user_by_req(request, allow_guest=True)

    since_str = parse_string(request, "from")
    # Clients are required to send "to" as well, but we deliberately ignore
    # it and return changes after the "to" as well as before.
    parse_string(request, "to")

    since_token = StreamToken.from_string(since_str)
    requesting_user_id = requester.user.to_string()

    changed = yield self.device_handler.get_user_ids_changed(
        requesting_user_id, since_token,
    )

    defer.returnValue((200, changed))
def get_current_token(self) -> StreamToken:
    """Return a StreamToken describing the current head of every stream."""
    rules_key = self.store.get_max_push_rules_stream_id()
    device_msg_key = self.store.get_to_device_stream_token()
    device_list_key = self.store.get_device_stream_token()

    src = self.sources
    return StreamToken(
        room_key=src.room.get_current_key(),
        presence_key=src.presence.get_current_key(),
        typing_key=src.typing.get_current_key(),
        receipt_key=src.receipt.get_current_key(),
        account_data_key=src.account_data.get_current_key(),
        push_rules_key=rules_key,
        to_device_key=device_msg_key,
        device_list_key=device_list_key,
        # Groups key is unused.
        groups_key=0,
    )
def get_current_token(self) -> StreamToken:
    """Snapshot every stream's current position into a single StreamToken."""
    store = self.store
    stream_keys = {
        "push_rules_key": store.get_max_push_rules_stream_id(),
        "to_device_key": store.get_to_device_stream_token(),
        "device_list_key": store.get_device_stream_token(),
        "groups_key": store.get_group_stream_token(),
    }

    for name in ("room", "presence", "typing", "receipt", "account_data"):
        stream_keys["%s_key" % name] = self.sources[name].get_current_key()

    return StreamToken(**stream_keys)
def on_GET(self, request, room_id):
    """Return the room's member events, optionally filtered by membership."""
    # TODO support Pagination stream API (limit/tokens)
    requester = yield self.auth.get_user_by_req(request)
    handler = self.message_handler

    # request the state as of a given event, as identified by a stream token,
    # for consistency with /messages etc.
    # useful for getting the membership in retrospect as of a given /sync
    # response.
    at_token_string = parse_string(request, "at")
    at_token = (
        StreamToken.from_string(at_token_string)
        if at_token_string is not None
        else None
    )

    # let you filter down on particular memberships.
    # XXX: this may not be the best shape for this API - we could pass in a filter
    # instead, except filters aren't currently aware of memberships.
    # See https://github.com/matrix-org/matrix-doc/issues/1337 for more details.
    membership = parse_string(request, "membership")
    not_membership = parse_string(request, "not_membership")

    events = yield handler.get_state_events(
        room_id=room_id,
        user_id=requester.user.to_string(),
        at_token=at_token,
        state_filter=StateFilter.from_types([(EventTypes.Member, None)]),
    )

    def _wanted(ev):
        # Apply the membership / not_membership filters, if given.
        m = ev['content'].get("membership")
        if membership and m != membership:
            return False
        if not_membership and m == not_membership:
            return False
        return True

    chunk = [ev for ev in events if _wanted(ev)]

    defer.returnValue((200, {
        "chunk": chunk
    }))
def get_current_token_for_pagination(self):
    """Get the current token for a given room to be used to paginate
    events.

    The returned token does not have the current values for fields other
    than `room`, since they are not used during pagination.

    Returns:
        Deferred[StreamToken]
    """
    room_key = yield self.sources["room"].get_current_key()
    return StreamToken(
        room_key=room_key,
        presence_key=0,
        typing_key=0,
        receipt_key=0,
        account_data_key=0,
        push_rules_key=0,
        to_device_key=0,
        device_list_key=0,
        groups_key=0,
    )
async def get_current_token_for_pagination(self, room_id: str) -> StreamToken:
    """Get the current token for a given room to be used to paginate
    events.

    The returned token does not have the current values for fields other
    than `room`, since they are not used during pagination.

    Returns:
        The current token for pagination.
    """
    room_position = await self.sources.room.get_current_key_for_room(room_id)
    return StreamToken(
        room_key=room_position,
        presence_key=0,
        typing_key=0,
        receipt_key=0,
        account_data_key=0,
        push_rules_key=0,
        to_device_key=0,
        device_list_key=0,
        groups_key=0,
    )
def _get_recent_references_for_event_txn(
    txn: LoggingTransaction,
) -> Tuple[List[_RelatedEvent], Optional[StreamToken]]:
    """Fetch a page of related events inside a DB transaction.

    NOTE(review): closes over `sql`, `where_args`, `limit`, `is_redacted`
    and `from_token` from the enclosing scope -- confirm against the
    enclosing function.
    """
    # Fetch one more row than requested so we can tell whether a
    # next-page token is needed.
    txn.execute(sql, where_args + [limit + 1])

    last_topo_id = None
    last_stream_id = None
    events = []
    for row in txn:
        # Do not include edits for redacted events as they leak event
        # content.
        if not is_redacted or row[1] != RelationTypes.REPLACE:
            events.append(_RelatedEvent(row[0], row[2]))
        last_topo_id = row[3]
        last_stream_id = row[4]

    # If there are more events, generate the next pagination key.
    next_token = None
    if len(events) > limit and last_topo_id and last_stream_id:
        next_key = RoomStreamToken(last_topo_id, last_stream_id)
        if from_token:
            next_token = from_token.copy_and_replace(
                StreamKeyType.ROOM, next_key)
        else:
            # No incoming token: build a fresh one with only the room
            # position set (other streams are unused for pagination).
            next_token = StreamToken(
                room_key=next_key,
                presence_key=0,
                typing_key=0,
                receipt_key=0,
                account_data_key=0,
                push_rules_key=0,
                to_device_key=0,
                device_list_key=0,
                groups_key=0,
            )

    # Trim the extra row we fetched for look-ahead.
    return events[:limit], next_token
def _room_initial_sync_parted(self, user_id, room_id, pagin_config,
                              membership, member_event_id, is_guest):
    """Initial sync for a room the user has left: state and messages as of
    the user's leave event.
    """
    states = yield self.store.get_state_for_events(
        [member_event_id], None
    )
    leave_state = states[member_event_id]

    limit = pagin_config.limit if pagin_config else None
    if limit is None:
        limit = 10

    leave_position = yield self.store.get_stream_token_for_event(
        member_event_id
    )

    messages, token = yield self.store.get_recent_events_for_room(
        room_id, limit=limit, end_token=leave_position
    )

    messages = yield self._filter_events_for_client(
        user_id, messages, is_guest=is_guest
    )

    start_token = StreamToken(token[0], 0, 0, 0, 0)
    end_token = StreamToken(token[1], 0, 0, 0, 0)

    now = self.clock.time_msec()

    serialized_messages = [serialize_event(m, now) for m in messages]
    serialized_state = [
        serialize_event(s, now) for s in leave_state.values()
    ]

    defer.returnValue({
        "membership": membership,
        "room_id": room_id,
        "messages": {
            "chunk": serialized_messages,
            "start": start_token.to_string(),
            "end": end_token.to_string(),
        },
        "state": serialized_state,
        "presence": [],
        "receipts": [],
    })
def on_GET(self, request):
    """Handle /sync: parse query parameters, build a SyncConfig, deliver
    server notices, update presence, wait for the sync result and encode
    the response.
    """
    if b"from" in request.args:
        # /events used to use 'from', but /sync uses 'since'.
        # Lets be helpful and whine if we see a 'from'.
        raise SynapseError(
            400, "'from' is not a valid query parameter. Did you mean 'since'?")

    requester = yield self.auth.get_user_by_req(request, allow_guest=True)
    user = requester.user
    device_id = requester.device_id

    timeout = parse_integer(request, "timeout", default=0)
    since = parse_string(request, "since")
    set_presence = parse_string(
        request, "set_presence", default="online",
        allowed_values=self.ALLOWED_PRESENCE,
    )
    filter_id = parse_string(request, "filter", default=None)
    full_state = parse_boolean(request, "full_state", default=False)

    logger.debug(
        "/sync: user=%r, timeout=%r, since=%r,"
        " set_presence=%r, filter_id=%r, device_id=%r" % (
            user, timeout, since, set_presence, filter_id, device_id))

    # Key used to de-duplicate identical concurrent sync requests.
    request_key = (user, timeout, since, filter_id, full_state, device_id)

    if filter_id:
        if filter_id.startswith("{"):
            # An inline JSON filter definition was passed rather than a
            # stored filter id.
            try:
                filter_object = json.loads(filter_id)
                set_timeline_upper_limit(
                    filter_object, self.hs.config.filter_timeline_limit)
            except Exception:
                raise SynapseError(400, "Invalid filter JSON")
            self.filtering.check_valid_filter(filter_object)
            filter = FilterCollection(filter_object)
        else:
            filter = yield self.filtering.get_user_filter(
                user.localpart, filter_id)
    else:
        filter = DEFAULT_FILTER_COLLECTION

    sync_config = SyncConfig(
        user=user,
        filter_collection=filter,
        is_guest=requester.is_guest,
        request_key=request_key,
        device_id=device_id,
    )

    if since is not None:
        since_token = StreamToken.from_string(since)
    else:
        since_token = None

    # send any outstanding server notices to the user.
    yield self._server_notices_sender.on_user_syncing(user.to_string())

    affect_presence = set_presence != PresenceState.OFFLINE

    if affect_presence:
        yield self.presence_handler.set_state(user, {"presence": set_presence}, True)

    # Mark the user as syncing for the duration of the request so presence
    # is kept up to date.
    context = yield self.presence_handler.user_syncing(
        user.to_string(), affect_presence=affect_presence)
    with context:
        sync_result = yield self.sync_handler.wait_for_sync_for_user(
            sync_config, since_token=since_token, timeout=timeout,
            full_state=full_state,
        )

    time_now = self.clock.time_msec()

    response_content = yield self.encode_response(
        time_now, sync_result, requester.access_token_id, filter)

    return 200, response_content
def on_GET(self, request):
    """Handle /sync for (possibly guest) users: resolve the filter, run
    the sync and encode joined/invited/archived rooms plus presence and
    account data.
    """
    user, token_id, is_guest = yield self.auth.get_user_by_req(
        request, allow_guest=True
    )

    timeout = parse_integer(request, "timeout", default=0)
    since = parse_string(request, "since")
    set_presence = parse_string(
        request, "set_presence", default="online",
        allowed_values=self.ALLOWED_PRESENCE
    )
    filter_id = parse_string(request, "filter", default=None)
    full_state = parse_boolean(request, "full_state", default=False)

    logger.info(
        "/sync: user=%r, timeout=%r, since=%r,"
        " set_presence=%r, filter_id=%r" % (
            user, timeout, since, set_presence, filter_id
        )
    )

    if filter_id and filter_id.startswith('{'):
        # Inline JSON filter definition rather than a stored filter id.
        try:
            filter_object = json.loads(filter_id)
        except Exception:
            # Fixed: narrowed a bare "except:" which also caught
            # KeyboardInterrupt/SystemExit.
            raise SynapseError(400, "Invalid filter JSON")
        self.filtering._check_valid_filter(filter_object)
        filter = FilterCollection(filter_object)
    else:
        try:
            filter = yield self.filtering.get_user_filter(
                user.localpart, filter_id
            )
        except Exception:
            # Fixed: narrowed a bare "except:"; keep the best-effort
            # fallback to an empty filter.
            filter = FilterCollection({})

    if is_guest and filter.list_rooms() is None:
        raise SynapseError(
            400, "Guest users must provide a list of rooms in the filter"
        )

    sync_config = SyncConfig(
        user=user,
        is_guest=is_guest,
        filter=filter,
    )

    if since is not None:
        since_token = StreamToken.from_string(since)
    else:
        since_token = None

    if set_presence == "online":
        yield self.event_stream_handler.started_stream(user)

    try:
        sync_result = yield self.sync_handler.wait_for_sync_for_user(
            sync_config, since_token=since_token, timeout=timeout,
            full_state=full_state
        )
    finally:
        if set_presence == "online":
            self.event_stream_handler.stopped_stream(user)

    time_now = self.clock.time_msec()

    joined = self.encode_joined(
        sync_result.joined, filter, time_now, token_id
    )

    invited = self.encode_invited(
        sync_result.invited, filter, time_now, token_id
    )

    archived = self.encode_archived(
        sync_result.archived, filter, time_now, token_id
    )

    response_content = {
        "account_data": self.encode_account_data(
            sync_result.account_data, filter, time_now
        ),
        "presence": self.encode_presence(
            sync_result.presence, filter, time_now
        ),
        "rooms": {
            "join": joined,
            "invite": invited,
            "leave": archived,
        },
        "next_batch": sync_result.next_batch.to_string(),
    }

    defer.returnValue((200, response_content))
def on_GET(self, request):
    """Handle /sync: parse parameters, run the sync and encode the
    rooms/presence/account_data response.
    """
    if "from" in request.args:
        # /events used to use 'from', but /sync uses 'since'.
        # Lets be helpful and whine if we see a 'from'.
        raise SynapseError(
            400, "'from' is not a valid query parameter. Did you mean 'since'?"
        )

    requester = yield self.auth.get_user_by_req(
        request, allow_guest=True
    )
    user = requester.user

    timeout = parse_integer(request, "timeout", default=0)
    since = parse_string(request, "since")
    set_presence = parse_string(
        request, "set_presence", default="online",
        allowed_values=self.ALLOWED_PRESENCE
    )
    filter_id = parse_string(request, "filter", default=None)
    full_state = parse_boolean(request, "full_state", default=False)

    logger.info(
        "/sync: user=%r, timeout=%r, since=%r,"
        " set_presence=%r, filter_id=%r" % (
            user, timeout, since, set_presence, filter_id
        )
    )

    if filter_id:
        if filter_id.startswith('{'):
            # Inline JSON filter definition rather than a stored filter id.
            try:
                filter_object = json.loads(filter_id)
            except Exception:
                # Fixed: narrowed a bare "except:" which also caught
                # KeyboardInterrupt/SystemExit.
                raise SynapseError(400, "Invalid filter JSON")
            self.filtering.check_valid_filter(filter_object)
            filter = FilterCollection(filter_object)
        else:
            filter = yield self.filtering.get_user_filter(
                user.localpart, filter_id
            )
    else:
        filter = DEFAULT_FILTER_COLLECTION

    sync_config = SyncConfig(
        user=user,
        filter_collection=filter,
        is_guest=requester.is_guest,
    )

    if since is not None:
        since_token = StreamToken.from_string(since)
    else:
        since_token = None

    if set_presence == "online":
        yield self.event_stream_handler.started_stream(user)

    try:
        sync_result = yield self.sync_handler.wait_for_sync_for_user(
            sync_config, since_token=since_token, timeout=timeout,
            full_state=full_state
        )
    finally:
        if set_presence == "online":
            self.event_stream_handler.stopped_stream(user)

    time_now = self.clock.time_msec()

    joined = self.encode_joined(
        sync_result.joined, time_now, requester.access_token_id
    )

    invited = self.encode_invited(
        sync_result.invited, time_now, requester.access_token_id
    )

    archived = self.encode_archived(
        sync_result.archived, time_now, requester.access_token_id
    )

    response_content = {
        "account_data": {"events": sync_result.account_data},
        "presence": self.encode_presence(
            sync_result.presence, time_now
        ),
        "rooms": {
            "join": joined,
            "invite": invited,
            "leave": archived,
        },
        "next_batch": sync_result.next_batch.to_string(),
    }

    defer.returnValue((200, response_content))
def on_GET(self, request):
    """Early /sync: run a sync for the authenticated user and encode
    joined/invited/archived rooms plus presence.
    """
    user, token_id, _ = yield self.auth.get_user_by_req(request)

    timeout = parse_integer(request, "timeout", default=0)
    since = parse_string(request, "since")
    set_presence = parse_string(
        request, "set_presence", default="online",
        allowed_values=self.ALLOWED_PRESENCE
    )
    filter_id = parse_string(request, "filter", default=None)
    full_state = parse_boolean(request, "full_state", default=False)

    logger.info(
        "/sync: user=%r, timeout=%r, since=%r,"
        " set_presence=%r, filter_id=%r" % (
            user, timeout, since, set_presence, filter_id
        )
    )

    try:
        filter = yield self.filtering.get_user_filter(
            user.localpart, filter_id
        )
    except Exception:
        # Fixed: narrowed a bare "except:"; keep the best-effort fallback
        # to an empty filter if the stored one cannot be loaded.
        filter = FilterCollection({})

    sync_config = SyncConfig(
        user=user,
        filter=filter,
    )

    if since is not None:
        since_token = StreamToken.from_string(since)
    else:
        since_token = None

    if set_presence == "online":
        yield self.event_stream_handler.started_stream(user)

    try:
        sync_result = yield self.sync_handler.wait_for_sync_for_user(
            sync_config, since_token=since_token, timeout=timeout,
            full_state=full_state
        )
    finally:
        if set_presence == "online":
            self.event_stream_handler.stopped_stream(user)

    time_now = self.clock.time_msec()

    joined = self.encode_joined(
        sync_result.joined, filter, time_now, token_id
    )

    invited = self.encode_invited(
        sync_result.invited, filter, time_now, token_id
    )

    archived = self.encode_archived(
        sync_result.archived, filter, time_now, token_id
    )

    response_content = {
        "presence": self.encode_presence(
            sync_result.presence, filter, time_now
        ),
        "rooms": {
            "join": joined,
            "invite": invited,
            "leave": archived,
        },
        "next_batch": sync_result.next_batch.to_string(),
    }

    defer.returnValue((200, response_content))
def on_GET(self, request):
    """Handle /sync: parse params, resolve the filter, update presence,
    run the sync within a user_syncing context and encode the response
    (including to_device and device_lists sections).
    """
    if "from" in request.args:
        # /events used to use 'from', but /sync uses 'since'.
        # Lets be helpful and whine if we see a 'from'.
        raise SynapseError(
            400, "'from' is not a valid query parameter. Did you mean 'since'?")

    requester = yield self.auth.get_user_by_req(request, allow_guest=True)
    user = requester.user
    device_id = requester.device_id

    timeout = parse_integer(request, "timeout", default=0)
    since = parse_string(request, "since")
    set_presence = parse_string(
        request, "set_presence", default="online",
        allowed_values=self.ALLOWED_PRESENCE
    )
    filter_id = parse_string(request, "filter", default=None)
    full_state = parse_boolean(request, "full_state", default=False)

    logger.info(
        "/sync: user=%r, timeout=%r, since=%r,"
        " set_presence=%r, filter_id=%r, device_id=%r" % (
            user, timeout, since, set_presence, filter_id, device_id
        )
    )

    # Key used to de-duplicate identical concurrent sync requests.
    request_key = (user, timeout, since, filter_id, full_state, device_id)

    if filter_id:
        if filter_id.startswith('{'):
            # Inline JSON filter definition rather than a stored filter id.
            try:
                filter_object = json.loads(filter_id)
            except Exception:
                # Fixed: narrowed a bare "except:" which also caught
                # KeyboardInterrupt/SystemExit.
                raise SynapseError(400, "Invalid filter JSON")
            self.filtering.check_valid_filter(filter_object)
            filter = FilterCollection(filter_object)
        else:
            filter = yield self.filtering.get_user_filter(
                user.localpart, filter_id)
    else:
        filter = DEFAULT_FILTER_COLLECTION

    sync_config = SyncConfig(
        user=user,
        filter_collection=filter,
        is_guest=requester.is_guest,
        request_key=request_key,
        device_id=device_id,
    )

    if since is not None:
        since_token = StreamToken.from_string(since)
    else:
        since_token = None

    affect_presence = set_presence != PresenceState.OFFLINE

    if affect_presence:
        yield self.presence_handler.set_state(user, {"presence": set_presence}, True)

    context = yield self.presence_handler.user_syncing(
        user.to_string(), affect_presence=affect_presence,
    )
    with context:
        sync_result = yield self.sync_handler.wait_for_sync_for_user(
            sync_config, since_token=since_token, timeout=timeout,
            full_state=full_state)

    time_now = self.clock.time_msec()

    joined = self.encode_joined(
        sync_result.joined, time_now, requester.access_token_id,
        filter.event_fields
    )

    invited = self.encode_invited(
        sync_result.invited, time_now, requester.access_token_id
    )

    archived = self.encode_archived(
        sync_result.archived, time_now, requester.access_token_id,
        filter.event_fields,
    )

    response_content = {
        "account_data": {"events": sync_result.account_data},
        "to_device": {"events": sync_result.to_device},
        "device_lists": {
            "changed": list(sync_result.device_lists),
        },
        "presence": self.encode_presence(sync_result.presence, time_now),
        "rooms": {
            "join": joined,
            "invite": invited,
            "leave": archived,
        },
        "next_batch": sync_result.next_batch.to_string(),
    }

    defer.returnValue((200, response_content))
def get_and_dispatch(self):
    """Poll the event stream once for this pusher's user and dispatch any
    push notification / badge update, maintaining the pusher's persisted
    stream token and failure/backoff state.
    """
    from_tok = StreamToken.from_string(self.last_token)
    # NOTE(review): limit is passed as the string '1' — presumably
    # PaginationConfig coerces it; confirm against its constructor.
    config = PaginationConfig(from_token=from_tok, limit='1')
    # Jittered ~5 minute long-poll timeout (milliseconds).
    timeout = (300 + random.randint(-60, 60)) * 1000
    chunk = yield self.evStreamHandler.get_stream(
        self.user_id, config, timeout=timeout, affect_presence=False,
        only_keys=("room", "receipt",),
    )

    # limiting to 1 may get 1 event plus 1 presence event, so
    # pick out the actual event
    single_event = None
    read_receipt = None
    for c in chunk['chunk']:
        if 'event_id' in c:  # Hmmm...
            single_event = c
        elif c['type'] == 'm.receipt':
            read_receipt = c

    # Did a read receipt for this user arrive (meaning the badge count
    # may have changed)?
    have_updated_badge = False
    if read_receipt:
        for receipt_part in read_receipt['content'].values():
            if 'm.read' in receipt_part:
                if self.user_id in receipt_part['m.read'].keys():
                    have_updated_badge = True

    if not single_event:
        # No pushable event: maybe refresh the badge, advance the stored
        # token, and return without touching failure state.
        if have_updated_badge:
            yield self.update_badge()
        self.last_token = chunk['end']
        yield self.store.update_pusher_last_token(
            self.app_id, self.pushkey, self.user_id, self.last_token)
        return

    if not self.alive:
        return

    processed = False

    rule_evaluator = yield \
        evaluator_for_user_id(
            self.user_id, single_event['room_id'], self.store
        )

    actions = yield rule_evaluator.actions_for_event(single_event)
    tweaks = rule_evaluator.tweaks_for_actions(actions)

    if 'notify' in actions:
        self.badge = yield self._get_badge_count()
        rejected = yield self.dispatch_push(single_event, tweaks, self.badge)
        self.has_unread = True
        # A list/tuple result means the HTTP hit succeeded and lists
        # pushkeys the endpoint rejected; anything else is a failure.
        if isinstance(rejected, list) or isinstance(rejected, tuple):
            processed = True
            for pk in rejected:
                if pk != self.pushkey:
                    # for sanity, we only remove the pushkey if it
                    # was the one we actually sent...
                    logger.warn(
                        ("Ignoring rejected pushkey %s because we"
                         " didn't send it"), pk
                    )
                else:
                    logger.info("Pushkey %s was rejected: removing", pk)
                    yield self.hs.get_pusherpool().remove_pusher(
                        self.app_id, pk, self.user_id)
    else:
        if have_updated_badge:
            yield self.update_badge()
        processed = True

    if not self.alive:
        return

    if processed:
        # Success: reset backoff, persist the new token, clear any
        # recorded failure start time.
        self.backoff_delay = Pusher.INITIAL_BACKOFF
        self.last_token = chunk['end']
        yield self.store.update_pusher_last_token_and_success(
            self.app_id, self.pushkey, self.user_id, self.last_token,
            self.clock.time_msec())
        if self.failing_since:
            self.failing_since = None
            yield self.store.update_pusher_failing_since(
                self.app_id, self.pushkey, self.user_id,
                self.failing_since)
    else:
        # Failure: record when failures started (if not already).
        if not self.failing_since:
            self.failing_since = self.clock.time_msec()
            yield self.store.update_pusher_failing_since(
                self.app_id, self.pushkey, self.user_id,
                self.failing_since)

        if (self.failing_since and
                self.failing_since <
                self.clock.time_msec() - Pusher.GIVE_UP_AFTER):
            # we really only give up so that if the URL gets
            # fixed, we don't suddenly deliver a load
            # of old notifications.
            logger.warn(
                "Giving up on a notification to user %s, "
                "pushkey %s", self.user_id, self.pushkey)
            self.backoff_delay = Pusher.INITIAL_BACKOFF
            self.last_token = chunk['end']
            yield self.store.update_pusher_last_token(
                self.app_id, self.pushkey, self.user_id, self.last_token)

            self.failing_since = None
            yield self.store.update_pusher_failing_since(
                self.app_id, self.pushkey, self.user_id,
                self.failing_since)
        else:
            logger.warn(
                "Failed to dispatch push for user %s "
                "(failing for %dms)."
                "Trying again in %dms", self.user_id,
                self.clock.time_msec() - self.failing_since,
                self.backoff_delay)
            # Exponential backoff, capped at MAX_BACKOFF.
            yield synapse.util.async.sleep(self.backoff_delay / 1000.0)
            self.backoff_delay *= 2
            if self.backoff_delay > Pusher.MAX_BACKOFF:
                self.backoff_delay = Pusher.MAX_BACKOFF
def wait_for_events(self, user, rooms, timeout, callback, from_token=StreamToken("s0", "0", "0")): """Wait until the callback returns a non empty response or the timeout fires. """ user = str(user) user_stream = self.user_to_user_stream.get(user) if user_stream is None: appservice = yield self.store.get_app_service_by_user_id(user) current_token = yield self.event_sources.get_current_token() rooms = yield self.store.get_rooms_for_user(user) rooms = [room.room_id for room in rooms] user_stream = _NotifierUserStream( user=user, rooms=rooms, appservice=appservice, current_token=current_token, time_now_ms=self.clock.time_msec(), ) self._register_with_keys(user_stream) result = None if timeout: # Will be set to a _NotificationListener that we'll be waiting on. # Allows us to cancel it. listener = None def timed_out(): if listener: listener.deferred.cancel() timer = self.clock.call_later(timeout / 1000., timed_out) prev_token = from_token while not result: try: current_token = user_stream.current_token result = yield callback(prev_token, current_token) if result: break # Now we wait for the _NotifierUserStream to be told there # is a new token. # We need to supply the token we supplied to callback so # that we don't miss any current_token updates. prev_token = current_token listener = user_stream.new_listener(prev_token) yield listener.deferred except defer.CancelledError: break self.clock.cancel_call_later(timer, ignore_errs=True) else: current_token = user_stream.current_token result = yield callback(from_token, current_token) defer.returnValue(result)
def get_and_dispatch(self):
    """Poll the event stream once for this pusher's user (profile-tag
    variant), run push rules on any event found, dispatch the push, and
    maintain the pusher's persisted token and failure/backoff state.
    """
    from_tok = StreamToken.from_string(self.last_token)
    # NOTE(review): limit is passed as the string '1' — presumably
    # PaginationConfig coerces it; confirm against its constructor.
    config = PaginationConfig(from_token=from_tok, limit='1')
    # Jittered ~5 minute long-poll timeout (milliseconds).
    timeout = (300 + random.randint(-60, 60)) * 1000
    chunk = yield self.evStreamHandler.get_stream(
        self.user_name, config, timeout=timeout, affect_presence=False,
        only_room_events=True
    )

    # limiting to 1 may get 1 event plus 1 presence event, so
    # pick out the actual event
    single_event = None
    for c in chunk['chunk']:
        if 'event_id' in c:  # Hmmm...
            single_event = c
            break
    if not single_event:
        # Long-poll timed out with nothing pushable: just advance the
        # stored token.
        self.last_token = chunk['end']
        logger.debug("Event stream timeout for pushkey %s", self.pushkey)
        yield self.store.update_pusher_last_token(
            self.app_id, self.pushkey, self.user_name, self.last_token)
        return

    if not self.alive:
        return

    processed = False

    rule_evaluator = yield \
        push_rule_evaluator.evaluator_for_user_name_and_profile_tag(
            self.user_name, self.profile_tag, single_event['room_id'],
            self.store
        )

    actions = yield rule_evaluator.actions_for_event(single_event)
    tweaks = rule_evaluator.tweaks_for_actions(actions)

    if len(actions) == 0:
        logger.warn("Empty actions! Using default action.")
        actions = Pusher.DEFAULT_ACTIONS

    if 'notify' not in actions and 'dont_notify' not in actions:
        logger.warn(
            "Neither notify nor dont_notify in actions: adding default")
        actions.extend(Pusher.DEFAULT_ACTIONS)

    if 'dont_notify' in actions:
        logger.debug("%s for %s: dont_notify",
                     single_event['event_id'], self.user_name)
        processed = True
    else:
        rejected = yield self.dispatch_push(single_event, tweaks)
        self.has_unread = True
        # A list/tuple result means the HTTP hit succeeded and lists
        # pushkeys the endpoint rejected; anything else is a failure.
        if isinstance(rejected, list) or isinstance(rejected, tuple):
            processed = True
            for pk in rejected:
                if pk != self.pushkey:
                    # for sanity, we only remove the pushkey if it
                    # was the one we actually sent...
                    logger.warn(
                        ("Ignoring rejected pushkey %s because we"
                         " didn't send it"), pk)
                else:
                    logger.info("Pushkey %s was rejected: removing", pk)
                    yield self.hs.get_pusherpool().remove_pusher(
                        self.app_id, pk, self.user_name)

    if not self.alive:
        return

    if processed:
        # Success: reset backoff, persist the new token, clear any
        # recorded failure start time.
        self.backoff_delay = Pusher.INITIAL_BACKOFF
        self.last_token = chunk['end']
        yield self.store.update_pusher_last_token_and_success(
            self.app_id, self.pushkey, self.user_name, self.last_token,
            self.clock.time_msec())
        if self.failing_since:
            self.failing_since = None
            yield self.store.update_pusher_failing_since(
                self.app_id, self.pushkey, self.user_name,
                self.failing_since)
    else:
        # Failure: record when failures started (if not already).
        if not self.failing_since:
            self.failing_since = self.clock.time_msec()
            yield self.store.update_pusher_failing_since(
                self.app_id, self.pushkey, self.user_name,
                self.failing_since)

        if (self.failing_since and
                self.failing_since <
                self.clock.time_msec() - Pusher.GIVE_UP_AFTER):
            # we really only give up so that if the URL gets
            # fixed, we don't suddenly deliver a load
            # of old notifications.
            logger.warn(
                "Giving up on a notification to user %s, "
                "pushkey %s", self.user_name, self.pushkey)
            self.backoff_delay = Pusher.INITIAL_BACKOFF
            self.last_token = chunk['end']
            yield self.store.update_pusher_last_token(
                self.app_id, self.pushkey, self.user_name, self.last_token)

            self.failing_since = None
            yield self.store.update_pusher_failing_since(
                self.app_id, self.pushkey, self.user_name,
                self.failing_since)
        else:
            logger.warn(
                "Failed to dispatch push for user %s "
                "(failing for %dms)."
                "Trying again in %dms", self.user_name,
                self.clock.time_msec() - self.failing_since,
                self.backoff_delay)
            # Exponential backoff, capped at MAX_BACKOFF.
            yield synapse.util.async.sleep(self.backoff_delay / 1000.0)
            self.backoff_delay *= 2
            if self.backoff_delay > Pusher.MAX_BACKOFF:
                self.backoff_delay = Pusher.MAX_BACKOFF
def on_GET(self, request):
    """Handle ``GET /sync``: parse query parameters, run the sync, and
    serialize the result via ``encode_response``.

    Returns (via ``defer.returnValue``) a ``(200, response_content)`` tuple.

    Raises:
        SynapseError: 400 on a ``from`` parameter (use ``since``) or on
            invalid inline filter JSON.
    """
    if "from" in request.args:
        # /events used to use 'from', but /sync uses 'since'.
        # Lets be helpful and whine if we see a 'from'.
        raise SynapseError(
            400, "'from' is not a valid query parameter. Did you mean 'since'?"
        )

    requester = yield self.auth.get_user_by_req(
        request, allow_guest=True
    )
    user = requester.user
    device_id = requester.device_id

    timeout = parse_integer(request, "timeout", default=0)
    since = parse_string(request, "since")
    set_presence = parse_string(
        request, "set_presence", default="online",
        allowed_values=self.ALLOWED_PRESENCE
    )
    filter_id = parse_string(request, "filter", default=None)
    full_state = parse_boolean(request, "full_state", default=False)

    # Lazy %-style logging args: the message is only formatted if this
    # record is actually emitted (previously formatted eagerly with `%`).
    logger.debug(
        "/sync: user=%r, timeout=%r, since=%r,"
        " set_presence=%r, filter_id=%r, device_id=%r",
        user, timeout, since, set_presence, filter_id, device_id
    )

    # Key used to deduplicate identical concurrent sync requests.
    request_key = (user, timeout, since, filter_id, full_state, device_id)

    if filter_id:
        if filter_id.startswith('{'):
            # Inline JSON filter supplied directly in the query string.
            try:
                filter_object = json.loads(filter_id)
                set_timeline_upper_limit(
                    filter_object,
                    self.hs.config.filter_timeline_limit
                )
            except Exception:
                raise SynapseError(400, "Invalid filter JSON")
            self.filtering.check_valid_filter(filter_object)
            filter = FilterCollection(filter_object)
        else:
            # Otherwise treat it as the ID of a previously-stored filter.
            filter = yield self.filtering.get_user_filter(
                user.localpart, filter_id
            )
    else:
        filter = DEFAULT_FILTER_COLLECTION

    sync_config = SyncConfig(
        user=user,
        filter_collection=filter,
        is_guest=requester.is_guest,
        request_key=request_key,
        device_id=device_id,
    )

    if since is not None:
        since_token = StreamToken.from_string(since)
    else:
        since_token = None

    affect_presence = set_presence != PresenceState.OFFLINE

    if affect_presence:
        yield self.presence_handler.set_state(
            user, {"presence": set_presence}, True
        )

    # Mark the user as syncing for the duration of the sync itself.
    context = yield self.presence_handler.user_syncing(
        user.to_string(), affect_presence=affect_presence,
    )
    with context:
        sync_result = yield self.sync_handler.wait_for_sync_for_user(
            sync_config, since_token=since_token, timeout=timeout,
            full_state=full_state
        )

    time_now = self.clock.time_msec()

    response_content = self.encode_response(
        time_now, sync_result, requester.access_token_id, filter
    )

    defer.returnValue((200, response_content))
def on_GET(self, request): user, client = yield self.auth.get_user_by_req(request) timeout = self.parse_integer(request, "timeout", default=0) limit = self.parse_integer(request, "limit", required=True) gap = self.parse_boolean(request, "gap", default=True) sort = self.parse_string( request, "sort", default="timeline,asc", allowed_values=self.ALLOWED_SORT ) since = self.parse_string(request, "since") set_presence = self.parse_string( request, "set_presence", default="online", allowed_values=self.ALLOWED_PRESENCE ) backfill = self.parse_boolean(request, "backfill", default=False) filter_id = self.parse_string(request, "filter", default=None) logger.info( "/sync: user=%r, timeout=%r, limit=%r, gap=%r, sort=%r, since=%r," " set_presence=%r, backfill=%r, filter_id=%r" % ( user, timeout, limit, gap, sort, since, set_presence, backfill, filter_id ) ) # TODO(mjark): Load filter and apply overrides. try: filter = yield self.filtering.get_user_filter( user.localpart, filter_id ) except: filter = Filter({}) # filter = filter.apply_overrides(http_request) # if filter.matches(event): # # stuff sync_config = SyncConfig( user=user, client_info=client, gap=gap, limit=limit, sort=sort, backfill=backfill, filter=filter, ) if since is not None: since_token = StreamToken.from_string(since) else: since_token = None sync_result = yield self.sync_handler.wait_for_sync_for_user( sync_config, since_token=since_token, timeout=timeout ) time_now = self.clock.time_msec() response_content = { "public_user_data": self.encode_user_data( sync_result.public_user_data, filter, time_now ), "private_user_data": self.encode_user_data( sync_result.private_user_data, filter, time_now ), "rooms": self.encode_rooms( sync_result.rooms, filter, time_now, client.token_id ), "next_batch": sync_result.next_batch.to_string(), } defer.returnValue((200, response_content))
async def on_GET(self, request):
    """Handle ``GET /sync`` (async variant): parse query parameters,
    resolve the filter, run the sync, and serialize the result.

    Returns:
        A ``(200, response_content)`` tuple.

    Raises:
        SynapseError: 400 on a ``from`` parameter (use ``since``),
            invalid inline filter JSON, or an unknown stored filter ID.
    """
    if b"from" in request.args:
        # /events used to use 'from', but /sync uses 'since'.
        # Lets be helpful and whine if we see a 'from'.
        raise SynapseError(
            400, "'from' is not a valid query parameter. Did you mean 'since'?"
        )

    requester = await self.auth.get_user_by_req(request, allow_guest=True)
    user = requester.user
    device_id = requester.device_id

    timeout = parse_integer(request, "timeout", default=0)
    since = parse_string(request, "since")
    set_presence = parse_string(
        request,
        "set_presence",
        default="online",
        allowed_values=self.ALLOWED_PRESENCE,
    )
    filter_id = parse_string(request, "filter", default=None)
    full_state = parse_boolean(request, "full_state", default=False)

    logger.debug(
        "/sync: user=%r, timeout=%r, since=%r, "
        "set_presence=%r, filter_id=%r, device_id=%r",
        user,
        timeout,
        since,
        set_presence,
        filter_id,
        device_id,
    )

    # Key used to deduplicate identical concurrent sync requests.
    request_key = (user, timeout, since, filter_id, full_state, device_id)

    if filter_id is None:
        filter_collection = DEFAULT_FILTER_COLLECTION
    elif filter_id.startswith("{"):
        # Inline JSON filter supplied directly in the query string.
        try:
            filter_object = json.loads(filter_id)
            set_timeline_upper_limit(
                filter_object, self.hs.config.filter_timeline_limit
            )
        except Exception:
            raise SynapseError(400, "Invalid filter JSON")
        self.filtering.check_valid_filter(filter_object)
        filter_collection = FilterCollection(filter_object)
    else:
        # Otherwise treat it as the ID of a previously-stored filter.
        try:
            filter_collection = await self.filtering.get_user_filter(
                user.localpart, filter_id
            )
        except StoreError as err:
            if err.code != 404:
                raise
            # fix up the description and errcode to be more useful
            raise SynapseError(
                400, "No such filter", errcode=Codes.INVALID_PARAM
            )

    sync_config = SyncConfig(
        user=user,
        filter_collection=filter_collection,
        is_guest=requester.is_guest,
        request_key=request_key,
        device_id=device_id,
    )

    if since is not None:
        since_token = StreamToken.from_string(since)
    else:
        since_token = None

    # send any outstanding server notices to the user.
    await self._server_notices_sender.on_user_syncing(user.to_string())

    affect_presence = set_presence != PresenceState.OFFLINE

    if affect_presence:
        await self.presence_handler.set_state(
            user, {"presence": set_presence}, True
        )

    # Mark the user as syncing for the duration of the sync itself.
    context = await self.presence_handler.user_syncing(
        user.to_string(), affect_presence=affect_presence
    )
    with context:
        sync_result = await self.sync_handler.wait_for_sync_for_user(
            sync_config,
            since_token=since_token,
            timeout=timeout,
            full_state=full_state,
        )

    # the client may have disconnected by now; don't bother to serialize the
    # response if so.
    if request._disconnected:
        logger.info("Client has disconnected; not serializing response.")
        return 200, {}

    time_now = self.clock.time_msec()
    response_content = await self.encode_response(
        time_now, sync_result, requester.access_token_id, filter_collection
    )

    logger.debug("Event formatting complete")
    return 200, response_content
def get_and_dispatch(self):
    """Poll the event stream once for this pusher's user (profile-tag +
    badge variant), dispatch any push / badge update, and maintain the
    pusher's persisted token and failure/backoff state.
    """
    from_tok = StreamToken.from_string(self.last_token)
    # NOTE(review): limit is passed as the string '1' — presumably
    # PaginationConfig coerces it; confirm against its constructor.
    config = PaginationConfig(from_token=from_tok, limit='1')
    # Jittered ~5 minute long-poll timeout (milliseconds).
    timeout = (300 + random.randint(-60, 60)) * 1000
    chunk = yield self.evStreamHandler.get_stream(
        self.user_id, config, timeout=timeout, affect_presence=False,
        only_keys=("room", "receipt",),
    )

    # limiting to 1 may get 1 event plus 1 presence event, so
    # pick out the actual event
    single_event = None
    read_receipt = None
    for c in chunk['chunk']:
        if 'event_id' in c:  # Hmmm...
            single_event = c
        elif c['type'] == 'm.receipt':
            read_receipt = c

    # Did a read receipt for this user arrive (meaning the badge count
    # may have changed)?
    have_updated_badge = False
    if read_receipt:
        for receipt_part in read_receipt['content'].values():
            if 'm.read' in receipt_part:
                if self.user_id in receipt_part['m.read'].keys():
                    have_updated_badge = True

    if not single_event:
        # No pushable event: maybe refresh the badge, advance the stored
        # token, and return without touching failure state.
        if have_updated_badge:
            yield self.update_badge()
        self.last_token = chunk['end']
        yield self.store.update_pusher_last_token(
            self.app_id,
            self.pushkey,
            self.user_id,
            self.last_token
        )
        return

    if not self.alive:
        return

    processed = False

    rule_evaluator = yield \
        push_rule_evaluator.evaluator_for_user_id_and_profile_tag(
            self.user_id, self.profile_tag, single_event['room_id'],
            self.store
        )

    actions = yield rule_evaluator.actions_for_event(single_event)
    tweaks = rule_evaluator.tweaks_for_actions(actions)

    if 'notify' in actions:
        self.badge = yield self._get_badge_count()
        rejected = yield self.dispatch_push(single_event, tweaks, self.badge)
        self.has_unread = True
        # A list/tuple result means the HTTP hit succeeded and lists
        # pushkeys the endpoint rejected; anything else is a failure.
        if isinstance(rejected, list) or isinstance(rejected, tuple):
            processed = True
            for pk in rejected:
                if pk != self.pushkey:
                    # for sanity, we only remove the pushkey if it
                    # was the one we actually sent...
                    logger.warn(
                        ("Ignoring rejected pushkey %s because we"
                         " didn't send it"),
                        pk
                    )
                else:
                    logger.info(
                        "Pushkey %s was rejected: removing",
                        pk
                    )
                    yield self.hs.get_pusherpool().remove_pusher(
                        self.app_id, pk, self.user_id
                    )
    else:
        if have_updated_badge:
            yield self.update_badge()
        processed = True

    if not self.alive:
        return

    if processed:
        # Success: reset backoff, persist the new token, clear any
        # recorded failure start time.
        self.backoff_delay = Pusher.INITIAL_BACKOFF
        self.last_token = chunk['end']
        yield self.store.update_pusher_last_token_and_success(
            self.app_id,
            self.pushkey,
            self.user_id,
            self.last_token,
            self.clock.time_msec()
        )
        if self.failing_since:
            self.failing_since = None
            yield self.store.update_pusher_failing_since(
                self.app_id, self.pushkey, self.user_id,
                self.failing_since)
    else:
        # Failure: record when failures started (if not already).
        if not self.failing_since:
            self.failing_since = self.clock.time_msec()
            yield self.store.update_pusher_failing_since(
                self.app_id,
                self.pushkey,
                self.user_id,
                self.failing_since
            )

        if (self.failing_since and
                self.failing_since <
                self.clock.time_msec() - Pusher.GIVE_UP_AFTER):
            # we really only give up so that if the URL gets
            # fixed, we don't suddenly deliver a load
            # of old notifications.
            logger.warn("Giving up on a notification to user %s, "
                        "pushkey %s", self.user_id, self.pushkey)
            self.backoff_delay = Pusher.INITIAL_BACKOFF
            self.last_token = chunk['end']
            yield self.store.update_pusher_last_token(
                self.app_id,
                self.pushkey,
                self.user_id,
                self.last_token
            )

            self.failing_since = None
            yield self.store.update_pusher_failing_since(
                self.app_id,
                self.pushkey,
                self.user_id,
                self.failing_since
            )
        else:
            logger.warn("Failed to dispatch push for user %s "
                        "(failing for %dms)."
                        "Trying again in %dms",
                        self.user_id,
                        self.clock.time_msec() - self.failing_since,
                        self.backoff_delay)
            # Exponential backoff, capped at MAX_BACKOFF.
            yield synapse.util.async.sleep(self.backoff_delay / 1000.0)
            self.backoff_delay *= 2
            if self.backoff_delay > Pusher.MAX_BACKOFF:
                self.backoff_delay = Pusher.MAX_BACKOFF
def get_and_dispatch(self):
    """Poll the event stream once for this pusher's user (earliest
    variant: no profile tag, no badge), dispatch any push, and maintain
    the pusher's in-memory/persisted token and failure/backoff state.
    """
    from_tok = StreamToken.from_string(self.last_token)
    # NOTE(review): limit is passed as the string '1' — presumably
    # PaginationConfig coerces it; confirm against its constructor.
    config = PaginationConfig(from_token=from_tok, limit='1')
    # Jittered ~5 minute long-poll timeout (milliseconds).
    timeout = (300 + random.randint(-60, 60)) * 1000
    chunk = yield self.evStreamHandler.get_stream(
        self.user_name, config,
        timeout=timeout, affect_presence=False
    )

    # limiting to 1 may get 1 event plus 1 presence event, so
    # pick out the actual event
    single_event = None
    for c in chunk['chunk']:
        if 'event_id' in c:  # Hmmm...
            single_event = c
            break
    if not single_event:
        # Long-poll timed out with nothing pushable: advance the
        # in-memory token only (not persisted here).
        self.last_token = chunk['end']
        logger.debug("Event stream timeout for pushkey %s", self.pushkey)
        return

    if not self.alive:
        return

    processed = False
    actions = yield self._actions_for_event(single_event)
    tweaks = _tweaks_for_actions(actions)

    if len(actions) == 0:
        logger.warn("Empty actions! Using default action.")
        actions = Pusher.DEFAULT_ACTIONS

    if 'notify' not in actions and 'dont_notify' not in actions:
        logger.warn("Neither notify nor dont_notify in actions: adding default")
        actions.extend(Pusher.DEFAULT_ACTIONS)

    if 'dont_notify' in actions:
        logger.debug(
            "%s for %s: dont_notify",
            single_event['event_id'], self.user_name
        )
        processed = True
    else:
        rejected = yield self.dispatch_push(single_event, tweaks)
        self.has_unread = True
        # A list/tuple result means the HTTP hit succeeded and lists
        # pushkeys the endpoint rejected; anything else is a failure.
        if isinstance(rejected, list) or isinstance(rejected, tuple):
            processed = True
            for pk in rejected:
                if pk != self.pushkey:
                    # for sanity, we only remove the pushkey if it
                    # was the one we actually sent...
                    logger.warn(
                        ("Ignoring rejected pushkey %s because we"
                         " didn't send it"),
                        pk
                    )
                else:
                    logger.info(
                        "Pushkey %s was rejected: removing",
                        pk
                    )
                    yield self.hs.get_pusherpool().remove_pusher(
                        self.app_id, pk, self.user_name
                    )

    if not self.alive:
        return

    if processed:
        # Success: reset backoff and persist the new token.
        # NOTE(review): these store calls are not yielded — presumably
        # deliberate fire-and-forget deferreds; confirm before relying
        # on write ordering.
        self.backoff_delay = Pusher.INITIAL_BACKOFF
        self.last_token = chunk['end']
        self.store.update_pusher_last_token_and_success(
            self.app_id,
            self.pushkey,
            self.user_name,
            self.last_token,
            self.clock.time_msec()
        )
        if self.failing_since:
            self.failing_since = None
            self.store.update_pusher_failing_since(
                self.app_id, self.pushkey, self.user_name,
                self.failing_since)
    else:
        # Failure: record when failures started (if not already).
        if not self.failing_since:
            self.failing_since = self.clock.time_msec()
            self.store.update_pusher_failing_since(
                self.app_id,
                self.pushkey,
                self.user_name,
                self.failing_since
            )

        if (self.failing_since and
                self.failing_since <
                self.clock.time_msec() - Pusher.GIVE_UP_AFTER):
            # we really only give up so that if the URL gets
            # fixed, we don't suddenly deliver a load
            # of old notifications.
            logger.warn("Giving up on a notification to user %s, "
                        "pushkey %s", self.user_name, self.pushkey)
            self.backoff_delay = Pusher.INITIAL_BACKOFF
            self.last_token = chunk['end']
            self.store.update_pusher_last_token(
                self.app_id,
                self.pushkey,
                self.user_name,
                self.last_token
            )

            self.failing_since = None
            self.store.update_pusher_failing_since(
                self.app_id,
                self.pushkey,
                self.user_name,
                self.failing_since
            )
        else:
            logger.warn("Failed to dispatch push for user %s "
                        "(failing for %dms)."
                        "Trying again in %dms",
                        self.user_name,
                        self.clock.time_msec() - self.failing_since,
                        self.backoff_delay)
            # Exponential backoff, capped at MAX_BACKOFF.
            yield synapse.util.async.sleep(self.backoff_delay / 1000.0)
            self.backoff_delay *= 2
            if self.backoff_delay > Pusher.MAX_BACKOFF:
                self.backoff_delay = Pusher.MAX_BACKOFF
def start(self):
    """Main pusher loop: establish a starting stream token if needed,
    then repeatedly long-poll the event stream and dispatch pushes until
    ``self.alive`` is cleared, with exponential backoff on failure.
    """
    if not self.last_token:
        # First-time setup: get a token to start from (we can't
        # just start from no token, ie. 'now'
        # because we need the result to be reproduceable in case
        # we fail to dispatch the push)
        config = PaginationConfig(from_token=None, limit='1')
        chunk = yield self.evStreamHandler.get_stream(
            self.user_name, config, timeout=0)
        self.last_token = chunk['end']
        # NOTE(review): this store call is not yielded — presumably
        # deliberate fire-and-forget; confirm.
        self.store.update_pusher_last_token(
            self.app_id, self.pushkey, self.last_token)
        logger.info("Pusher %s for user %s starting from token %s",
                    self.pushkey, self.user_name, self.last_token)

    while self.alive:
        from_tok = StreamToken.from_string(self.last_token)
        # NOTE(review): limit is passed as the string '1' — presumably
        # PaginationConfig coerces it; confirm against its constructor.
        config = PaginationConfig(from_token=from_tok, limit='1')
        # Effectively-infinite timeout (~100 years, in milliseconds).
        chunk = yield self.evStreamHandler.get_stream(
            self.user_name, config,
            timeout=100*365*24*60*60*1000, affect_presence=False
        )

        # limiting to 1 may get 1 event plus 1 presence event, so
        # pick out the actual event
        single_event = None
        for c in chunk['chunk']:
            if 'event_id' in c:  # Hmmm...
                single_event = c
                break
        if not single_event:
            self.last_token = chunk['end']
            continue

        if not self.alive:
            continue

        processed = False
        actions = yield self._actions_for_event(single_event)
        tweaks = _tweaks_for_actions(actions)

        if len(actions) == 0:
            logger.warn("Empty actions! Using default action.")
            actions = Pusher.DEFAULT_ACTIONS

        if 'notify' not in actions and 'dont_notify' not in actions:
            logger.warn("Neither notify nor dont_notify in actions: adding default")
            actions.extend(Pusher.DEFAULT_ACTIONS)

        if 'dont_notify' in actions:
            logger.debug(
                "%s for %s: dont_notify",
                single_event['event_id'], self.user_name
            )
            processed = True
        else:
            rejected = yield self.dispatch_push(single_event, tweaks)
            self.has_unread = True
            # A list/tuple result means the HTTP hit succeeded and lists
            # pushkeys the endpoint rejected; anything else is a failure.
            if isinstance(rejected, list) or isinstance(rejected, tuple):
                processed = True
                for pk in rejected:
                    if pk != self.pushkey:
                        # for sanity, we only remove the pushkey if it
                        # was the one we actually sent...
                        logger.warn(
                            ("Ignoring rejected pushkey %s because we"
                             " didn't send it"),
                            pk
                        )
                    else:
                        logger.info(
                            "Pushkey %s was rejected: removing",
                            pk
                        )
                        yield self.hs.get_pusherpool().remove_pusher(
                            self.app_id, pk
                        )

        if not self.alive:
            continue

        if processed:
            # Success: reset backoff and persist the new token
            # (store calls not yielded — see NOTE above).
            self.backoff_delay = Pusher.INITIAL_BACKOFF
            self.last_token = chunk['end']
            self.store.update_pusher_last_token_and_success(
                self.app_id,
                self.pushkey,
                self.last_token,
                self.clock.time_msec()
            )
            if self.failing_since:
                self.failing_since = None
                self.store.update_pusher_failing_since(
                    self.app_id,
                    self.pushkey,
                    self.failing_since)
        else:
            # Failure: record when failures started (if not already).
            if not self.failing_since:
                self.failing_since = self.clock.time_msec()
                self.store.update_pusher_failing_since(
                    self.app_id,
                    self.pushkey,
                    self.failing_since
                )

            if (self.failing_since and
                    self.failing_since <
                    self.clock.time_msec() - Pusher.GIVE_UP_AFTER):
                # we really only give up so that if the URL gets
                # fixed, we don't suddenly deliver a load
                # of old notifications.
                logger.warn("Giving up on a notification to user %s, "
                            "pushkey %s",
                            self.user_name, self.pushkey)
                self.backoff_delay = Pusher.INITIAL_BACKOFF
                self.last_token = chunk['end']
                self.store.update_pusher_last_token(
                    self.app_id,
                    self.pushkey,
                    self.last_token
                )

                self.failing_since = None
                self.store.update_pusher_failing_since(
                    self.app_id,
                    self.pushkey,
                    self.failing_since
                )
            else:
                logger.warn("Failed to dispatch push for user %s "
                            "(failing for %dms)."
                            "Trying again in %dms",
                            self.user_name,
                            self.clock.time_msec() - self.failing_since,
                            self.backoff_delay)
                # Exponential backoff, capped at MAX_BACKOFF.
                yield synapse.util.async.sleep(self.backoff_delay / 1000.0)
                self.backoff_delay *= 2
                if self.backoff_delay > Pusher.MAX_BACKOFF:
                    self.backoff_delay = Pusher.MAX_BACKOFF