def from_request(cls, request, raise_invalid_params=True, default_limit=None):
    """Build a pagination config from the request's query parameters.

    Reads "dir", "from", "to" and "limit" and raises a 400 SynapseError
    if any of them is malformed.
    """
    direction = parse_string(
        request, "dir", default="f", allowed_values=["f", "b"]
    )

    raw_from = parse_string(request, "from")
    raw_to = parse_string(request, "to")

    if raw_from == "END":
        # For backwards compat.
        from_token = None
    elif raw_from:
        try:
            from_token = StreamToken.from_string(raw_from)
        except Exception:
            raise SynapseError(400, "'from' parameter is invalid")
    else:
        # Absent / empty value is passed through unchanged.
        from_token = raw_from

    to_token = raw_to
    if raw_to:
        try:
            to_token = StreamToken.from_string(raw_to)
        except Exception:
            raise SynapseError(400, "'to' parameter is invalid")

    limit = parse_integer(request, "limit", default=default_limit)
    if limit and limit < 0:
        raise SynapseError(400, "Limit must be 0 or above")

    try:
        return PaginationConfig(from_token, to_token, direction, limit)
    except Exception:
        logger.exception("Failed to create pagination config")
        raise SynapseError(400, "Invalid request.")
def from_request(cls, request, raise_invalid_params=True, default_limit=None):
    """Create a PaginationConfig from an HTTP request's query parameters.

    Args:
        request: the incoming HTTP request.
        raise_invalid_params (bool): kept for interface compatibility.
        default_limit (int|None): limit to use when none is supplied.

    Returns:
        PaginationConfig

    Raises:
        SynapseError: (400) if "dir", "from", "to" or "limit" is invalid.
    """
    direction = parse_string(request, "dir", default='f', allowed_values=['f', 'b'])

    from_tok = parse_string(request, "from")
    to_tok = parse_string(request, "to")

    try:
        if from_tok == "END":
            from_tok = None  # For backwards compat.
        elif from_tok:
            from_tok = StreamToken.from_string(from_tok)
    except Exception:
        # Fixed typo in the user-facing message ("paramater" -> "parameter").
        raise SynapseError(400, "'from' parameter is invalid")

    try:
        if to_tok:
            to_tok = StreamToken.from_string(to_tok)
    except Exception:
        raise SynapseError(400, "'to' parameter is invalid")

    limit = parse_integer(request, "limit", default=default_limit)
    if limit and limit < 0:
        raise SynapseError(400, "Limit must be 0 or above")

    try:
        return PaginationConfig(from_tok, to_tok, direction, limit)
    except Exception:
        logger.exception("Failed to create pagination config")
        raise SynapseError(400, "Invalid request.")
def from_request(cls, request, raise_invalid_params=True, default_limit=None):
    """Create a PaginationConfig from an HTTP request's query parameters.

    Raises:
        SynapseError: (400) on any malformed or duplicated query parameter.
    """
    def get_param(name, default=None):
        # A query parameter supplied more than once is ambiguous: reject it.
        lst = request.args.get(name, [])
        if len(lst) > 1:
            raise SynapseError(400, "%s must be specified only once" % (name, ))
        elif len(lst) == 1:
            return lst[0]
        else:
            return default

    direction = get_param("dir", 'f')
    if direction not in ['f', 'b']:
        raise SynapseError(400, "'dir' parameter is invalid.")

    from_tok = get_param("from")
    to_tok = get_param("to")

    try:
        if from_tok == "END":
            from_tok = None  # For backwards compat.
        elif from_tok:
            from_tok = StreamToken.from_string(from_tok)
    except Exception:
        # Narrowed from a bare `except:` (which would also swallow
        # KeyboardInterrupt/SystemExit); also fixed the "paramater" typo.
        raise SynapseError(400, "'from' parameter is invalid")

    try:
        if to_tok:
            to_tok = StreamToken.from_string(to_tok)
    except Exception:
        raise SynapseError(400, "'to' parameter is invalid")

    limit = get_param("limit", None)
    if limit is not None and not limit.isdigit():
        raise SynapseError(400, "'limit' parameter must be an integer.")
    if limit is None:
        limit = default_limit

    try:
        return PaginationConfig(from_tok, to_tok, direction, limit)
    except Exception:
        logger.exception("Failed to create pagination config")
        raise SynapseError(400, "Invalid request.")
def from_request(cls, request, raise_invalid_params=True, default_limit=None):
    """Create a PaginationConfig from an HTTP request's query parameters.

    Raises:
        SynapseError: (400) on any malformed or duplicated query parameter.
    """
    def get_param(name, default=None):
        # A query parameter supplied more than once is ambiguous: reject it.
        lst = request.args.get(name, [])
        if len(lst) > 1:
            raise SynapseError(
                400, "%s must be specified only once" % (name,)
            )
        elif len(lst) == 1:
            return lst[0]
        else:
            return default

    direction = get_param("dir", 'f')
    if direction not in ['f', 'b']:
        raise SynapseError(400, "'dir' parameter is invalid.")

    from_tok = get_param("from")
    to_tok = get_param("to")

    try:
        if from_tok == "END":
            from_tok = None  # For backwards compat.
        elif from_tok:
            from_tok = StreamToken.from_string(from_tok)
    except Exception:
        # Fixed typo in the user-facing message ("paramater" -> "parameter").
        raise SynapseError(400, "'from' parameter is invalid")

    try:
        if to_tok:
            to_tok = StreamToken.from_string(to_tok)
    except Exception:
        raise SynapseError(400, "'to' parameter is invalid")

    limit = get_param("limit", None)
    if limit is not None and not limit.isdigit():
        raise SynapseError(400, "'limit' parameter must be an integer.")
    if limit is None:
        limit = default_limit

    try:
        return PaginationConfig(from_tok, to_tok, direction, limit)
    except Exception:
        logger.exception("Failed to create pagination config")
        raise SynapseError(400, "Invalid request.")
def on_GET(self, request):
    """Handle a /sync GET: authenticate, parse query params, run the sync
    and encode the result.

    Raises:
        SynapseError: (400) via the parse_* helpers on invalid parameters.
    """
    user, client = yield self.auth.get_user_by_req(request)

    timeout = parse_integer(request, "timeout", default=0)
    limit = parse_integer(request, "limit", required=True)
    gap = parse_boolean(request, "gap", default=True)
    sort = parse_string(
        request, "sort", default="timeline,asc",
        allowed_values=self.ALLOWED_SORT,
    )
    since = parse_string(request, "since")
    set_presence = parse_string(
        request, "set_presence", default="online",
        allowed_values=self.ALLOWED_PRESENCE,
    )
    backfill = parse_boolean(request, "backfill", default=False)
    filter_id = parse_string(request, "filter", default=None)

    # Lazy %-args: formatting only happens if INFO logging is enabled.
    logger.info(
        "/sync: user=%r, timeout=%r, limit=%r, gap=%r, sort=%r, since=%r,"
        " set_presence=%r, backfill=%r, filter_id=%r",
        user, timeout, limit, gap, sort, since, set_presence, backfill,
        filter_id,
    )

    # TODO(mjark): Load filter and apply overrides.
    try:
        filter = yield self.filtering.get_user_filter(user.localpart, filter_id)
    except Exception:
        # Narrowed from a bare `except:`. Missing/invalid filters fall
        # back to an empty filter (deliberate best-effort behaviour).
        filter = Filter({})

    sync_config = SyncConfig(
        user=user,
        client_info=client,
        gap=gap,
        limit=limit,
        sort=sort,
        backfill=backfill,
        filter=filter,
    )

    if since is not None:
        since_token = StreamToken.from_string(since)
    else:
        since_token = None

    sync_result = yield self.sync_handler.wait_for_sync_for_user(
        sync_config, since_token=since_token, timeout=timeout
    )

    time_now = self.clock.time_msec()

    response_content = {
        "public_user_data": self.encode_user_data(
            sync_result.public_user_data, filter, time_now
        ),
        "private_user_data": self.encode_user_data(
            sync_result.private_user_data, filter, time_now
        ),
        "rooms": self.encode_rooms(
            sync_result.rooms, filter, time_now, client.token_id
        ),
        "next_batch": sync_result.next_batch.to_string(),
    }

    defer.returnValue((200, response_content))
def on_GET(self, request):
    """Return the user IDs whose device lists changed since "from"."""
    requester = yield self.auth.get_user_by_req(request, allow_guest=True)

    raw_from = parse_string(request, "from")

    # We want to enforce they do pass us one, but we ignore it and return
    # changes after the "to" as well as before.
    parse_string(request, "to")

    changed = yield self.device_handler.get_user_ids_changed(
        requester.user.to_string(), StreamToken.from_string(raw_from)
    )

    defer.returnValue((200, changed))
async def on_GET(self, request):
    """Return the user IDs whose device lists changed since "from"."""
    requester = await self.auth.get_user_by_req(request, allow_guest=True)

    raw_from = parse_string(request, "from")
    set_tag("from", raw_from)

    # We want to enforce they do pass us one, but we ignore it and return
    # changes after the "to" as well as before.
    set_tag("to", parse_string(request, "to"))

    token = StreamToken.from_string(raw_from)
    changed = await self.device_handler.get_user_ids_changed(
        requester.user.to_string(), token
    )

    return 200, changed
def on_GET(self, request, room_id):
    # TODO support Pagination stream API (limit/tokens)
    """Return a room's m.room.member state events, optionally filtered by
    membership / not_membership and evaluated at an optional "at" token.
    """
    requester = yield self.auth.get_user_by_req(request)
    handler = self.message_handler

    # request the state as of a given event, as identified by a stream token,
    # for consistency with /messages etc.
    # useful for getting the membership in retrospect as of a given /sync
    # response.
    at_token_string = parse_string(request, "at")
    if at_token_string is None:
        at_token = None
    else:
        at_token = StreamToken.from_string(at_token_string)

    # let you filter down on particular memberships.
    # XXX: this may not be the best shape for this API - we could pass in a filter
    # instead, except filters aren't currently aware of memberships.
    # See https://github.com/matrix-org/matrix-doc/issues/1337 for more details.
    membership = parse_string(request, "membership")
    not_membership = parse_string(request, "not_membership")

    events = yield handler.get_state_events(
        room_id=room_id,
        user_id=requester.user.to_string(),
        at_token=at_token,
        state_filter=StateFilter.from_types([(EventTypes.Member, None)]),
    )

    chunk = []
    for event in events:
        if (
            (membership and event['content'].get("membership") != membership) or
            (not_membership and event['content'].get("membership") == not_membership)
        ):
            continue
        # Fix: the append had been commented out during debugging, so the
        # endpoint always returned an empty chunk; also removed the stray
        # debug print statements that leaked into this handler.
        chunk.append(event)

    defer.returnValue((200, {"chunk": chunk}))
def on_GET(self, request):
    """List user IDs whose device lists changed since the "from" token."""
    requester = yield self.auth.get_user_by_req(request, allow_guest=True)

    raw_from = parse_string(request, "from")

    # We want to enforce they do pass us one, but we ignore it and return
    # changes after the "to" as well as before.
    parse_string(request, "to")

    token = StreamToken.from_string(raw_from)
    who = requester.user.to_string()

    changed = yield self.device_handler.get_user_ids_changed(who, token)

    defer.returnValue((200, changed))
def on_GET(self, request, room_id):
    # TODO support Pagination stream API (limit/tokens)
    """Return a room's m.room.member state events, optionally restricted
    by membership / not_membership, as of the optional "at" stream token.
    """
    requester = yield self.auth.get_user_by_req(request)
    handler = self.message_handler

    # request the state as of a given event, as identified by a stream token,
    # for consistency with /messages etc.
    # useful for getting the membership in retrospect as of a given /sync
    # response.
    at_token_string = parse_string(request, "at")
    at_token = (
        StreamToken.from_string(at_token_string)
        if at_token_string is not None
        else None
    )

    # let you filter down on particular memberships.
    # XXX: this may not be the best shape for this API - we could pass in a filter
    # instead, except filters aren't currently aware of memberships.
    # See https://github.com/matrix-org/matrix-doc/issues/1337 for more details.
    membership = parse_string(request, "membership")
    not_membership = parse_string(request, "not_membership")

    events = yield handler.get_state_events(
        room_id=room_id,
        user_id=requester.user.to_string(),
        at_token=at_token,
        state_filter=StateFilter.from_types([(EventTypes.Member, None)]),
    )

    def wanted(ev):
        # Keep the event unless a membership constraint excludes it.
        m = ev['content'].get("membership")
        if membership and m != membership:
            return False
        if not_membership and m == not_membership:
            return False
        return True

    chunk = [ev for ev in events if wanted(ev)]

    defer.returnValue((200, {
        "chunk": chunk
    }))
def on_GET(self, request):
    """Handle /sync: parse query params, run the sync for the authenticated
    user and encode the response.

    Raises:
        SynapseError: (400) on invalid parameters or invalid filter JSON.
    """
    if "from" in request.args:
        # /events used to use 'from', but /sync uses 'since'.
        # Lets be helpful and whine if we see a 'from'.
        raise SynapseError(
            400, "'from' is not a valid query parameter. Did you mean 'since'?"
        )

    requester = yield self.auth.get_user_by_req(
        request, allow_guest=True
    )
    user = requester.user

    timeout = parse_integer(request, "timeout", default=0)
    since = parse_string(request, "since")
    set_presence = parse_string(
        request, "set_presence", default="online",
        allowed_values=self.ALLOWED_PRESENCE
    )
    filter_id = parse_string(request, "filter", default=None)
    full_state = parse_boolean(request, "full_state", default=False)

    logger.info(
        "/sync: user=%r, timeout=%r, since=%r,"
        " set_presence=%r, filter_id=%r",
        user, timeout, since, set_presence, filter_id,
    )

    if filter_id:
        if filter_id.startswith('{'):
            # Inline JSON filter definition rather than a stored filter id.
            try:
                filter_object = json.loads(filter_id)
            except Exception:
                # Narrowed from a bare `except:` (which would also catch
                # KeyboardInterrupt/SystemExit).
                raise SynapseError(400, "Invalid filter JSON")
            self.filtering.check_valid_filter(filter_object)
            filter = FilterCollection(filter_object)
        else:
            filter = yield self.filtering.get_user_filter(
                user.localpart, filter_id
            )
    else:
        filter = DEFAULT_FILTER_COLLECTION

    sync_config = SyncConfig(
        user=user,
        filter_collection=filter,
        is_guest=requester.is_guest,
    )

    if since is not None:
        since_token = StreamToken.from_string(since)
    else:
        since_token = None

    if set_presence == "online":
        yield self.event_stream_handler.started_stream(user)

    try:
        sync_result = yield self.sync_handler.wait_for_sync_for_user(
            sync_config, since_token=since_token, timeout=timeout,
            full_state=full_state
        )
    finally:
        # Always balance started_stream, even if the sync fails.
        if set_presence == "online":
            self.event_stream_handler.stopped_stream(user)

    time_now = self.clock.time_msec()

    joined = self.encode_joined(
        sync_result.joined, time_now, requester.access_token_id
    )
    invited = self.encode_invited(
        sync_result.invited, time_now, requester.access_token_id
    )
    archived = self.encode_archived(
        sync_result.archived, time_now, requester.access_token_id
    )

    response_content = {
        "account_data": {"events": sync_result.account_data},
        "presence": self.encode_presence(
            sync_result.presence, time_now
        ),
        "rooms": {
            "join": joined,
            "invite": invited,
            "leave": archived,
        },
        "next_batch": sync_result.next_batch.to_string(),
    }

    defer.returnValue((200, response_content))
def on_GET(self, request):
    """Handle /sync: parse query params, deliver pending server notices,
    update presence, run the sync and encode the response.

    Note the ordering of side effects below (server notices, then presence
    set_state, then user_syncing, then the sync itself) — it must be
    preserved.
    """
    if b"from" in request.args:
        # /events used to use 'from', but /sync uses 'since'.
        # Lets be helpful and whine if we see a 'from'.
        raise SynapseError(
            400, "'from' is not a valid query parameter. Did you mean 'since'?")

    requester = yield self.auth.get_user_by_req(request, allow_guest=True)
    user = requester.user
    device_id = requester.device_id

    timeout = parse_integer(request, "timeout", default=0)
    since = parse_string(request, "since")
    set_presence = parse_string(
        request, "set_presence", default="online",
        allowed_values=self.ALLOWED_PRESENCE,
    )
    filter_id = parse_string(request, "filter", default=None)
    full_state = parse_boolean(request, "full_state", default=False)

    logger.debug(
        "/sync: user=%r, timeout=%r, since=%r,"
        " set_presence=%r, filter_id=%r, device_id=%r"
        % (user, timeout, since, set_presence, filter_id, device_id))

    # Key used to dedupe identical concurrent sync requests (presumably by
    # a response cache in the handler — TODO confirm against the caller).
    request_key = (user, timeout, since, filter_id, full_state, device_id)

    if filter_id:
        if filter_id.startswith("{"):
            # Inline JSON filter definition rather than a stored filter id.
            try:
                filter_object = json.loads(filter_id)
                # Clamp the timeline limit to the server-configured maximum.
                set_timeline_upper_limit(
                    filter_object, self.hs.config.filter_timeline_limit)
            except Exception:
                raise SynapseError(400, "Invalid filter JSON")
            self.filtering.check_valid_filter(filter_object)
            filter = FilterCollection(filter_object)
        else:
            filter = yield self.filtering.get_user_filter(
                user.localpart, filter_id)
    else:
        filter = DEFAULT_FILTER_COLLECTION

    sync_config = SyncConfig(
        user=user,
        filter_collection=filter,
        is_guest=requester.is_guest,
        request_key=request_key,
        device_id=device_id,
    )

    if since is not None:
        since_token = StreamToken.from_string(since)
    else:
        since_token = None

    # send any outstanding server notices to the user.
    yield self._server_notices_sender.on_user_syncing(user.to_string())

    affect_presence = set_presence != PresenceState.OFFLINE

    if affect_presence:
        # Apply the requested presence state before entering the syncing
        # context.
        yield self.presence_handler.set_state(user, {"presence": set_presence}, True)

    context = yield self.presence_handler.user_syncing(
        user.to_string(), affect_presence=affect_presence)
    with context:
        sync_result = yield self.sync_handler.wait_for_sync_for_user(
            sync_config, since_token=since_token, timeout=timeout,
            full_state=full_state,
        )

    time_now = self.clock.time_msec()

    response_content = yield self.encode_response(
        time_now, sync_result, requester.access_token_id, filter)

    return 200, response_content
def on_GET(self, request):
    """Handle /sync: parse query params, update presence, run the sync and
    encode the response (including to_device and device_lists sections).

    Raises:
        SynapseError: (400) on invalid parameters or invalid filter JSON.
    """
    if "from" in request.args:
        # /events used to use 'from', but /sync uses 'since'.
        # Lets be helpful and whine if we see a 'from'.
        raise SynapseError(
            400, "'from' is not a valid query parameter. Did you mean 'since'?")

    requester = yield self.auth.get_user_by_req(request, allow_guest=True)
    user = requester.user
    device_id = requester.device_id

    timeout = parse_integer(request, "timeout", default=0)
    since = parse_string(request, "since")
    set_presence = parse_string(
        request, "set_presence", default="online",
        allowed_values=self.ALLOWED_PRESENCE,
    )
    filter_id = parse_string(request, "filter", default=None)
    full_state = parse_boolean(request, "full_state", default=False)

    logger.info(
        "/sync: user=%r, timeout=%r, since=%r,"
        " set_presence=%r, filter_id=%r, device_id=%r",
        user, timeout, since, set_presence, filter_id, device_id,
    )

    # Key used to dedupe identical concurrent sync requests.
    request_key = (user, timeout, since, filter_id, full_state, device_id)

    if filter_id:
        if filter_id.startswith('{'):
            # Inline JSON filter definition rather than a stored filter id.
            try:
                filter_object = json.loads(filter_id)
            except Exception:
                # Narrowed from a bare `except:` (which would also catch
                # KeyboardInterrupt/SystemExit).
                raise SynapseError(400, "Invalid filter JSON")
            self.filtering.check_valid_filter(filter_object)
            filter = FilterCollection(filter_object)
        else:
            filter = yield self.filtering.get_user_filter(
                user.localpart, filter_id)
    else:
        filter = DEFAULT_FILTER_COLLECTION

    sync_config = SyncConfig(
        user=user,
        filter_collection=filter,
        is_guest=requester.is_guest,
        request_key=request_key,
        device_id=device_id,
    )

    if since is not None:
        since_token = StreamToken.from_string(since)
    else:
        since_token = None

    affect_presence = set_presence != PresenceState.OFFLINE

    if affect_presence:
        yield self.presence_handler.set_state(user, {"presence": set_presence}, True)

    context = yield self.presence_handler.user_syncing(
        user.to_string(), affect_presence=affect_presence,
    )
    with context:
        sync_result = yield self.sync_handler.wait_for_sync_for_user(
            sync_config, since_token=since_token, timeout=timeout,
            full_state=full_state)

    time_now = self.clock.time_msec()

    joined = self.encode_joined(
        sync_result.joined, time_now, requester.access_token_id,
        filter.event_fields,
    )
    invited = self.encode_invited(
        sync_result.invited, time_now, requester.access_token_id
    )
    archived = self.encode_archived(
        sync_result.archived, time_now, requester.access_token_id,
        filter.event_fields,
    )

    response_content = {
        "account_data": {"events": sync_result.account_data},
        "to_device": {"events": sync_result.to_device},
        "device_lists": {
            "changed": list(sync_result.device_lists),
        },
        "presence": self.encode_presence(sync_result.presence, time_now),
        "rooms": {
            "join": joined,
            "invite": invited,
            "leave": archived,
        },
        "next_batch": sync_result.next_batch.to_string(),
    }

    defer.returnValue((200, response_content))
def on_GET(self, request):
    """Handle /sync: parse query params, run the sync for the authenticated
    user and encode the response.

    Raises:
        SynapseError: (400) via the parse_* helpers on invalid parameters.
    """
    user, token_id, _ = yield self.auth.get_user_by_req(request)

    timeout = parse_integer(request, "timeout", default=0)
    since = parse_string(request, "since")
    set_presence = parse_string(
        request, "set_presence", default="online",
        allowed_values=self.ALLOWED_PRESENCE
    )
    filter_id = parse_string(request, "filter", default=None)
    full_state = parse_boolean(request, "full_state", default=False)

    logger.info(
        "/sync: user=%r, timeout=%r, since=%r,"
        " set_presence=%r, filter_id=%r",
        user, timeout, since, set_presence, filter_id,
    )

    try:
        filter = yield self.filtering.get_user_filter(
            user.localpart, filter_id
        )
    except Exception:
        # Narrowed from a bare `except:`. Missing/invalid filters fall
        # back to an empty collection (deliberate best-effort behaviour).
        filter = FilterCollection({})

    sync_config = SyncConfig(
        user=user,
        filter=filter,
    )

    if since is not None:
        since_token = StreamToken.from_string(since)
    else:
        since_token = None

    if set_presence == "online":
        yield self.event_stream_handler.started_stream(user)

    try:
        sync_result = yield self.sync_handler.wait_for_sync_for_user(
            sync_config, since_token=since_token, timeout=timeout,
            full_state=full_state
        )
    finally:
        # Always balance started_stream, even if the sync fails.
        if set_presence == "online":
            self.event_stream_handler.stopped_stream(user)

    time_now = self.clock.time_msec()

    joined = self.encode_joined(
        sync_result.joined, filter, time_now, token_id
    )
    invited = self.encode_invited(
        sync_result.invited, filter, time_now, token_id
    )
    archived = self.encode_archived(
        sync_result.archived, filter, time_now, token_id
    )

    response_content = {
        "presence": self.encode_presence(
            sync_result.presence, filter, time_now
        ),
        "rooms": {
            "join": joined,
            "invite": invited,
            "leave": archived,
        },
        "next_batch": sync_result.next_batch.to_string(),
    }

    defer.returnValue((200, response_content))
def get_and_dispatch(self):
    """Fetch the next event (or read receipt) from the user's stream and
    dispatch a push / badge update for it, with backoff bookkeeping.

    State machine notes: `self.last_token` is only advanced once the chunk
    has been handled (or deliberately skipped); `self.failing_since` /
    `self.backoff_delay` track delivery failure so we can give up after
    Pusher.GIVE_UP_AFTER rather than replay old notifications.
    """
    from_tok = StreamToken.from_string(self.last_token)
    config = PaginationConfig(from_token=from_tok, limit='1')
    # Randomise the long-poll timeout (~5 min +/- 1 min) so pushers don't
    # all wake up at once.
    timeout = (300 + random.randint(-60, 60)) * 1000
    chunk = yield self.evStreamHandler.get_stream(
        self.user_id, config, timeout=timeout, affect_presence=False,
        only_keys=(
            "room",
            "receipt",
        ),
    )

    # limiting to 1 may get 1 event plus 1 presence event, so
    # pick out the actual event
    single_event = None
    read_receipt = None
    for c in chunk['chunk']:
        if 'event_id' in c:  # Hmmm...
            single_event = c
        elif c['type'] == 'm.receipt':
            read_receipt = c

    # A read receipt naming this user means the unread badge may have
    # changed even if there is no event to push.
    have_updated_badge = False
    if read_receipt:
        for receipt_part in read_receipt['content'].values():
            if 'm.read' in receipt_part:
                if self.user_id in receipt_part['m.read'].keys():
                    have_updated_badge = True

    if not single_event:
        # No pushable event: maybe refresh the badge, then just advance
        # and persist the stream token.
        if have_updated_badge:
            yield self.update_badge()
        self.last_token = chunk['end']
        yield self.store.update_pusher_last_token(
            self.app_id,
            self.pushkey,
            self.user_id,
            self.last_token
        )
        return

    if not self.alive:
        return

    processed = False

    rule_evaluator = yield \
        evaluator_for_user_id(
            self.user_id, single_event['room_id'], self.store
        )

    actions = yield rule_evaluator.actions_for_event(single_event)
    tweaks = rule_evaluator.tweaks_for_actions(actions)

    if 'notify' in actions:
        self.badge = yield self._get_badge_count()
        rejected = yield self.dispatch_push(single_event, tweaks, self.badge)
        self.has_unread = True
        if isinstance(rejected, list) or isinstance(rejected, tuple):
            processed = True
            for pk in rejected:
                if pk != self.pushkey:
                    # for sanity, we only remove the pushkey if it
                    # was the one we actually sent...
                    logger.warn(
                        ("Ignoring rejected pushkey %s because we"
                         " didn't send it"), pk
                    )
                else:
                    logger.info(
                        "Pushkey %s was rejected: removing",
                        pk
                    )
                    yield self.hs.get_pusherpool().remove_pusher(
                        self.app_id, pk, self.user_id
                    )
    else:
        # Not notifying for this event, but the badge may still need
        # updating due to the read receipt above.
        if have_updated_badge:
            yield self.update_badge()
        processed = True

    if not self.alive:
        return

    if processed:
        # Success: reset backoff, persist the advanced token, and clear
        # any failure marker.
        self.backoff_delay = Pusher.INITIAL_BACKOFF
        self.last_token = chunk['end']
        yield self.store.update_pusher_last_token_and_success(
            self.app_id,
            self.pushkey,
            self.user_id,
            self.last_token,
            self.clock.time_msec()
        )
        if self.failing_since:
            self.failing_since = None
            yield self.store.update_pusher_failing_since(
                self.app_id,
                self.pushkey,
                self.user_id,
                self.failing_since
            )
    else:
        if not self.failing_since:
            self.failing_since = self.clock.time_msec()
            yield self.store.update_pusher_failing_since(
                self.app_id,
                self.pushkey,
                self.user_id,
                self.failing_since
            )

        if (self.failing_since and
                self.failing_since <
                self.clock.time_msec() - Pusher.GIVE_UP_AFTER):
            # we really only give up so that if the URL gets
            # fixed, we don't suddenly deliver a load
            # of old notifications.
            logger.warn(
                "Giving up on a notification to user %s, "
                "pushkey %s",
                self.user_id, self.pushkey
            )
            self.backoff_delay = Pusher.INITIAL_BACKOFF
            self.last_token = chunk['end']
            yield self.store.update_pusher_last_token(
                self.app_id,
                self.pushkey,
                self.user_id,
                self.last_token
            )
            self.failing_since = None
            yield self.store.update_pusher_failing_since(
                self.app_id,
                self.pushkey,
                self.user_id,
                self.failing_since
            )
        else:
            logger.warn(
                "Failed to dispatch push for user %s "
                "(failing for %dms)."
                "Trying again in %dms",
                self.user_id,
                self.clock.time_msec() - self.failing_since,
                self.backoff_delay
            )
            # Exponential backoff, capped at MAX_BACKOFF. (`async` here is
            # a py2-era synapse.util module name.)
            yield synapse.util.async.sleep(self.backoff_delay / 1000.0)
            self.backoff_delay *= 2
            if self.backoff_delay > Pusher.MAX_BACKOFF:
                self.backoff_delay = Pusher.MAX_BACKOFF
def get_and_dispatch(self):
    """Fetch the next room event from the user's stream, evaluate push
    rules for it and dispatch a push, with backoff bookkeeping.

    `self.last_token` only advances once the chunk has been handled;
    `self.failing_since` / `self.backoff_delay` implement give-up-after
    and exponential backoff on delivery failure.
    """
    from_tok = StreamToken.from_string(self.last_token)
    config = PaginationConfig(from_token=from_tok, limit='1')
    # Randomise the long-poll timeout (~5 min +/- 1 min) so pushers don't
    # all wake up at once.
    timeout = (300 + random.randint(-60, 60)) * 1000
    chunk = yield self.evStreamHandler.get_stream(
        self.user_name, config,
        timeout=timeout, affect_presence=False, only_room_events=True
    )

    # limiting to 1 may get 1 event plus 1 presence event, so
    # pick out the actual event
    single_event = None
    for c in chunk['chunk']:
        if 'event_id' in c:  # Hmmm...
            single_event = c
            break

    if not single_event:
        # Long-poll timed out with nothing to push: persist the advanced
        # token and return.
        self.last_token = chunk['end']
        logger.debug("Event stream timeout for pushkey %s", self.pushkey)
        yield self.store.update_pusher_last_token(
            self.app_id,
            self.pushkey,
            self.user_name,
            self.last_token
        )
        return

    if not self.alive:
        return

    processed = False

    rule_evaluator = yield \
        push_rule_evaluator.evaluator_for_user_name_and_profile_tag(
            self.user_name, self.profile_tag, single_event['room_id'],
            self.store
        )

    actions = yield rule_evaluator.actions_for_event(single_event)
    tweaks = rule_evaluator.tweaks_for_actions(actions)

    if len(actions) == 0:
        logger.warn("Empty actions! Using default action.")
        actions = Pusher.DEFAULT_ACTIONS

    if 'notify' not in actions and 'dont_notify' not in actions:
        logger.warn(
            "Neither notify nor dont_notify in actions: adding default")
        actions.extend(Pusher.DEFAULT_ACTIONS)

    if 'dont_notify' in actions:
        # Rules explicitly suppress this event; treat it as handled.
        logger.debug(
            "%s for %s: dont_notify",
            single_event['event_id'], self.user_name
        )
        processed = True
    else:
        rejected = yield self.dispatch_push(single_event, tweaks)
        self.has_unread = True
        if isinstance(rejected, list) or isinstance(rejected, tuple):
            processed = True
            for pk in rejected:
                if pk != self.pushkey:
                    # for sanity, we only remove the pushkey if it
                    # was the one we actually sent...
                    logger.warn(
                        ("Ignoring rejected pushkey %s because we"
                         " didn't send it"), pk
                    )
                else:
                    logger.info("Pushkey %s was rejected: removing", pk)
                    yield self.hs.get_pusherpool().remove_pusher(
                        self.app_id, pk, self.user_name
                    )

    if not self.alive:
        return

    if processed:
        # Success: reset backoff, persist the advanced token, and clear
        # any failure marker.
        self.backoff_delay = Pusher.INITIAL_BACKOFF
        self.last_token = chunk['end']
        yield self.store.update_pusher_last_token_and_success(
            self.app_id,
            self.pushkey,
            self.user_name,
            self.last_token,
            self.clock.time_msec()
        )
        if self.failing_since:
            self.failing_since = None
            yield self.store.update_pusher_failing_since(
                self.app_id,
                self.pushkey,
                self.user_name,
                self.failing_since
            )
    else:
        if not self.failing_since:
            self.failing_since = self.clock.time_msec()
            yield self.store.update_pusher_failing_since(
                self.app_id,
                self.pushkey,
                self.user_name,
                self.failing_since
            )

        if (self.failing_since and
                self.failing_since <
                self.clock.time_msec() - Pusher.GIVE_UP_AFTER):
            # we really only give up so that if the URL gets
            # fixed, we don't suddenly deliver a load
            # of old notifications.
            logger.warn(
                "Giving up on a notification to user %s, "
                "pushkey %s",
                self.user_name, self.pushkey
            )
            self.backoff_delay = Pusher.INITIAL_BACKOFF
            self.last_token = chunk['end']
            yield self.store.update_pusher_last_token(
                self.app_id,
                self.pushkey,
                self.user_name,
                self.last_token
            )
            self.failing_since = None
            yield self.store.update_pusher_failing_since(
                self.app_id,
                self.pushkey,
                self.user_name,
                self.failing_since
            )
        else:
            logger.warn(
                "Failed to dispatch push for user %s "
                "(failing for %dms)."
                "Trying again in %dms",
                self.user_name,
                self.clock.time_msec() - self.failing_since,
                self.backoff_delay
            )
            # Exponential backoff, capped at MAX_BACKOFF. (`async` here is
            # a py2-era synapse.util module name.)
            yield synapse.util.async.sleep(self.backoff_delay / 1000.0)
            self.backoff_delay *= 2
            if self.backoff_delay > Pusher.MAX_BACKOFF:
                self.backoff_delay = Pusher.MAX_BACKOFF
def get_and_dispatch(self):
    """Fetch the next room event from the user's stream, compute push
    actions for it and dispatch a push, with backoff bookkeeping.

    Note: unlike earlier variants, the `update_pusher_*` store calls here
    are not yielded — they appear to be fire-and-forget (TODO confirm this
    is intentional).
    """
    from_tok = StreamToken.from_string(self.last_token)
    config = PaginationConfig(from_token=from_tok, limit='1')
    # Randomise the long-poll timeout (~5 min +/- 1 min) so pushers don't
    # all wake up at once.
    timeout = (300 + random.randint(-60, 60)) * 1000
    chunk = yield self.evStreamHandler.get_stream(
        self.user_name, config, timeout=timeout, affect_presence=False
    )

    # limiting to 1 may get 1 event plus 1 presence event, so
    # pick out the actual event
    single_event = None
    for c in chunk['chunk']:
        if 'event_id' in c:  # Hmmm...
            single_event = c
            break

    if not single_event:
        # Long-poll timed out with nothing to push: advance the in-memory
        # token (not persisted here) and return.
        self.last_token = chunk['end']
        logger.debug("Event stream timeout for pushkey %s", self.pushkey)
        return

    if not self.alive:
        return

    processed = False

    actions = yield self._actions_for_event(single_event)
    tweaks = _tweaks_for_actions(actions)

    if len(actions) == 0:
        logger.warn("Empty actions! Using default action.")
        actions = Pusher.DEFAULT_ACTIONS

    if 'notify' not in actions and 'dont_notify' not in actions:
        logger.warn("Neither notify nor dont_notify in actions: adding default")
        actions.extend(Pusher.DEFAULT_ACTIONS)

    if 'dont_notify' in actions:
        # Rules explicitly suppress this event; treat it as handled.
        logger.debug(
            "%s for %s: dont_notify",
            single_event['event_id'], self.user_name
        )
        processed = True
    else:
        rejected = yield self.dispatch_push(single_event, tweaks)
        self.has_unread = True
        if isinstance(rejected, list) or isinstance(rejected, tuple):
            processed = True
            for pk in rejected:
                if pk != self.pushkey:
                    # for sanity, we only remove the pushkey if it
                    # was the one we actually sent...
                    logger.warn(
                        ("Ignoring rejected pushkey %s because we"
                         " didn't send it"),
                        pk
                    )
                else:
                    logger.info(
                        "Pushkey %s was rejected: removing",
                        pk
                    )
                    yield self.hs.get_pusherpool().remove_pusher(
                        self.app_id, pk, self.user_name
                    )

    if not self.alive:
        return

    if processed:
        # Success: reset backoff, persist the advanced token, and clear
        # any failure marker.
        self.backoff_delay = Pusher.INITIAL_BACKOFF
        self.last_token = chunk['end']
        self.store.update_pusher_last_token_and_success(
            self.app_id,
            self.pushkey,
            self.user_name,
            self.last_token,
            self.clock.time_msec()
        )
        if self.failing_since:
            self.failing_since = None
            self.store.update_pusher_failing_since(
                self.app_id,
                self.pushkey,
                self.user_name,
                self.failing_since)
    else:
        if not self.failing_since:
            self.failing_since = self.clock.time_msec()
            self.store.update_pusher_failing_since(
                self.app_id,
                self.pushkey,
                self.user_name,
                self.failing_since
            )

        if (self.failing_since and
                self.failing_since <
                self.clock.time_msec() - Pusher.GIVE_UP_AFTER):
            # we really only give up so that if the URL gets
            # fixed, we don't suddenly deliver a load
            # of old notifications.
            logger.warn("Giving up on a notification to user %s, "
                        "pushkey %s",
                        self.user_name, self.pushkey)
            self.backoff_delay = Pusher.INITIAL_BACKOFF
            self.last_token = chunk['end']
            self.store.update_pusher_last_token(
                self.app_id,
                self.pushkey,
                self.user_name,
                self.last_token
            )
            self.failing_since = None
            self.store.update_pusher_failing_since(
                self.app_id,
                self.pushkey,
                self.user_name,
                self.failing_since
            )
        else:
            logger.warn("Failed to dispatch push for user %s "
                        "(failing for %dms)."
                        "Trying again in %dms",
                        self.user_name,
                        self.clock.time_msec() - self.failing_since,
                        self.backoff_delay)
            # Exponential backoff, capped at MAX_BACKOFF.
            yield synapse.util.async.sleep(self.backoff_delay / 1000.0)
            self.backoff_delay *= 2
            if self.backoff_delay > Pusher.MAX_BACKOFF:
                self.backoff_delay = Pusher.MAX_BACKOFF
def on_GET(self, request):
    """Handle /sync: parse query params, update presence, run the sync and
    encode the response.

    The ordering of the presence side effects (set_state before
    user_syncing, sync inside the returned context) must be preserved.
    """
    if "from" in request.args:
        # /events used to use 'from', but /sync uses 'since'.
        # Lets be helpful and whine if we see a 'from'.
        raise SynapseError(
            400, "'from' is not a valid query parameter. Did you mean 'since'?"
        )

    requester = yield self.auth.get_user_by_req(
        request, allow_guest=True
    )
    user = requester.user
    device_id = requester.device_id

    timeout = parse_integer(request, "timeout", default=0)
    since = parse_string(request, "since")
    set_presence = parse_string(
        request, "set_presence", default="online",
        allowed_values=self.ALLOWED_PRESENCE
    )
    filter_id = parse_string(request, "filter", default=None)
    full_state = parse_boolean(request, "full_state", default=False)

    logger.debug(
        "/sync: user=%r, timeout=%r, since=%r,"
        " set_presence=%r, filter_id=%r, device_id=%r" % (
            user, timeout, since, set_presence, filter_id, device_id
        )
    )

    # Key used to dedupe identical concurrent sync requests (presumably by
    # a response cache in the handler — TODO confirm against the caller).
    request_key = (user, timeout, since, filter_id, full_state, device_id)

    if filter_id:
        if filter_id.startswith('{'):
            # Inline JSON filter definition rather than a stored filter id.
            try:
                filter_object = json.loads(filter_id)
                # Clamp the timeline limit to the server-configured maximum.
                set_timeline_upper_limit(filter_object,
                                         self.hs.config.filter_timeline_limit)
            except Exception:
                raise SynapseError(400, "Invalid filter JSON")
            self.filtering.check_valid_filter(filter_object)
            filter = FilterCollection(filter_object)
        else:
            filter = yield self.filtering.get_user_filter(
                user.localpart, filter_id
            )
    else:
        filter = DEFAULT_FILTER_COLLECTION

    sync_config = SyncConfig(
        user=user,
        filter_collection=filter,
        is_guest=requester.is_guest,
        request_key=request_key,
        device_id=device_id,
    )

    if since is not None:
        since_token = StreamToken.from_string(since)
    else:
        since_token = None

    affect_presence = set_presence != PresenceState.OFFLINE

    if affect_presence:
        # Apply the requested presence state before entering the syncing
        # context.
        yield self.presence_handler.set_state(user, {"presence": set_presence}, True)

    context = yield self.presence_handler.user_syncing(
        user.to_string(), affect_presence=affect_presence,
    )
    with context:
        sync_result = yield self.sync_handler.wait_for_sync_for_user(
            sync_config, since_token=since_token,
            timeout=timeout, full_state=full_state
        )

    time_now = self.clock.time_msec()

    response_content = self.encode_response(
        time_now, sync_result, requester.access_token_id, filter
    )

    defer.returnValue((200, response_content))
def start(self):
    """Main loop of the pusher: long-poll the event stream one event at a
    time and dispatch each event as a push notification, persisting the
    stream token after each step and backing off exponentially on failure.

    Runs until ``self.alive`` is cleared.
    """
    if not self.last_token:
        # First-time setup: get a token to start from (we can't
        # just start from no token, ie. 'now'
        # because we need the result to be reproduceable in case
        # we fail to dispatch the push)
        config = PaginationConfig(from_token=None, limit='1')
        chunk = yield self.evStreamHandler.get_stream(
            self.user_name, config, timeout=0)
        self.last_token = chunk['end']
        self.store.update_pusher_last_token(
            self.app_id, self.pushkey, self.last_token)
        logger.info("Pusher %s for user %s starting from token %s",
                    self.pushkey, self.user_name, self.last_token)

    while self.alive:
        from_tok = StreamToken.from_string(self.last_token)
        config = PaginationConfig(from_token=from_tok, limit='1')
        # Very long timeout: effectively block until an event arrives.
        chunk = yield self.evStreamHandler.get_stream(
            self.user_name, config,
            timeout=100*365*24*60*60*1000, affect_presence=False
        )

        # limiting to 1 may get 1 event plus 1 presence event, so
        # pick out the actual event
        single_event = None
        for c in chunk['chunk']:
            if 'event_id' in c:  # Hmmm...
                single_event = c
                break
        if not single_event:
            # Nothing pushable in this chunk; advance the token and retry.
            self.last_token = chunk['end']
            continue

        if not self.alive:
            continue

        processed = False
        actions = yield self._actions_for_event(single_event)
        tweaks = _tweaks_for_actions(actions)

        if len(actions) == 0:
            logger.warn("Empty actions! Using default action.")
            actions = Pusher.DEFAULT_ACTIONS
        if 'notify' not in actions and 'dont_notify' not in actions:
            logger.warn("Neither notify nor dont_notify in actions: adding default")
            actions.extend(Pusher.DEFAULT_ACTIONS)

        if 'dont_notify' in actions:
            # Explicitly suppressed: count as processed without dispatching.
            logger.debug(
                "%s for %s: dont_notify",
                single_event['event_id'], self.user_name
            )
            processed = True
        else:
            rejected = yield self.dispatch_push(single_event, tweaks)
            self.has_unread = True
            # A list/tuple result means the dispatch succeeded and carries
            # any pushkeys the remote rejected.
            if isinstance(rejected, list) or isinstance(rejected, tuple):
                processed = True
                for pk in rejected:
                    if pk != self.pushkey:
                        # for sanity, we only remove the pushkey if it
                        # was the one we actually sent...
                        logger.warn(
                            ("Ignoring rejected pushkey %s because we"
                             " didn't send it"), pk
                        )
                    else:
                        logger.info(
                            "Pushkey %s was rejected: removing",
                            pk
                        )
                        yield self.hs.get_pusherpool().remove_pusher(
                            self.app_id, pk
                        )

        if not self.alive:
            continue

        if processed:
            # Success: reset backoff, persist progress, clear failure state.
            self.backoff_delay = Pusher.INITIAL_BACKOFF
            self.last_token = chunk['end']
            self.store.update_pusher_last_token_and_success(
                self.app_id,
                self.pushkey,
                self.last_token,
                self.clock.time_msec()
            )
            if self.failing_since:
                self.failing_since = None
                self.store.update_pusher_failing_since(
                    self.app_id,
                    self.pushkey,
                    self.failing_since)
        else:
            if not self.failing_since:
                self.failing_since = self.clock.time_msec()
                self.store.update_pusher_failing_since(
                    self.app_id,
                    self.pushkey,
                    self.failing_since
                )

            if (self.failing_since and
               self.failing_since <
               self.clock.time_msec() - Pusher.GIVE_UP_AFTER):
                # we really only give up so that if the URL gets
                # fixed, we don't suddenly deliver a load
                # of old notifications.
                logger.warn("Giving up on a notification to user %s, "
                            "pushkey %s",
                            self.user_name, self.pushkey)
                self.backoff_delay = Pusher.INITIAL_BACKOFF
                self.last_token = chunk['end']
                self.store.update_pusher_last_token(
                    self.app_id,
                    self.pushkey,
                    self.last_token
                )

                self.failing_since = None
                self.store.update_pusher_failing_since(
                    self.app_id,
                    self.pushkey,
                    self.failing_since
                )
            else:
                # Exponential backoff before retrying, capped at MAX_BACKOFF.
                logger.warn("Failed to dispatch push for user %s "
                            "(failing for %dms)."
                            "Trying again in %dms",
                            self.user_name,
                            self.clock.time_msec() - self.failing_since,
                            self.backoff_delay)
                yield synapse.util.async.sleep(self.backoff_delay / 1000.0)
                self.backoff_delay *= 2
                if self.backoff_delay > Pusher.MAX_BACKOFF:
                    self.backoff_delay = Pusher.MAX_BACKOFF
def on_GET(self, request):
    """Handle a v2-alpha /sync request (Twisted inlineCallbacks generator).

    Parses pagination/presence/filter parameters, loads the user's stored
    filter (falling back to an empty filter on failure), waits for the
    sync result, and encodes the response payload.

    Returns (via defer.returnValue):
        (200, response_content) once the sync completes or times out.
    """
    user, client = yield self.auth.get_user_by_req(request)

    timeout = self.parse_integer(request, "timeout", default=0)
    limit = self.parse_integer(request, "limit", required=True)
    gap = self.parse_boolean(request, "gap", default=True)
    sort = self.parse_string(
        request, "sort", default="timeline,asc",
        allowed_values=self.ALLOWED_SORT
    )
    since = self.parse_string(request, "since")
    set_presence = self.parse_string(
        request, "set_presence", default="online",
        allowed_values=self.ALLOWED_PRESENCE
    )
    backfill = self.parse_boolean(request, "backfill", default=False)
    filter_id = self.parse_string(request, "filter", default=None)

    # Lazy %-args: only formatted when info logging is enabled
    # (was eagerly formatted with `%`).
    logger.info(
        "/sync: user=%r, timeout=%r, limit=%r, gap=%r, sort=%r, since=%r,"
        " set_presence=%r, backfill=%r, filter_id=%r",
        user, timeout, limit, gap, sort, since, set_presence, backfill,
        filter_id
    )

    # TODO(mjark): Load filter and apply overrides.
    try:
        user_filter = yield self.filtering.get_user_filter(
            user.localpart, filter_id
        )
    except Exception:
        # Best-effort: fall back to an empty filter rather than failing the
        # whole sync. Narrowed from a bare `except:` and now logged so the
        # failure is visible.
        logger.exception(
            "Failed to load filter %r; falling back to empty filter",
            filter_id
        )
        user_filter = Filter({})
    # filter = filter.apply_overrides(http_request)
    # if filter.matches(event):
    #   # stuff

    sync_config = SyncConfig(
        user=user,
        client_info=client,
        gap=gap,
        limit=limit,
        sort=sort,
        backfill=backfill,
        filter=user_filter,
    )

    if since is not None:
        since_token = StreamToken.from_string(since)
    else:
        since_token = None

    sync_result = yield self.sync_handler.wait_for_sync_for_user(
        sync_config, since_token=since_token, timeout=timeout
    )

    time_now = self.clock.time_msec()

    response_content = {
        "public_user_data": self.encode_user_data(
            sync_result.public_user_data, user_filter, time_now
        ),
        "private_user_data": self.encode_user_data(
            sync_result.private_user_data, user_filter, time_now
        ),
        "rooms": self.encode_rooms(
            sync_result.rooms, user_filter, time_now, client.token_id
        ),
        "next_batch": sync_result.next_batch.to_string(),
    }

    defer.returnValue((200, response_content))
async def on_GET(self, request):
    """Handle a /sync request (native async version).

    Parses the query parameters, resolves the event filter (inline JSON,
    stored filter id, or the default), flushes any pending server notices,
    updates presence if requested, then waits for the sync result and
    encodes it — unless the client has already disconnected.

    Returns:
        (200, response_content) once the sync completes or times out.

    Raises:
        SynapseError: 400 for a stray 'from' parameter, invalid inline
            filter JSON, or an unknown stored filter id.
    """
    if b"from" in request.args:
        # /events used to use 'from', but /sync uses 'since'.
        # Lets be helpful and whine if we see a 'from'.
        raise SynapseError(
            400, "'from' is not a valid query parameter. Did you mean 'since'?")

    requester = await self.auth.get_user_by_req(request, allow_guest=True)
    user = requester.user
    device_id = requester.device_id

    timeout = parse_integer(request, "timeout", default=0)
    since = parse_string(request, "since")
    set_presence = parse_string(
        request,
        "set_presence",
        default="online",
        allowed_values=self.ALLOWED_PRESENCE,
    )
    filter_id = parse_string(request, "filter", default=None)
    full_state = parse_boolean(request, "full_state", default=False)

    logger.debug(
        "/sync: user=%r, timeout=%r, since=%r, "
        "set_presence=%r, filter_id=%r, device_id=%r",
        user,
        timeout,
        since,
        set_presence,
        filter_id,
        device_id,
    )

    # Key used to deduplicate concurrent identical sync requests.
    request_key = (user, timeout, since, filter_id, full_state, device_id)

    if filter_id is None:
        filter_collection = DEFAULT_FILTER_COLLECTION
    elif filter_id.startswith("{"):
        # Inline JSON filter supplied directly in the query string.
        try:
            filter_object = json.loads(filter_id)
            set_timeline_upper_limit(filter_object,
                                     self.hs.config.filter_timeline_limit)
        except Exception:
            raise SynapseError(400, "Invalid filter JSON")
        self.filtering.check_valid_filter(filter_object)
        filter_collection = FilterCollection(filter_object)
    else:
        # Otherwise it names a previously stored filter.
        try:
            filter_collection = await self.filtering.get_user_filter(
                user.localpart, filter_id)
        except StoreError as err:
            if err.code != 404:
                raise
            # fix up the description and errcode to be more useful
            raise SynapseError(400, "No such filter", errcode=Codes.INVALID_PARAM)

    sync_config = SyncConfig(
        user=user,
        filter_collection=filter_collection,
        is_guest=requester.is_guest,
        request_key=request_key,
        device_id=device_id,
    )

    if since is not None:
        since_token = StreamToken.from_string(since)
    else:
        since_token = None

    # send any outstanding server notices to the user.
    await self._server_notices_sender.on_user_syncing(user.to_string())

    affect_presence = set_presence != PresenceState.OFFLINE

    if affect_presence:
        await self.presence_handler.set_state(user, {"presence": set_presence}, True)

    # Mark the user as syncing for the duration of the wait below.
    context = await self.presence_handler.user_syncing(
        user.to_string(), affect_presence=affect_presence)
    with context:
        sync_result = await self.sync_handler.wait_for_sync_for_user(
            sync_config,
            since_token=since_token,
            timeout=timeout,
            full_state=full_state,
        )

    # the client may have disconnected by now; don't bother to serialize the
    # response if so.
    if request._disconnected:
        logger.info("Client has disconnected; not serializing response.")
        return 200, {}

    time_now = self.clock.time_msec()
    response_content = await self.encode_response(
        time_now, sync_result, requester.access_token_id, filter_collection)

    logger.debug("Event formatting complete")
    return 200, response_content
def on_GET(self, request):
    """Handle a /sync request (Twisted inlineCallbacks generator).

    Parses the query parameters, resolves the event filter (inline JSON or
    a stored filter id, falling back to an empty filter on load failure),
    enforces the guest room-list requirement, waits for the sync result
    while tracking the event stream for presence, and encodes the response.

    Returns (via defer.returnValue):
        (200, response_content) once the sync completes or times out.

    Raises:
        SynapseError: 400 for invalid filter JSON, or for a guest user
            whose filter does not list rooms.
    """
    user, token_id, is_guest = yield self.auth.get_user_by_req(
        request, allow_guest=True
    )

    timeout = parse_integer(request, "timeout", default=0)
    since = parse_string(request, "since")
    set_presence = parse_string(
        request, "set_presence", default="online",
        allowed_values=self.ALLOWED_PRESENCE
    )
    filter_id = parse_string(request, "filter", default=None)
    full_state = parse_boolean(request, "full_state", default=False)

    # Lazy %-args: only formatted when info logging is enabled
    # (was eagerly formatted with `%`).
    logger.info(
        "/sync: user=%r, timeout=%r, since=%r,"
        " set_presence=%r, filter_id=%r",
        user, timeout, since, set_presence, filter_id
    )

    if filter_id and filter_id.startswith('{'):
        # Inline JSON filter supplied directly in the query string.
        try:
            filter_object = json.loads(filter_id)
        except Exception:
            # Narrowed from a bare `except:`.
            raise SynapseError(400, "Invalid filter JSON")
        # NOTE(review): calls a private method on the filtering object;
        # consider exposing a public check_valid_filter instead.
        self.filtering._check_valid_filter(filter_object)
        filter_collection = FilterCollection(filter_object)
    else:
        try:
            filter_collection = yield self.filtering.get_user_filter(
                user.localpart, filter_id
            )
        except Exception:
            # Best-effort: fall back to an empty filter rather than failing
            # the whole sync. Narrowed from a bare `except:` and now logged.
            logger.exception(
                "Failed to load filter %r; falling back to empty filter",
                filter_id
            )
            filter_collection = FilterCollection({})

    if is_guest and filter_collection.list_rooms() is None:
        raise SynapseError(
            400, "Guest users must provide a list of rooms in the filter"
        )

    sync_config = SyncConfig(
        user=user,
        is_guest=is_guest,
        filter=filter_collection,
    )

    if since is not None:
        since_token = StreamToken.from_string(since)
    else:
        since_token = None

    if set_presence == "online":
        yield self.event_stream_handler.started_stream(user)

    try:
        sync_result = yield self.sync_handler.wait_for_sync_for_user(
            sync_config, since_token=since_token, timeout=timeout,
            full_state=full_state
        )
    finally:
        # Always balance started_stream, even if the sync wait fails.
        if set_presence == "online":
            self.event_stream_handler.stopped_stream(user)

    time_now = self.clock.time_msec()

    joined = self.encode_joined(
        sync_result.joined, filter_collection, time_now, token_id
    )

    invited = self.encode_invited(
        sync_result.invited, filter_collection, time_now, token_id
    )

    archived = self.encode_archived(
        sync_result.archived, filter_collection, time_now, token_id
    )

    response_content = {
        "account_data": self.encode_account_data(
            sync_result.account_data, filter_collection, time_now
        ),
        "presence": self.encode_presence(
            sync_result.presence, filter_collection, time_now
        ),
        "rooms": {
            "join": joined,
            "invite": invited,
            "leave": archived,
        },
        "next_batch": sync_result.next_batch.to_string(),
    }

    defer.returnValue((200, response_content))
def get_and_dispatch(self):
    """Poll the event stream for one event and dispatch it as a push
    notification, also handling read receipts (badge updates), token
    persistence, and exponential backoff on dispatch failure.
    """
    from_tok = StreamToken.from_string(self.last_token)
    config = PaginationConfig(from_token=from_tok, limit='1')
    # Randomize the long-poll timeout (~5 min +/- 1 min); presumably to
    # spread reconnections across pushers — TODO confirm.
    timeout = (300 + random.randint(-60, 60)) * 1000
    chunk = yield self.evStreamHandler.get_stream(
        self.user_id, config, timeout=timeout, affect_presence=False,
        only_keys=("room", "receipt",),
    )

    # limiting to 1 may get 1 event plus 1 presence event, so
    # pick out the actual event
    single_event = None
    read_receipt = None
    for c in chunk['chunk']:
        if 'event_id' in c:  # Hmmm...
            single_event = c
        elif c['type'] == 'm.receipt':
            read_receipt = c

    # A read receipt from this user means the unread badge may have changed.
    have_updated_badge = False
    if read_receipt:
        for receipt_part in read_receipt['content'].values():
            if 'm.read' in receipt_part:
                if self.user_id in receipt_part['m.read'].keys():
                    have_updated_badge = True

    if not single_event:
        # No pushable event: update the badge if needed, persist the new
        # token, and return to be polled again.
        if have_updated_badge:
            yield self.update_badge()
        self.last_token = chunk['end']
        yield self.store.update_pusher_last_token(
            self.app_id,
            self.pushkey,
            self.user_id,
            self.last_token
        )
        return

    if not self.alive:
        return

    processed = False

    rule_evaluator = yield \
        push_rule_evaluator.evaluator_for_user_id_and_profile_tag(
            self.user_id, self.profile_tag, single_event['room_id'],
            self.store
        )

    actions = yield rule_evaluator.actions_for_event(single_event)
    tweaks = rule_evaluator.tweaks_for_actions(actions)

    if 'notify' in actions:
        self.badge = yield self._get_badge_count()
        rejected = yield self.dispatch_push(single_event, tweaks, self.badge)
        self.has_unread = True
        # A list/tuple result means the dispatch succeeded and carries any
        # pushkeys the remote rejected.
        if isinstance(rejected, list) or isinstance(rejected, tuple):
            processed = True
            for pk in rejected:
                if pk != self.pushkey:
                    # for sanity, we only remove the pushkey if it
                    # was the one we actually sent...
                    logger.warn(
                        ("Ignoring rejected pushkey %s because we"
                         " didn't send it"), pk
                    )
                else:
                    logger.info(
                        "Pushkey %s was rejected: removing",
                        pk
                    )
                    yield self.hs.get_pusherpool().remove_pusher(
                        self.app_id, pk, self.user_id
                    )
    else:
        # Not a notifying event; still refresh the badge if a receipt
        # arrived, and count the event as processed.
        if have_updated_badge:
            yield self.update_badge()
        processed = True

    if not self.alive:
        return

    if processed:
        # Success: reset backoff, persist progress, clear failure state.
        self.backoff_delay = Pusher.INITIAL_BACKOFF
        self.last_token = chunk['end']
        yield self.store.update_pusher_last_token_and_success(
            self.app_id,
            self.pushkey,
            self.user_id,
            self.last_token,
            self.clock.time_msec()
        )
        if self.failing_since:
            self.failing_since = None
            yield self.store.update_pusher_failing_since(
                self.app_id,
                self.pushkey,
                self.user_id,
                self.failing_since)
    else:
        if not self.failing_since:
            self.failing_since = self.clock.time_msec()
            yield self.store.update_pusher_failing_since(
                self.app_id,
                self.pushkey,
                self.user_id,
                self.failing_since
            )

        if (self.failing_since and
           self.failing_since <
           self.clock.time_msec() - Pusher.GIVE_UP_AFTER):
            # we really only give up so that if the URL gets
            # fixed, we don't suddenly deliver a load
            # of old notifications.
            logger.warn("Giving up on a notification to user %s, "
                        "pushkey %s",
                        self.user_id, self.pushkey)
            self.backoff_delay = Pusher.INITIAL_BACKOFF
            self.last_token = chunk['end']
            yield self.store.update_pusher_last_token(
                self.app_id,
                self.pushkey,
                self.user_id,
                self.last_token
            )

            self.failing_since = None
            yield self.store.update_pusher_failing_since(
                self.app_id,
                self.pushkey,
                self.user_id,
                self.failing_since
            )
        else:
            # Exponential backoff before retrying, capped at MAX_BACKOFF.
            logger.warn("Failed to dispatch push for user %s "
                        "(failing for %dms)."
                        "Trying again in %dms",
                        self.user_id,
                        self.clock.time_msec() - self.failing_since,
                        self.backoff_delay)
            yield synapse.util.async.sleep(self.backoff_delay / 1000.0)
            self.backoff_delay *= 2
            if self.backoff_delay > Pusher.MAX_BACKOFF:
                self.backoff_delay = Pusher.MAX_BACKOFF