def gather_new_streams(
    user_profile: UserProfile, threshold: datetime.datetime
) -> Tuple[int, Dict[str, List[str]]]:
    """Return the streams created after `threshold` that this user can see.

    Returns a tuple of (count, {"html": [...], "plain": [...]}) where the
    html list contains narrow links and the plain list bare stream names.

    Guests are limited to web-public streams; other users who can access
    public streams see all new public streams; everyone else sees none.
    """
    if user_profile.is_guest:
        # Guests can only ever see web-public streams.
        new_streams = list(
            get_active_streams(user_profile.realm).filter(
                is_web_public=True, date_created__gt=threshold
            )
        )
    elif user_profile.can_access_public_streams():
        new_streams = list(
            get_active_streams(user_profile.realm).filter(
                invite_only=False, date_created__gt=threshold
            )
        )
    else:
        # Bug fix: previously there was no else branch, so new_streams was
        # left unbound for non-guest users who cannot access public streams,
        # raising UnboundLocalError below.
        new_streams = []

    base_url = f"{user_profile.realm.uri}/#narrow/stream/"

    streams_html = []
    streams_plain = []

    for stream in new_streams:
        narrow_url = base_url + encode_stream(stream.id, stream.name)
        stream_link = f"<a href='{narrow_url}'>{stream.name}</a>"
        streams_html.append(stream_link)
        streams_plain.append(stream.name)

    return len(new_streams), {"html": streams_html, "plain": streams_plain}
def ok_to_include_history(narrow: OptionalNarrowListT, user_profile: UserProfile) -> bool:
    """Decide whether the message query may include "historical" messages.

    Historical messages are Message rows with no corresponding UserMessage
    row (e.g. messages sent to a public stream before the user subscribed).
    Queries that include them must be constructed very carefully, so we
    only answer True when we are certain the query will be narrowed to a
    particular accessible stream (or all public streams) on the user's own
    realm; otherwise results could be polluted with messages from other
    realms.
    """
    if narrow is None:
        return False

    include_history = False
    for term in narrow:
        negated = term.get('negated', False)
        operator = term['operator']
        if operator == "stream" and not negated:
            operand: Union[str, int] = term['operand']
            if isinstance(operand, str):
                include_history = can_access_stream_history_by_name(user_profile, operand)
            else:
                include_history = can_access_stream_history_by_id(user_profile, operand)
        elif (
            operator == "streams"
            and term['operand'] == "public"
            and not negated
            and user_profile.can_access_public_streams()
        ):
            include_history = True

    # A narrow on any property of the UserMessage table ("is" operators)
    # cannot match historical messages, so disable them outright.
    if any(term['operator'] == "is" for term in narrow):
        include_history = False

    return include_history
def has_message_access(
    user_profile: UserProfile,
    message: Message,
    *,
    has_user_message: bool,
    stream: Optional[Stream] = None,
    is_subscribed: Optional[bool] = None,
) -> bool:
    """
    Returns whether a user has access to a given message.

    * The user_message parameter must be provided if the user has a
      UserMessage row for the target message.
    * The optional stream parameter is validated; is_subscribed is not.
    """
    # A UserMessage row is sufficient for access on its own.
    if has_user_message:
        return True

    # Without a UserMessage row, private messages are never accessible.
    if message.recipient.type != Recipient.STREAM:
        return False

    if stream is None:
        stream = Stream.objects.get(id=message.recipient.type_id)
    else:
        assert stream.recipient_id == message.recipient_id

    # Cross-realm stream messages are inaccessible, and so are messages
    # the user didn't directly receive on streams whose history is not
    # public to subscribers.
    if stream.realm != user_profile.realm or not stream.is_history_public_to_subscribers():
        return False

    if stream.is_public() and user_profile.can_access_public_streams():
        return True

    # History is public to subscribers here, so access reduces to whether
    # the user is subscribed; trust the caller's hint when provided.
    if is_subscribed is not None:
        return is_subscribed

    return Subscription.objects.filter(
        user_profile=user_profile, active=True, recipient=message.recipient
    ).exists()
def gather_new_streams(user_profile: UserProfile, threshold: datetime.datetime) -> Tuple[int, Dict[str, List[str]]]:
    """Return the count plus HTML/plain-text renderings of public streams
    created after `threshold`, for users who can access public streams
    (others get an empty result)."""
    new_streams = []
    if user_profile.can_access_public_streams():
        new_streams = list(
            get_active_streams(user_profile.realm).filter(
                invite_only=False, date_created__gt=threshold
            )
        )

    base_url = "%s/#narrow/stream/" % (user_profile.realm.uri,)

    streams_plain = [stream.name for stream in new_streams]
    streams_html = []
    for stream in new_streams:
        narrow_url = base_url + encode_stream(stream.id, stream.name)
        streams_html.append("<a href='%s'>%s</a>" % (narrow_url, stream.name))

    return len(new_streams), {"html": streams_html, "plain": streams_plain}
def events_register_backend(
    request: HttpRequest,
    user_profile: UserProfile,
    apply_markdown: bool = REQ(default=False, json_validator=check_bool),
    client_gravatar: bool = REQ(default=False, json_validator=check_bool),
    slim_presence: bool = REQ(default=False, json_validator=check_bool),
    all_public_streams: Optional[bool] = REQ(default=None, json_validator=check_bool),
    include_subscribers: bool = REQ(default=False, json_validator=check_bool),
    client_capabilities: Optional[Dict[str, bool]] = REQ(
        json_validator=check_dict(
            [
                # This field was accidentally made required when it was added in v2.0.0-781;
                # this was not realized until after the release of Zulip 2.1.2. (It remains
                # required to help ensure backwards compatibility of client code.)
                ("notification_settings_null", check_bool),
            ],
            [
                # Any new fields of `client_capabilities` should be optional. Add them here.
                ("bulk_message_deletion", check_bool),
                ("user_avatar_url_field_optional", check_bool),
                ("stream_typing_notifications", check_bool),
            ],
            value_validator=check_bool,
        ),
        default=None,
    ),
    event_types: Optional[Sequence[str]] = REQ(
        json_validator=check_list(check_string), default=None
    ),
    fetch_event_types: Optional[Sequence[str]] = REQ(
        json_validator=check_list(check_string), default=None
    ),
    narrow: NarrowT = REQ(
        json_validator=check_list(check_list(check_string, length=2)), default=[]
    ),
    queue_lifespan_secs: int = REQ(converter=int, default=0, documentation_pending=True),
) -> HttpResponse:
    """Register a new event queue for this user and return its initial state.

    Returns a JSON error if the user requested all public streams without
    having access to them.
    """
    # Requesting events for all public streams requires the ability to
    # actually see public streams.
    if all_public_streams and not user_profile.can_access_public_streams():
        return json_error(_("User not authorized for this query"))

    # Fill in the effective defaults for parameters the client omitted.
    all_public_streams = _default_all_public_streams(user_profile, all_public_streams)
    narrow = _default_narrow(user_profile, narrow)

    if client_capabilities is None:
        client_capabilities = {}

    return json_success(
        do_events_register(
            user_profile,
            request.client,
            apply_markdown,
            client_gravatar,
            slim_presence,
            event_types,
            queue_lifespan_secs,
            all_public_streams,
            narrow=narrow,
            include_subscribers=include_subscribers,
            client_capabilities=client_capabilities,
            fetch_event_types=fetch_event_types,
        )
    )
def get_events_backend(
    request: HttpRequest,
    user_profile: UserProfile,
    # user_client is intended only for internal Django=>Tornado requests
    # and thus shouldn't be documented for external use.
    user_client: Optional[Client] = REQ(
        converter=get_client, default=None, intentionally_undocumented=True
    ),
    last_event_id: Optional[int] = REQ(converter=int, default=None),
    queue_id: Optional[str] = REQ(default=None),
    # apply_markdown, client_gravatar, all_public_streams, and various
    # other parameters are only used when registering a new queue via this
    # endpoint. This is a feature used primarily by get_events_internal
    # and not expected to be used by third-party clients.
    apply_markdown: bool = REQ(
        default=False, json_validator=check_bool, intentionally_undocumented=True
    ),
    client_gravatar: bool = REQ(
        default=False, json_validator=check_bool, intentionally_undocumented=True
    ),
    slim_presence: bool = REQ(
        default=False, json_validator=check_bool, intentionally_undocumented=True
    ),
    all_public_streams: bool = REQ(
        default=False, json_validator=check_bool, intentionally_undocumented=True
    ),
    event_types: Optional[Sequence[str]] = REQ(
        default=None, json_validator=check_list(check_string), intentionally_undocumented=True
    ),
    dont_block: bool = REQ(default=False, json_validator=check_bool),
    narrow: Sequence[Sequence[str]] = REQ(
        default=[],
        json_validator=check_list(check_list(check_string)),
        intentionally_undocumented=True,
    ),
    lifespan_secs: int = REQ(
        default=0, converter=to_non_negative_int, intentionally_undocumented=True
    ),
    bulk_message_deletion: bool = REQ(
        default=False, json_validator=check_bool, intentionally_undocumented=True
    ),
    stream_typing_notifications: bool = REQ(
        default=False, json_validator=check_bool, intentionally_undocumented=True
    ),
) -> HttpResponse:
    """Long-polling events endpoint: fetch events from an existing queue
    (when queue_id is provided) or register a new one and fetch from it.

    When no events are immediately available and dont_block is False, the
    response is handed off to Tornado for long-polling rather than being
    returned synchronously.
    """
    if all_public_streams and not user_profile.can_access_public_streams():
        raise JsonableError(_("User not authorized for this query"))

    # Extract the Tornado handler from the request
    # NOTE(review): tornado_handler appears to be a callable (presumably a
    # weak reference) that must be invoked to obtain the live handler —
    # confirm against get_request_notes.
    tornado_handler = get_request_notes(request).tornado_handler
    assert tornado_handler is not None
    handler = tornado_handler()
    assert handler is not None

    # For internal Django=>Tornado requests, the true client is passed
    # explicitly via user_client; otherwise use the request's client.
    if user_client is None:
        valid_user_client = get_request_notes(request).client
        assert valid_user_client is not None
    else:
        valid_user_client = user_client

    events_query = dict(
        user_profile_id=user_profile.id,
        queue_id=queue_id,
        last_event_id=last_event_id,
        event_types=event_types,
        client_type_name=valid_user_client.name,
        all_public_streams=all_public_streams,
        lifespan_secs=lifespan_secs,
        narrow=narrow,
        dont_block=dont_block,
        handler_id=handler.handler_id,
    )

    # No queue_id means the client wants a fresh queue; attach the data
    # needed to create it alongside the fetch request.
    if queue_id is None:
        events_query["new_queue_data"] = dict(
            user_profile_id=user_profile.id,
            realm_id=user_profile.realm_id,
            event_types=event_types,
            client_type_name=valid_user_client.name,
            apply_markdown=apply_markdown,
            client_gravatar=client_gravatar,
            slim_presence=slim_presence,
            all_public_streams=all_public_streams,
            queue_timeout=lifespan_secs,
            last_connection_time=time.time(),
            narrow=narrow,
            bulk_message_deletion=bulk_message_deletion,
            stream_typing_notifications=stream_typing_notifications,
        )

    result = fetch_events(events_query)

    # Propagate any extra log data from the events subsystem into this
    # request's log entry.
    if "extra_log_data" in result:
        log_data = get_request_notes(request).log_data
        assert log_data is not None
        log_data["extra"] = result["extra_log_data"]

    if result["type"] == "async":
        # Mark this response with .asynchronous; this will result in
        # Tornado discarding the response and instead long-polling the
        # request. See zulip_finish for more design details.
        handler._request = request
        response = json_success()
        response.asynchronous = True
        return response
    if result["type"] == "error":
        raise result["exception"]
    return json_success(result["response"])
def do_get_streams(
    user_profile: UserProfile,
    include_public: bool = True,
    include_web_public: bool = False,
    include_subscribed: bool = True,
    include_all_active: bool = False,
    include_default: bool = False,
    include_owner_subscribed: bool = False,
) -> List[APIStreamDict]:
    """Return stream data for this user, combining the sources selected
    by the include_* flags, sorted by stream name.

    Raises JsonableError if a non-administrator requests all active
    streams.
    """
    # This function is only used by API clients now.
    if include_all_active and not user_profile.is_realm_admin:
        raise JsonableError(_("User not authorized for this query"))

    # Users without access to public streams silently lose that source.
    include_public = include_public and user_profile.can_access_public_streams()

    # Start out with all active streams in the realm.
    query = Stream.objects.filter(realm=user_profile.realm, deactivated=False)

    if include_all_active:
        streams = Stream.get_client_data(query)
    else:
        # Collect one Q object per requested source; the final filter is
        # the or (|) of all of them.
        filter_options: List[Q] = []

        if include_subscribed:
            subscribed_stream_ids = get_subscribed_stream_ids_for_user(user_profile)
            filter_options.append(Q(id__in=set(subscribed_stream_ids)))
        if include_public:
            filter_options.append(Q(invite_only=False))
        if include_web_public:
            # This should match get_web_public_streams_queryset
            filter_options.append(
                Q(
                    is_web_public=True,
                    invite_only=False,
                    history_public_to_subscribers=True,
                    deactivated=False,
                )
            )
        if include_owner_subscribed and user_profile.is_bot:
            bot_owner = user_profile.bot_owner
            assert bot_owner is not None
            owner_stream_ids = get_subscribed_stream_ids_for_user(bot_owner)
            filter_options.append(Q(id__in=set(owner_stream_ids)))

        if filter_options:
            combined_filter = filter_options[0]
            for extra_filter in filter_options[1:]:
                combined_filter |= extra_filter
            streams = Stream.get_client_data(query.filter(combined_filter))
        else:
            # Don't bother going to the database with no valid sources
            streams = []

    streams.sort(key=lambda elt: elt["name"])

    if include_default:
        default_stream_ids = {
            default_stream.id
            for default_stream in get_default_streams_for_realm(user_profile.realm_id)
        }
        for stream in streams:
            stream["is_default"] = stream["stream_id"] in default_stream_ids

    return streams
def gather_subscriptions_helper(
    user_profile: UserProfile,
    include_subscribers: bool = True,
) -> SubscriptionInfo:
    """Build the user's stream-subscription data: streams they are
    subscribed to, streams they were subscribed to, and streams they have
    never subscribed to (restricted to streams they are allowed to see).

    When include_subscribers is True, each returned stream dict also gets
    a "subscribers" list of user ids.
    """
    realm = user_profile.realm
    # Fetch every active stream in the realm as raw dicts.
    all_streams: QuerySet[RawStreamDict] = get_active_streams(realm).values(
        *Stream.API_FIELDS,
        # The realm_id and recipient_id are generally not needed in the API.
        "realm_id",
        "recipient_id",
        # email_token isn't public to some users with access to
        # the stream, so doesn't belong in API_FIELDS.
        "email_token",
    )
    # Index streams both by recipient_id (to join against subscriptions)
    # and by stream id.
    recip_id_to_stream_id: Dict[int, int] = {
        stream["recipient_id"]: stream["id"] for stream in all_streams
    }
    all_streams_map: Dict[int, RawStreamDict] = {stream["id"]: stream for stream in all_streams}

    sub_dicts_query: Iterable[RawSubscriptionDict] = (
        get_stream_subscriptions_for_user(user_profile)
        .values(
            *Subscription.API_FIELDS,
            "recipient_id",
            "active",
        )
        .order_by("recipient_id")
    )

    # We only care about subscriptions for active streams.
    sub_dicts: List[RawSubscriptionDict] = [
        sub_dict
        for sub_dict in sub_dicts_query
        if recip_id_to_stream_id.get(sub_dict["recipient_id"])
    ]

    def get_stream_id(sub_dict: RawSubscriptionDict) -> int:
        # Map a subscription row to its stream id via recipient_id.
        return recip_id_to_stream_id[sub_dict["recipient_id"]]

    # Traffic data is only needed for streams this user has a
    # subscription row for.
    traffic_stream_ids = {get_stream_id(sub_dict) for sub_dict in sub_dicts}
    recent_traffic = get_streams_traffic(stream_ids=traffic_stream_ids)

    # Okay, now we finally get to populating our main results, which
    # will be these three lists.
    subscribed: List[SubscriptionStreamDict] = []
    unsubscribed: List[SubscriptionStreamDict] = []
    never_subscribed: List[NeverSubscribedStreamDict] = []

    sub_unsub_stream_ids = set()
    for sub_dict in sub_dicts:
        stream_id = get_stream_id(sub_dict)
        sub_unsub_stream_ids.add(stream_id)
        raw_stream_dict = all_streams_map[stream_id]

        stream_dict = build_stream_dict_for_sub(
            user=user_profile,
            sub_dict=sub_dict,
            raw_stream_dict=raw_stream_dict,
            recent_traffic=recent_traffic,
        )

        # is_active is represented in this structure by which list we include it in.
        is_active = sub_dict["active"]
        if is_active:
            subscribed.append(stream_dict)
        else:
            unsubscribed.append(stream_dict)

    # "Never subscribed" candidates: users who can access public streams
    # may see every remaining stream; others only web-public ones.
    if user_profile.can_access_public_streams():
        never_subscribed_stream_ids = set(all_streams_map) - sub_unsub_stream_ids
    else:
        web_public_stream_ids = {stream["id"] for stream in all_streams if stream["is_web_public"]}
        never_subscribed_stream_ids = web_public_stream_ids - sub_unsub_stream_ids

    never_subscribed_streams = [
        all_streams_map[stream_id] for stream_id in never_subscribed_stream_ids
    ]

    # Of the candidates, include public streams for everyone; invite-only
    # streams only for realm administrators.
    for raw_stream_dict in never_subscribed_streams:
        is_public = not raw_stream_dict["invite_only"]
        if is_public or user_profile.is_realm_admin:
            slim_stream_dict = build_stream_dict_for_never_sub(
                raw_stream_dict=raw_stream_dict, recent_traffic=recent_traffic
            )
            never_subscribed.append(slim_stream_dict)

    if include_subscribers:
        # The highly optimized bulk_get_subscriber_user_ids wants to know which
        # streams we are subscribed to, for validation purposes, and it uses that
        # info to know if it's allowed to find OTHER subscribers.
        subscribed_stream_ids = {
            get_stream_id(sub_dict) for sub_dict in sub_dicts if sub_dict["active"]
        }

        subscriber_map = bulk_get_subscriber_user_ids(
            all_streams,
            user_profile,
            subscribed_stream_ids,
        )

        # Attach the subscriber user-id lists to every stream dict in all
        # three result lists.
        for lst in [subscribed, unsubscribed]:
            for stream_dict in lst:
                assert isinstance(stream_dict["stream_id"], int)
                stream_id = stream_dict["stream_id"]
                stream_dict["subscribers"] = subscriber_map[stream_id]

        for slim_stream_dict in never_subscribed:
            assert isinstance(slim_stream_dict["stream_id"], int)
            stream_id = slim_stream_dict["stream_id"]
            slim_stream_dict["subscribers"] = subscriber_map[stream_id]

    subscribed.sort(key=lambda x: x["name"])
    unsubscribed.sort(key=lambda x: x["name"])
    never_subscribed.sort(key=lambda x: x["name"])

    return SubscriptionInfo(
        subscriptions=subscribed,
        unsubscribed=unsubscribed,
        never_subscribed=never_subscribed,
    )