Example #1
    def consume(self, event: Dict[str, Any]) -> None:
        try:
            if event.get("type", "add") == "remove":
                message_ids = event.get("message_ids")
                if message_ids is None:
                    # TODO/compatibility: Previously, we sent only one `message_id` in
                    # a payload for notification remove events. This was later changed
                    # to send a list of `message_ids` (with that field name), but we need
                    # compatibility code for events present in the queue during upgrade.
                    # Remove this when one can no longer upgrade from 1.9.2 (or earlier)
                    # to any version after 2.0.0
                    message_ids = [event["message_id"]]
                handle_remove_push_notification(event["user_profile_id"],
                                                message_ids)
            else:
                handle_push_notification(event["user_profile_id"], event)
        except PushNotificationBouncerRetryLaterError:

            def failure_processor(event: Dict[str, Any]) -> None:
                logger.warning(
                    "Maximum retries exceeded for trigger:%s event:push_notification",
                    event["user_profile_id"],
                )

            retry_event(self.queue_name, event, failure_processor)
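
All of these examples funnel transient failures into retry_event(queue_name, event, failure_processor), but the helper itself never appears in this section. A minimal sketch of what such a helper might look like, assuming a MAX_REQUEST_RETRIES constant and a queue_json_publish re-enqueue helper (both assumptions; Example #4's seeding of missed_message['failed_tries'] motivates the counter):

from typing import Any, Callable, Dict

MAX_REQUEST_RETRIES = 3  # assumed limit; the examples never show the constant

def retry_event(queue_name: str,
                event: Dict[str, Any],
                failure_processor: Callable[[Dict[str, Any]], None]) -> None:
    # Track the retry count on the event itself, matching the
    # 'failed_tries' key that Example #4 seeds before calling retry_event.
    if 'failed_tries' not in event:
        event['failed_tries'] = 0
    event['failed_tries'] += 1
    if event['failed_tries'] > MAX_REQUEST_RETRIES:
        # Give up and let the caller-supplied handler log or clean up.
        failure_processor(event)
    else:
        # Re-enqueue the event for another attempt; queue_json_publish is
        # assumed to be the workers' standard publish helper.
        queue_json_publish(queue_name, event, lambda x: None)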
Example #2
    def consume(self, event: Dict[str, Any]) -> None:
        if event['type'] == 'mark_stream_messages_as_read':
            user_profile = get_user_profile_by_id(event['user_profile_id'])
            client = Client.objects.get(id=event['client_id'])

            for stream_id in event['stream_ids']:
                # Since the user just unsubscribed, we don't require
                # an active Subscription object (otherwise, private
                # streams would never be accessible)
                (stream, recipient, sub) = access_stream_by_id(user_profile, stream_id,
                                                               require_active=False)
                do_mark_stream_messages_as_read(user_profile, client, stream)
        elif event['type'] == 'clear_push_device_tokens':
            try:
                clear_push_device_tokens(event["user_profile_id"])
            except PushNotificationBouncerRetryLaterError:

                def failure_processor(event: Dict[str, Any]) -> None:
                    logger.warning(
                        "Maximum retries exceeded for trigger:%s event:clear_push_device_tokens"
                        % (event['user_profile_id'], ))

                retry_event(self.queue_name, event, failure_processor)
        elif event['type'] == 'realm_export':
            start = time.time()
            realm = Realm.objects.get(id=event['realm_id'])
            output_dir = tempfile.mkdtemp(prefix="zulip-export-")

            public_url = export_realm_wrapper(realm=realm,
                                              output_dir=output_dir,
                                              threads=6,
                                              upload=True,
                                              public_only=True,
                                              delete_after_upload=True)
            assert public_url is not None

            # Update the extra_data field now that the export is complete.
            export_event = RealmAuditLog.objects.get(id=event['id'])
            export_event.extra_data = ujson.dumps(
                dict(export_path=urllib.parse.urlparse(public_url).path, ))
            export_event.save(update_fields=['extra_data'])

            # Send a private message notification letting the user who
            # triggered the export know the export finished.
            user_profile = get_user_profile_by_id(event['user_profile_id'])
            content = "Your data export is complete and has been uploaded here:\n\n%s" % (
                public_url, )
            internal_send_private_message(realm=user_profile.realm,
                                          sender=get_system_bot(
                                              settings.NOTIFICATION_BOT),
                                          recipient_user=user_profile,
                                          content=content)

            # For future frontend use, also notify administrator
            # clients that the export happened.
            notify_realm_export(user_profile)
            logging.info("Completed data export for %s in %s" %
                         (user_profile.realm.string_id, time.time() - start))
Example #3
 def consume(self, data: Dict[str, Any]) -> None:
     # TODO: This is the only implementation that takes a Dict rather than a Mapping; should we simplify?
     user_profile = get_user_profile_by_id(data['user_id'])
     logging.info("Processing signup for user %s in realm %s" %
                  (user_profile.email, user_profile.realm.string_id))
     if settings.MAILCHIMP_API_KEY and settings.PRODUCTION:
         endpoint = "https://%s.api.mailchimp.com/3.0/lists/%s/members" % \
                    (settings.MAILCHIMP_API_KEY.split('-')[1], settings.ZULIP_FRIENDS_LIST_ID)
         params = dict(data)
         del params['user_id']
         params['list_id'] = settings.ZULIP_FRIENDS_LIST_ID
         params['status'] = 'subscribed'
         r = requests.post(endpoint,
                           auth=('apikey', settings.MAILCHIMP_API_KEY),
                           json=params,
                           timeout=10)
         if r.status_code == 400 and ujson.loads(r.text)['title'] == 'Member Exists':
             logging.warning(
                 "Attempted to sign up already existing email to list: %s" %
                 (data['email_address'], ))
         elif r.status_code == 400:
             retry_event('signups', data, lambda e: r.raise_for_status())
         else:
             r.raise_for_status()
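
Note the failure processor in this example: once the retry budget is exhausted, retry_event is expected to call the supplied callable with the event, and the lambda ignores that argument and calls r.raise_for_status() instead, so a persistent 400 from MailChimp surfaces as an HTTPError in the worker rather than being dropped silently.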
Example #4
def handle_push_notification(user_profile_id, missed_message):
    # type: (int, Dict[str, Any]) -> None
    """
    missed_message is the event received by the
    zerver.worker.queue_processors.PushNotificationWorker.consume function.
    """
    try:
        user_profile = get_user_profile_by_id(user_profile_id)
        if not (receives_offline_notifications(user_profile)
                or receives_online_notifications(user_profile)):
            return

        umessage = UserMessage.objects.get(
            user_profile=user_profile,
            message__id=missed_message['message_id'])
        message = umessage.message
        if umessage.flags.read:
            return

        apns_payload = get_apns_payload(message)
        gcm_payload = get_gcm_payload(user_profile, message)

        if uses_notification_bouncer():
            try:
                send_notifications_to_bouncer(user_profile_id, apns_payload,
                                              gcm_payload)
            except requests.ConnectionError:
                if 'failed_tries' not in missed_message:
                    missed_message['failed_tries'] = 0

                def failure_processor(event):
                    # type: (Dict[str, Any]) -> None
                    logging.warning(
                        "Maximum retries exceeded for trigger:%s event:push_notification"
                        % (event['user_profile_id']))

                retry_event('missedmessage_mobile_notifications',
                            missed_message, failure_processor)

            return

        android_devices = list(
            PushDeviceToken.objects.filter(user=user_profile,
                                           kind=PushDeviceToken.GCM))

        apple_devices = list(
            PushDeviceToken.objects.filter(user=user_profile,
                                           kind=PushDeviceToken.APNS))

        if apple_devices:
            send_apple_push_notification(user_profile.id, apple_devices,
                                         apns_payload)

        if android_devices:
            send_android_push_notification(android_devices, gcm_payload)

    except UserMessage.DoesNotExist:
        logging.error("Could not find UserMessage with message_id %s" %
                      (missed_message['message_id'], ))
Example #5
    def wrapper(worker: ConcreteQueueWorker, data: Dict[str, Any]) -> None:
        try:
            func(worker, data)
        except (smtplib.SMTPServerDisconnected, socket.gaierror, EmailNotDeliveredException):
            def on_failure(event: Dict[str, Any]) -> None:
                logging.exception("Event {} failed".format(event))

            retry_event(worker.queue_name, data, on_failure)
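
Examples #5 through #9 show only the inner wrapper; the decorator that produces it is omitted. A plausible reconstruction, assuming the decorator name and the use of functools.wraps (neither is shown in the examples):

import logging
import smtplib
import socket
from functools import wraps
from typing import Any, Callable, Dict

def retry_send_email_failures(
        func: Callable[[ConcreteQueueWorker, Dict[str, Any]], None],
) -> Callable[[ConcreteQueueWorker, Dict[str, Any]], None]:
    @wraps(func)
    def wrapper(worker: ConcreteQueueWorker, data: Dict[str, Any]) -> None:
        try:
            func(worker, data)
        except (smtplib.SMTPServerDisconnected, socket.gaierror,
                EmailNotDeliveredException):
            # Transient mail-server trouble: re-enqueue instead of crashing.
            def on_failure(event: Dict[str, Any]) -> None:
                logging.exception("Event {} failed".format(event))

            retry_event(worker.queue_name, data, on_failure)

    return wrapper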
Example #6
    def wrapper(worker: ConcreteQueueWorker, data: Dict[str, Any]) -> None:
        try:
            func(worker, data)
        except (smtplib.SMTPServerDisconnected, socket.gaierror, EmailNotDeliveredException):
            def on_failure(event: Dict[str, Any]) -> None:
                logging.exception("Event {} failed".format(event))

            retry_event(worker.queue_name, data, on_failure)
Example #7
    def wrapper(worker, data):
        # type: (QueueProcessingWorker, Dict[str, Any]) -> None
        try:
            func(worker, data)
        except (smtplib.SMTPServerDisconnected, socket.gaierror, EmailNotDeliveredException):
            def on_failure(event):
                # type: (Dict[str, Any]) -> None
                logging.exception("Event {} failed".format(event))

            retry_event(worker.queue_name, data, on_failure)
Example #8
    def wrapper(worker, data):
        # type: (QueueProcessingWorker, Dict[str, Any]) -> None
        try:
            func(worker, data)
        except (smtplib.SMTPServerDisconnected, socket.gaierror):

            def on_failure(event):
                # type: (Dict[str, Any]) -> None
                logging.exception("Event {} failed".format(event['id']))

            retry_event(worker.queue_name, data, on_failure)
Example #9
    def wrapper(worker: ConcreteQueueWorker, data: Dict[str, Any]) -> None:
        try:
            func(worker, data)
        except (smtplib.SMTPServerDisconnected, socket.gaierror, socket.timeout,
                EmailNotDeliveredException) as e:
            error_class_name = e.__class__.__name__

            def on_failure(event: Dict[str, Any]) -> None:
                logging.exception("Event %r failed due to exception %s", event, error_class_name, stack_info=True)

            retry_event(worker.queue_name, data, on_failure)
Example #10
 def consume(self, event: Dict[str, Any]) -> None:
     try:
         if event.get("type", "add") == "remove":
             message_ids = event.get('message_ids')
             if message_ids is None:  # legacy task across an upgrade
                 message_ids = [event['message_id']]
             handle_remove_push_notification(event['user_profile_id'], message_ids)
         else:
             handle_push_notification(event['user_profile_id'], event)
     except PushNotificationBouncerRetryLaterError:
         def failure_processor(event: Dict[str, Any]) -> None:
             logger.warning(
                 "Maximum retries exceeded for trigger:%s event:push_notification",
                 event['user_profile_id'])
         retry_event(self.queue_name, event, failure_processor)
Example #11
def request_retry(event, request_data, failure_message, exception=None):
    # type: (Dict[str, Any], Dict[str, Any], Text, Optional[Exception]) -> None
    def failure_processor(event):
        # type: (Dict[str, Any]) -> None
        """
        The name of the argument is 'event' on purpose. This argument will hide
        the 'event' argument of the request_retry function. Keeping the same name
        results in a smaller diff.
        """
        bot_user = get_user_profile_by_id(event['user_profile_id'])
        fail_with_message(event, "Maximum retries exceeded! " + failure_message)
        notify_bot_owner(event, request_data, exception=exception)
        logging.warning("Maximum retries exceeded for trigger:%s event:%s" % (bot_user.email, event['command']))

    retry_event('outgoing_webhooks', event, failure_processor)
Example #12
def request_retry(event, request_data, failure_message, exception=None):
    # type: (Dict[str, Any], Dict[str, Any], Text, Optional[Exception]) -> None
    def failure_processor(event):
        # type: (Dict[str, Any]) -> None
        """
        The name of the argument is 'event' on purpose. This argument will hide
        the 'event' argument of the request_retry function. Keeping the same name
        results in a smaller diff.
        """
        bot_user = get_user_profile_by_id(event['user_profile_id'])
        fail_with_message(event, "Maximum retries exceeded! " + failure_message)
        notify_bot_owner(event, request_data, exception=exception)
        logging.warning("Maximum retries exceeded for trigger:%s event:%s" % (bot_user.email, event['command']))

    retry_event('outgoing_webhooks', event, failure_processor)
Example #13
def request_retry(event: Dict[str, Any],
                  request_data: Dict[str, Any],
                  failure_message: Optional[str]=None) -> None:
    def failure_processor(event: Dict[str, Any]) -> None:
        """
        The name of the argument is 'event' on purpose. This argument will hide
        the 'event' argument of the request_retry function. Keeping the same name
        results in a smaller diff.
        """
        bot_user = get_user_profile_by_id(event['user_profile_id'])
        fail_with_message(event, "Bot is unavailable")
        notify_bot_owner(event, request_data, failure_message=failure_message)
        logging.warning("Maximum retries exceeded for trigger:%s event:%s" % (
            bot_user.email, event['command']))

    retry_event('outgoing_webhooks', event, failure_processor)
Example #14
def request_retry(event: Dict[str, Any],
                  request_data: Dict[str, Any],
                  failure_message: Optional[str] = None) -> None:
    def failure_processor(event: Dict[str, Any]) -> None:
        """
        The name of the argument is 'event' on purpose. This argument will hide
        the 'event' argument of the request_retry function. Keeping the same name
        results in a smaller diff.
        """
        bot_user = get_user_profile_by_id(event['user_profile_id'])
        fail_with_message(event, "Bot is unavailable")
        notify_bot_owner(event, request_data, failure_message=failure_message)
        logging.warning("Maximum retries exceeded for trigger:%s event:%s" %
                        (bot_user.email, event['command']))

    retry_event('outgoing_webhooks', event, failure_processor)
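
For context, a hypothetical call site matching the Example #13/#14 signature; the function name, base_url, and message text below are illustrative, not taken from these examples:

import requests
from typing import Any, Dict

def do_rest_call(base_url: str,
                 request_data: Dict[str, Any],
                 event: Dict[str, Any]) -> None:
    # Hypothetical sketch: POST the event to the bot's server and fall
    # back to request_retry on transient connection trouble.
    try:
        response = requests.post(base_url, json=request_data, timeout=10)
        response.raise_for_status()
    except requests.exceptions.ConnectionError:
        request_retry(event, request_data,
                      failure_message="Unable to connect with the third party.")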
Example #15
 def consume(self, data: Dict[str, Any]) -> None:
     # TODO: This is the only implementation that takes a Dict rather than a Mapping; should we simplify?
     user_profile = get_user_profile_by_id(data['user_id'])
     logging.info("Processing signup for user %s in realm %s" % (
         user_profile.email, user_profile.realm.string_id))
     if settings.MAILCHIMP_API_KEY and settings.PRODUCTION:
         endpoint = "https://%s.api.mailchimp.com/3.0/lists/%s/members" % \
                    (settings.MAILCHIMP_API_KEY.split('-')[1], settings.ZULIP_FRIENDS_LIST_ID)
         params = dict(data)
         del params['user_id']
         params['list_id'] = settings.ZULIP_FRIENDS_LIST_ID
         params['status'] = 'subscribed'
         r = requests.post(endpoint, auth=('apikey', settings.MAILCHIMP_API_KEY), json=params, timeout=10)
         if r.status_code == 400 and ujson.loads(r.text)['title'] == 'Member Exists':
             logging.warning("Attempted to sign up already existing email to list: %s" %
                             (data['email_address'],))
         elif r.status_code == 400:
             retry_event('signups', data, lambda e: r.raise_for_status())
         else:
             r.raise_for_status()
Example #16
def handle_push_notification(user_profile_id: int, missed_message: Dict[str, Any]) -> None:
    """
    missed_message is the event received by the
    zerver.worker.queue_processors.PushNotificationWorker.consume function.
    """
    user_profile = get_user_profile_by_id(user_profile_id)
    if not (receives_offline_push_notifications(user_profile) or
            receives_online_notifications(user_profile)):
        return

    (message, user_message) = access_message(user_profile, missed_message['message_id'])
    if user_message is not None:
        # If the user has read the message already, don't push-notify.
        #
        # TODO: It feels like this is already handled when things are
        # put in the queue; maybe we should centralize this logic with
        # the `zerver/tornado/event_queue.py` logic?
        if user_message.flags.read:
            return
    else:
        # Users should only be getting push notifications into this
        # queue for messages they haven't received if they're
        # long-term idle; anything else is likely a bug.
        if not user_profile.long_term_idle:
            logging.error("Could not find UserMessage with message_id %s and user_id %s" % (
                missed_message['message_id'], user_profile_id))
            return

    message.trigger = missed_message['trigger']
    message.stream_name = missed_message.get('stream_name', None)

    apns_payload = get_apns_payload(message)
    gcm_payload = get_gcm_payload(user_profile, message)
    logging.info("Sending push notification to user %s" % (user_profile_id,))

    if uses_notification_bouncer():
        try:
            send_notifications_to_bouncer(user_profile_id,
                                          apns_payload,
                                          gcm_payload)
        except requests.ConnectionError:
            def failure_processor(event: Dict[str, Any]) -> None:
                logging.warning(
                    "Maximum retries exceeded for trigger:%s event:push_notification" % (
                        event['user_profile_id']))
            retry_event('missedmessage_mobile_notifications', missed_message,
                        failure_processor)
        return

    android_devices = list(PushDeviceToken.objects.filter(user=user_profile,
                                                          kind=PushDeviceToken.GCM))

    apple_devices = list(PushDeviceToken.objects.filter(user=user_profile,
                                                        kind=PushDeviceToken.APNS))

    if apple_devices:
        send_apple_push_notification(user_profile.id, apple_devices,
                                     apns_payload)

    if android_devices:
        send_android_push_notification(android_devices, gcm_payload)
Example #17
def handle_push_notification(user_profile_id: int, missed_message: Dict[str, Any]) -> None:
    """
    missed_message is the event received by the
    zerver.worker.queue_processors.PushNotificationWorker.consume function.
    """
    user_profile = get_user_profile_by_id(user_profile_id)
    if not (receives_offline_push_notifications(user_profile) or
            receives_online_notifications(user_profile)):
        return

    (message, user_message) = access_message(user_profile, missed_message['message_id'])
    if user_message is not None:
        # If the user has read the message already, don't push-notify.
        #
        # TODO: It feels like this is already handled when things are
        # put in the queue; maybe we should centralize this logic with
        # the `zerver/tornado/event_queue.py` logic?
        if user_message.flags.read:
            return

        # Otherwise, we mark the message as having an active mobile
        # push notification, so that we can send revocation messages
        # later.
        user_message.flags.active_mobile_push_notification = True
        user_message.save(update_fields=["flags"])
    else:
        # Users should only be getting push notifications into this
        # queue for messages they haven't received if they're
        # long-term idle; anything else is likely a bug.
        if not user_profile.long_term_idle:
            logging.error("Could not find UserMessage with message_id %s and user_id %s" % (
                missed_message['message_id'], user_profile_id))
            return

    message.trigger = missed_message['trigger']
    message.stream_name = missed_message.get('stream_name', None)

    apns_payload = get_apns_payload(message)
    gcm_payload = get_gcm_payload(user_profile, message)
    logging.info("Sending push notification to user %s" % (user_profile_id,))

    if uses_notification_bouncer():
        try:
            send_notifications_to_bouncer(user_profile_id,
                                          apns_payload,
                                          gcm_payload)
        except requests.ConnectionError:
            def failure_processor(event: Dict[str, Any]) -> None:
                logging.warning(
                    "Maximum retries exceeded for trigger:%s event:push_notification" % (
                        event['user_profile_id']))
            retry_event('missedmessage_mobile_notifications', missed_message,
                        failure_processor)
        return

    android_devices = list(PushDeviceToken.objects.filter(user=user_profile,
                                                          kind=PushDeviceToken.GCM))

    apple_devices = list(PushDeviceToken.objects.filter(user=user_profile,
                                                        kind=PushDeviceToken.APNS))

    if apple_devices:
        send_apple_push_notification(user_profile.id, apple_devices,
                                     apns_payload)

    if android_devices:
        send_android_push_notification(android_devices, gcm_payload)
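
The active_mobile_push_notification flag introduced in Example #17 exists so that revocations can be sent later; the handle_remove_push_notification called in Example #1 is that counterpart. A sketch of what it might look like, with the GCM payload shape and the flag-clearing update assumed rather than taken from these examples:

from typing import List

from django.db.models import F

def handle_remove_push_notification(user_profile_id: int,
                                    message_ids: List[int]) -> None:
    user_profile = get_user_profile_by_id(user_profile_id)
    # Assumed payload shape: tell the client which messages to retract.
    gcm_payload = {'event': 'remove',
                   'zulip_message_ids': ','.join(str(message_id)
                                                 for message_id in message_ids)}
    android_devices = list(PushDeviceToken.objects.filter(
        user=user_profile, kind=PushDeviceToken.GCM))
    if android_devices:
        send_android_push_notification(android_devices, gcm_payload)
    # Clear the flag so the same messages are not revoked twice.
    UserMessage.objects.filter(
        user_profile_id=user_profile_id,
        message_id__in=message_ids,
    ).update(flags=F('flags').bitand(
        ~UserMessage.flags.active_mobile_push_notification))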
Example #18
    def consume(self, event: Dict[str, Any]) -> None:
        start = time.time()
        if event["type"] == "mark_stream_messages_as_read":
            user_profile = get_user_profile_by_id(event["user_profile_id"])

            for recipient_id in event["stream_recipient_ids"]:
                count = do_mark_stream_messages_as_read(
                    user_profile, recipient_id)
                logger.info(
                    "Marked %s messages as read for user %s, stream_recipient_id %s",
                    count,
                    user_profile.id,
                    recipient_id,
                )
        elif event["type"] == "mark_stream_messages_as_read_for_everyone":
            # This event is generated by the stream deactivation code path.
            batch_size = 100
            offset = 0
            while True:
                messages = Message.objects.filter(
                    recipient_id=event["stream_recipient_id"]).order_by(
                        "id")[offset:offset + batch_size]
                UserMessage.objects.filter(message__in=messages).extra(
                    where=[UserMessage.where_unread()]).update(
                        flags=F("flags").bitor(UserMessage.flags.read))
                offset += len(messages)
                if len(messages) < batch_size:
                    break
            logger.info(
                "Marked %s messages as read for all users, stream_recipient_id %s",
                offset,
                event["stream_recipient_id"],
            )
        elif event["type"] == "clear_push_device_tokens":
            try:
                clear_push_device_tokens(event["user_profile_id"])
            except PushNotificationBouncerRetryLaterError:

                def failure_processor(event: Dict[str, Any]) -> None:
                    logger.warning(
                        "Maximum retries exceeded for trigger:%s event:clear_push_device_tokens",
                        event["user_profile_id"],
                    )

                retry_event(self.queue_name, event, failure_processor)
        elif event["type"] == "realm_export":
            realm = Realm.objects.get(id=event["realm_id"])
            output_dir = tempfile.mkdtemp(prefix="zulip-export-")
            export_event = RealmAuditLog.objects.get(id=event["id"])
            user_profile = get_user_profile_by_id(event["user_profile_id"])

            try:
                public_url = export_realm_wrapper(
                    realm=realm,
                    output_dir=output_dir,
                    threads=6,
                    upload=True,
                    public_only=True,
                    delete_after_upload=True,
                )
            except Exception:
                export_event.extra_data = orjson.dumps(
                    dict(failed_timestamp=timezone_now().timestamp(),
                         )).decode()
                export_event.save(update_fields=["extra_data"])
                logging.error(
                    "Data export for %s failed after %s",
                    user_profile.realm.string_id,
                    time.time() - start,
                )
                notify_realm_export(user_profile)
                return

            assert public_url is not None

            # Update the extra_data field now that the export is complete.
            export_event.extra_data = orjson.dumps(
                dict(export_path=urllib.parse.urlparse(public_url).path,
                     )).decode()
            export_event.save(update_fields=["extra_data"])

            # Send a private message notification letting the user who
            # triggered the export know the export finished.
            with override_language(user_profile.default_language):
                content = _(
                    "Your data export is complete and has been uploaded here:\n\n{public_url}"
                ).format(public_url=public_url)
            internal_send_private_message(
                sender=get_system_bot(settings.NOTIFICATION_BOT, realm.id),
                recipient_user=user_profile,
                content=content,
            )

            # For future frontend use, also notify administrator
            # clients that the export happened.
            notify_realm_export(user_profile)
            logging.info(
                "Completed data export for %s in %s",
                user_profile.realm.string_id,
                time.time() - start,
            )

        end = time.time()
        logger.info("deferred_work processed %s event (%dms)", event["type"],
                    (end - start) * 1000)
Example #19
def handle_push_notification(user_profile_id: int, missed_message: Dict[str, Any]) -> None:
    """
    missed_message is the event received by the
    zerver.worker.queue_processors.PushNotificationWorker.consume function.
    """
    if not push_notifications_enabled():
        return
    user_profile = get_user_profile_by_id(user_profile_id)
    if not (receives_offline_push_notifications(user_profile) or
            receives_online_notifications(user_profile)):
        return

    try:
        (message, user_message) = access_message(user_profile, missed_message['message_id'])
    except JsonableError:
        if ArchivedMessage.objects.filter(id=missed_message['message_id']).exists():
            # If the cause is a race with the message being deleted,
            # that's normal and we have no need to log an error.
            return
        logging.error("Unexpected message access failure handling push notifications: %s %s" % (
            user_profile.id, missed_message['message_id']))
        return

    if user_message is not None:
        # If the user has read the message already, don't push-notify.
        #
        # TODO: It feels like this is already handled when things are
        # put in the queue; maybe we should centralize this logic with
        # the `zerver/tornado/event_queue.py` logic?
        if user_message.flags.read:
            return

        # Otherwise, we mark the message as having an active mobile
        # push notification, so that we can send revocation messages
        # later.
        user_message.flags.active_mobile_push_notification = True
        user_message.save(update_fields=["flags"])
    else:
        # Users should only be getting push notifications into this
        # queue for messages they haven't received if they're
        # long-term idle; anything else is likely a bug.
        if not user_profile.long_term_idle:
            logger.error("Could not find UserMessage with message_id %s and user_id %s" % (
                missed_message['message_id'], user_profile_id))
            return

    message.trigger = missed_message['trigger']

    apns_payload = get_apns_payload(user_profile, message)
    gcm_payload = get_gcm_payload(user_profile, message)
    gcm_options = {'priority': 'high'}  # type: Dict[str, Any]
    logger.info("Sending push notifications to mobile clients for user %s" % (user_profile_id,))

    if uses_notification_bouncer():
        try:
            send_notifications_to_bouncer(user_profile_id,
                                          apns_payload,
                                          gcm_payload,
                                          gcm_options)
        except requests.ConnectionError:
            def failure_processor(event: Dict[str, Any]) -> None:
                logger.warning(
                    "Maximum retries exceeded for trigger:%s event:push_notification" % (
                        event['user_profile_id']))
            retry_event('missedmessage_mobile_notifications', missed_message,
                        failure_processor)
        return

    android_devices = list(PushDeviceToken.objects.filter(user=user_profile,
                                                          kind=PushDeviceToken.GCM))

    apple_devices = list(PushDeviceToken.objects.filter(user=user_profile,
                                                        kind=PushDeviceToken.APNS))

    if apple_devices:
        send_apple_push_notification(user_profile.id, apple_devices,
                                     apns_payload)

    if android_devices:
        send_android_push_notification(android_devices, gcm_payload, gcm_options)
Example #20
    def consume(self, event: Dict[str, Any]) -> None:
        if event['type'] == 'mark_stream_messages_as_read':
            user_profile = get_user_profile_by_id(event['user_profile_id'])
            client = Client.objects.get(id=event['client_id'])

            for stream_id in event['stream_ids']:
                # Since the user just unsubscribed, we don't require
                # an active Subscription object (otherwise, private
                # streams would never be accessible)
                (stream, recipient, sub) = access_stream_by_id(user_profile, stream_id,
                                                               require_active=False)
                do_mark_stream_messages_as_read(user_profile, client, stream)
        elif event["type"] == 'mark_stream_messages_as_read_for_everyone':
            # This event is generated by the stream deactivation code path.
            batch_size = 100
            offset = 0
            while True:
                messages = Message.objects.filter(recipient_id=event["stream_recipient_id"]) \
                    .order_by("id")[offset:offset + batch_size]
                UserMessage.objects.filter(message__in=messages).extra(where=[UserMessage.where_unread()]) \
                    .update(flags=F('flags').bitor(UserMessage.flags.read))
                offset += len(messages)
                if len(messages) < batch_size:
                    break
        elif event['type'] == 'clear_push_device_tokens':
            try:
                clear_push_device_tokens(event["user_profile_id"])
            except PushNotificationBouncerRetryLaterError:
                def failure_processor(event: Dict[str, Any]) -> None:
                    logger.warning(
                        "Maximum retries exceeded for trigger:%s event:clear_push_device_tokens",
                        event['user_profile_id'])
                retry_event(self.queue_name, event, failure_processor)
        elif event['type'] == 'realm_export':
            start = time.time()
            realm = Realm.objects.get(id=event['realm_id'])
            output_dir = tempfile.mkdtemp(prefix="zulip-export-")
            export_event = RealmAuditLog.objects.get(id=event['id'])
            user_profile = get_user_profile_by_id(event['user_profile_id'])

            try:
                public_url = export_realm_wrapper(realm=realm, output_dir=output_dir,
                                                  threads=6, upload=True, public_only=True,
                                                  delete_after_upload=True)
            except Exception:
                export_event.extra_data = orjson.dumps(dict(
                    failed_timestamp=timezone_now().timestamp(),
                )).decode()
                export_event.save(update_fields=['extra_data'])
                logging.error(
                    "Data export for %s failed after %s",
                    user_profile.realm.string_id, time.time() - start,
                )
                notify_realm_export(user_profile)
                return

            assert public_url is not None

            # Update the extra_data field now that the export is complete.
            export_event.extra_data = orjson.dumps(dict(
                export_path=urllib.parse.urlparse(public_url).path,
            )).decode()
            export_event.save(update_fields=['extra_data'])

            # Send a private message notification letting the user who
            # triggered the export know the export finished.
            with override_language(user_profile.default_language):
                content = _("Your data export is complete and has been uploaded here:\n\n{public_url}").format(public_url=public_url)
            internal_send_private_message(
                realm=user_profile.realm,
                sender=get_system_bot(settings.NOTIFICATION_BOT),
                recipient_user=user_profile,
                content=content,
            )

            # For future frontend use, also notify administrator
            # clients that the export happened.
            notify_realm_export(user_profile)
            logging.info(
                "Completed data export for %s in %s",
                user_profile.realm.string_id, time.time() - start,
            )
Example #21
 def wrapped_process_notification(notices: List[Dict[str, Any]]) -> None:
     for notice in notices:
         try:
             process_notification(notice)
         except Exception:
             retry_event(queue_name, notice, failure_processor)
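
This fragment closes over queue_name and failure_processor from an enclosing scope that is not shown. A hypothetical reconstruction of that scope; the factory name and the failure_processor body are assumptions:

import logging
from typing import Any, Callable, Dict, List

def make_notification_consumer(
        queue_name: str) -> Callable[[List[Dict[str, Any]]], None]:
    def failure_processor(notice: Dict[str, Any]) -> None:
        logging.error("Maximum retries exceeded for notice %r", notice)

    def wrapped_process_notification(notices: List[Dict[str, Any]]) -> None:
        # Retry each notice individually so one bad notice does not take
        # down the whole batch.
        for notice in notices:
            try:
                process_notification(notice)
            except Exception:
                retry_event(queue_name, notice, failure_processor)

    return wrapped_process_notification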